| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
"""
Grades API URLs.
"""
from django.conf import settings
from django.conf.urls import include, url
from lms.djangoapps.grades.api import views
app_name = 'lms.djangoapps.grades'
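# The v0 routes below interpolate the configurable COURSE_ID_PATTERN regex
# fragment from settings into the URL pattern via str.format().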
urlpatterns = [
url(
r'^v0/course_grade/{course_id}/users/$'.format(
course_id=settings.COURSE_ID_PATTERN,
),
views.UserGradeView.as_view(), name='user_grade_detail'
),
url(
r'^v0/courses/{course_id}/policy/$'.format(
course_id=settings.COURSE_ID_PATTERN,
),
views.CourseGradingPolicy.as_view(), name='course_grading_policy'
),
url(r'^v1/', include('grades.api.v1.urls', namespace='v1'))
]
|
teltek/edx-platform
|
lms/djangoapps/grades/api/urls.py
|
Python
|
agpl-3.0
| 666
|
# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test :class:`Sequence`."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = []
import random
from django.db import connection
from django.db.utils import DatabaseError
from maasserver.sequence import Sequence
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
class TestSequence(MAASServerTestCase):
def query_seq(self, name):
cursor = connection.cursor()
cursor.execute(
"SELECT nextval(%s)", [name])
return cursor.fetchone()[0]
def test_create_sequence(self):
name = factory.make_name('seq', sep='')
seq = Sequence(name)
seq.create()
val = self.query_seq(seq.name)
self.assertEqual(1, val)
def test_sequence_respects_minvalue(self):
name = factory.make_name('seq', sep='')
minvalue = random.randint(1, 50)
seq = Sequence(name, minvalue=minvalue)
seq.create()
val = self.query_seq(seq.name)
self.assertEqual(minvalue, val)
def test_sequence_respects_incr(self):
name = factory.make_name('seq', sep='')
incr = random.randint(1, 50)
seq = Sequence(name, incr=incr)
seq.create()
val = self.query_seq(seq.name)
val = self.query_seq(seq.name)
self.assertEqual(1 + incr, val)
def test_sequence_respects_maxvalue_and_cycles(self):
name = factory.make_name('seq', sep='')
maxvalue = random.randint(10, 50)
seq = Sequence(name, maxvalue=maxvalue)
seq.create()
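# Restart the sequence at its maximum value: the first nextval() then returns
# maxvalue and the following call wraps around, so the sequence cycles back to 1.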
cursor = connection.cursor()
query = "ALTER SEQUENCE %s" % seq.name
cursor.execute(query + " RESTART WITH %s", [maxvalue])
val = self.query_seq(seq.name)
val = self.query_seq(seq.name)
self.assertEqual(1, val)
def test_drop_sequence(self):
name = factory.make_name('seq', sep='')
seq = Sequence(name)
seq.create()
seq.drop()
self.assertRaisesRegexp(
DatabaseError, "does not exist", self.query_seq,
seq.name)
def test_nextval_returns_sequential_values(self):
name = factory.make_name('seq', sep='')
seq = Sequence(name)
seq.create()
self.assertSequenceEqual(
range(1, 11), [seq.nextval() for i in range(10)])
|
cloudbase/maas
|
src/maasserver/tests/test_sequence.py
|
Python
|
agpl-3.0
| 2,562
|
from lxml.etree import LxmlError
from lxml.html.clean import Cleaner
from django.template import Library
register = Library()
@register.filter
def as_clean_html(value):
try:
return Cleaner(style=True, scripts=True).clean_html(value.strip())
except LxmlError:
return '<p></p>'
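# Illustrative template usage (a sketch; "page.body" is an assumed context
# variable, not something defined in this app):
#
#     {% load clean_html %}
#     {{ page.body|as_clean_html }}
#
# The filter strips <script> and <style> content and falls back to an empty
# paragraph when lxml cannot parse the value.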
|
mysociety/pombola
|
pombola/core/templatetags/clean_html.py
|
Python
|
agpl-3.0
| 303
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-06 10:03
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import lily.messaging.email.models.models
import re
class Migration(migrations.Migration):
dependencies = [
('tenant', '0008_auto_20180822_1308'),
('email', '0043_auto_20180906_1300'),
]
operations = [
migrations.CreateModel(
name='EmailDraft',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('to', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254), size=None, verbose_name='to')),
('cc', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254), size=None, verbose_name='cc')),
('bcc', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254), size=None, verbose_name='bcc')),
('headers', django.contrib.postgres.fields.jsonb.JSONField(default=dict, verbose_name='email headers')),
('subject', models.CharField(blank=True, max_length=255, verbose_name='subject')),
('body', models.TextField(blank=True, verbose_name='html body')),
('mapped_attachments', models.IntegerField(verbose_name='number of mapped attachments')),
('original_attachment_ids', models.TextField(default=b'', validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
('template_attachment_ids', models.CharField(default=b'', max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
('original_message_id', models.CharField(blank=True, db_index=True, default=b'', max_length=50)),
('send_from', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='draft_messages', to='email.EmailAccount', verbose_name='from')),
('tenant', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='tenant.Tenant')),
],
options={
'verbose_name': 'email draft message',
'verbose_name_plural': 'email draft messages',
},
),
migrations.CreateModel(
name='EmailDraftAttachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('inline', models.BooleanField(default=False)),
('attachment', models.FileField(max_length=255, upload_to=lily.messaging.email.models.models.get_outbox_attachment_upload_path)),
('size', models.PositiveIntegerField(default=0)),
('content_type', models.CharField(max_length=255, verbose_name='content type')),
('email_draft', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='email.EmailDraft')),
('tenant', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='tenant.Tenant')),
],
options={
'verbose_name': 'email draft attachment',
'verbose_name_plural': 'email draft attachments',
},
),
migrations.AddField(
model_name='emailmessage',
name='received_by_bcc',
field=models.ManyToManyField(related_name='received_messages_as_bcc', to='email.Recipient'),
),
]
|
HelloLily/hellolily
|
lily/messaging/email/migrations/0044_auto_20181106_1003.py
|
Python
|
agpl-3.0
| 3,907
|
import re
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import HtmlXPathSelector
from lxml.cssselect import CSSSelector
from cocktails.items import CocktailItem
from cocktails.utils import html_to_text
xp_title = CSSSelector('.recipe_title').path
xp_ingredients = CSSSelector('.ingredient').path
class DrinksMixerSpider(CrawlSpider):
name = 'drinksmixer'
allowed_domains = ['www.drinksmixer.com']
start_urls = ['http://www.drinksmixer.com/']
rules = (
Rule(LinkExtractor(allow=r'/drink[^/]+.html$'), callback='parse_recipe'),
Rule(LinkExtractor(allow=r'/cat/')),
)
def parse_recipe(self, response):
hxs = HtmlXPathSelector(response)
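# Take the first matching title if there is one; the for/else construct
# falls through to the else branch and returns no items when the page has
# no element matching the recipe-title selector.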
for title in hxs.select(xp_title).extract():
break
else:
return []
ingredients = hxs.select(xp_ingredients).extract()
return [CocktailItem(
title=re.sub(r'\s+recipe$', '', html_to_text(title)),
picture=None,
url=response.url,
source='Drinks Mixer',
ingredients=[html_to_text(x) for x in ingredients],
)]
|
snoack/cocktail-search
|
crawler/cocktails/spiders/drinksmixer.py
|
Python
|
agpl-3.0
| 1,188
|
# -*- coding: utf-8 -*-
#
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append(os.path.abspath('../../../../'))
sys.path.append(os.path.abspath('../../../'))
from docs.shared.conf import *
sys.path.insert(0, os.path.abspath('.'))
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path.append('source/_static')
project = u'edX Data Documentation'
copyright = u'2013, edX Documentation Team'
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# Added to turn off smart quotes so users can copy JSON values without problems.
html_use_smartypants = False
|
pelikanchik/edx-platform
|
docs/en_us/data/source/conf.py
|
Python
|
agpl-3.0
| 973
|
# -*- coding: utf-8 -*-
# © 2016 Akretion (Alexis de Lattre <alexis.delattre@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.addons.account.tests.account_test_classes\
import AccountingTestCase
from odoo.tools import float_compare
import time
from lxml import etree
ch_iban = 'CH15 3881 5158 3845 3843 7'
class TestSCT_CH(AccountingTestCase):
def setUp(self):
super(TestSCT_CH, self).setUp()
Account = self.env['account.account']
Journal = self.env['account.journal']
PaymentMode = self.env['account.payment.mode']
self.payment_order_model = self.env['account.payment.order']
self.payment_line_model = self.env['account.payment.line']
self.bank_line_model = self.env['bank.payment.line']
self.partner_bank_model = self.env['res.partner.bank']
self.attachment_model = self.env['ir.attachment']
self.invoice_model = self.env['account.invoice']
self.invoice_line_model = self.env['account.invoice.line']
self.main_company = self.env.ref('base.main_company')
self.partner_agrolait = self.env.ref('base.res_partner_2')
self.account_expense = Account.search([(
'user_type_id',
'=',
self.env.ref('account.data_account_type_expenses').id)], limit=1)
self.account_payable = Account.search([(
'user_type_id',
'=',
self.env.ref('account.data_account_type_payable').id)], limit=1)
# Create a swiss bank
ch_bank1 = self.env['res.bank'].create({
'name': 'Alternative Bank Schweiz AG',
'bic': 'ALSWCH21XXX',
'clearing': '38815',
'ccp': '46-110-7',
})
# create a ch bank account for my company
self.cp_partner_bank = self.partner_bank_model.create({
'acc_number': ch_iban,
'partner_id': self.env.ref('base.main_partner').id,
})
self.cp_partner_bank.onchange_acc_number_set_swiss_bank()
# create journal
self.bank_journal = Journal.create({
'name': 'Company Bank journal',
'type': 'bank',
'code': 'BNKFB',
'bank_account_id': self.cp_partner_bank.id,
'bank_id': ch_bank1.id,
})
# create a payment mode
pay_method_id = self.env.ref(
'account_banking_sepa_credit_transfer.sepa_credit_transfer').id
self.payment_mode = PaymentMode.create({
'name': 'CH credit transfer',
'bank_account_link': 'fixed',
'fixed_journal_id': self.bank_journal.id,
'payment_method_id': pay_method_id,
})
self.payment_mode.payment_method_id.pain_version =\
'pain.001.001.03.ch.02'
self.chf_currency = self.env.ref('base.CHF')
self.eur_currency = self.env.ref('base.EUR')
self.main_company.currency_id = self.chf_currency.id
ch_bank2 = self.env['res.bank'].create({
'name': 'Banque Cantonale Vaudoise',
'bic': 'BCVLCH2LXXX',
'clearing': '767',
'ccp': '01-1234-1',
})
# Create a bank account with clearing 767
self.agrolait_partner_bank = self.partner_bank_model.create({
'acc_number': 'CH9100767000S00023455',
'partner_id': self.partner_agrolait.id,
'bank_id': ch_bank2.id,
'ccp': '01-1234-1',
})
def test_sct_ch_payment_type1(self):
invoice1 = self.create_invoice(
self.partner_agrolait.id,
self.agrolait_partner_bank.id, self.chf_currency, 42.0,
'bvr', '132000000000000000000000014')
invoice2 = self.create_invoice(
self.partner_agrolait.id,
self.agrolait_partner_bank.id, self.chf_currency, 12.0,
'bvr', '132000000000004')
for inv in [invoice1, invoice2]:
action = inv.create_account_payment_line()
self.assertEquals(action['res_model'], 'account.payment.order')
self.payment_order = self.payment_order_model.browse(action['res_id'])
self.assertEquals(
self.payment_order.payment_type, 'outbound')
self.assertEquals(
self.payment_order.payment_mode_id, self.payment_mode)
self.assertEquals(
self.payment_order.journal_id, self.bank_journal)
pay_lines = self.payment_line_model.search([
('partner_id', '=', self.partner_agrolait.id),
('order_id', '=', self.payment_order.id)])
self.assertEquals(len(pay_lines), 2)
agrolait_pay_line1 = pay_lines[0]
accpre = self.env['decimal.precision'].precision_get('Account')
self.assertEquals(agrolait_pay_line1.currency_id, self.chf_currency)
self.assertEquals(
agrolait_pay_line1.partner_bank_id, invoice1.partner_bank_id)
self.assertEquals(float_compare(
agrolait_pay_line1.amount_currency, 42, precision_digits=accpre),
0)
self.assertEquals(agrolait_pay_line1.communication_type, 'bvr')
self.assertEquals(
agrolait_pay_line1.communication,
'132000000000000000000000014')
self.payment_order.draft2open()
self.assertEquals(self.payment_order.state, 'open')
self.assertEquals(self.payment_order.sepa, False)
bank_lines = self.bank_line_model.search([
('partner_id', '=', self.partner_agrolait.id)])
self.assertEquals(len(bank_lines), 2)
for bank_line in bank_lines:
self.assertEquals(bank_line.currency_id, self.chf_currency)
self.assertEquals(bank_line.communication_type, 'bvr')
self.assertEquals(
bank_line.communication in [
'132000000000000000000000014',
'132000000000004'], True)
self.assertEquals(
bank_line.partner_bank_id, invoice1.partner_bank_id)
action = self.payment_order.open2generated()
self.assertEquals(self.payment_order.state, 'generated')
attachment = self.attachment_model.browse(action.get('attachment_id',
action['res_id']))
self.assertEquals(attachment.datas_fname[-4:], '.xml')
xml_file = attachment.datas.decode('base64')
xml_root = etree.fromstring(xml_file)
# print "xml_file=", etree.tostring(xml_root, pretty_print=True)
namespaces = xml_root.nsmap
namespaces['p'] = xml_root.nsmap[None]
namespaces.pop(None)
pay_method_xpath = xml_root.xpath(
'//p:PmtInf/p:PmtMtd', namespaces=namespaces)
self.assertEquals(
namespaces['p'],
'http://www.six-interbank-clearing.com/de/'
'pain.001.001.03.ch.02.xsd')
self.assertEquals(pay_method_xpath[0].text, 'TRF')
sepa_xpath = xml_root.xpath(
'//p:PmtInf/p:PmtTpInf/p:SvcLvl/p:Cd', namespaces=namespaces)
self.assertEquals(len(sepa_xpath), 0)
local_instrument_xpath = xml_root.xpath(
'//p:PmtInf/p:PmtTpInf/p:LclInstrm/p:Prtry', namespaces=namespaces)
self.assertEquals(local_instrument_xpath[0].text, 'CH01')
debtor_acc_xpath = xml_root.xpath(
'//p:PmtInf/p:DbtrAcct/p:Id/p:IBAN', namespaces=namespaces)
self.assertEquals(
debtor_acc_xpath[0].text,
self.payment_order.company_partner_bank_id.sanitized_acc_number)
self.payment_order.generated2uploaded()
self.assertEquals(self.payment_order.state, 'uploaded')
for inv in [invoice1, invoice2]:
self.assertEquals(inv.state, 'paid')
return
def test_sct_ch_payment_type3(self):
invoice1 = self.create_invoice(
self.partner_agrolait.id,
self.agrolait_partner_bank.id, self.eur_currency, 4042.0,
'none', 'Inv1242')
invoice2 = self.create_invoice(
self.partner_agrolait.id,
self.agrolait_partner_bank.id, self.eur_currency, 1012.55,
'none', 'Inv1248')
for inv in [invoice1, invoice2]:
action = inv.create_account_payment_line()
self.assertEquals(action['res_model'], 'account.payment.order')
self.payment_order = self.payment_order_model.browse(action['res_id'])
self.assertEquals(
self.payment_order.payment_type, 'outbound')
self.assertEquals(
self.payment_order.payment_mode_id, self.payment_mode)
self.assertEquals(
self.payment_order.journal_id, self.bank_journal)
pay_lines = self.payment_line_model.search([
('partner_id', '=', self.partner_agrolait.id),
('order_id', '=', self.payment_order.id)])
self.assertEquals(len(pay_lines), 2)
agrolait_pay_line1 = pay_lines[0]
accpre = self.env['decimal.precision'].precision_get('Account')
self.assertEquals(agrolait_pay_line1.currency_id, self.eur_currency)
self.assertEquals(
agrolait_pay_line1.partner_bank_id, invoice1.partner_bank_id)
self.assertEquals(float_compare(
agrolait_pay_line1.amount_currency, 4042.0,
precision_digits=accpre), 0)
self.assertEquals(agrolait_pay_line1.communication_type, 'normal')
self.assertEquals(
agrolait_pay_line1.communication, 'Inv1242')
self.payment_order.draft2open()
self.assertEquals(self.payment_order.state, 'open')
self.assertEquals(self.payment_order.sepa, False)
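# With plain ('none'/'normal') communications, the two payment lines are
# expected to be grouped into a single bank payment line whose communication
# concatenates both invoice references, as asserted below.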
bank_lines = self.bank_line_model.search([
('partner_id', '=', self.partner_agrolait.id)])
self.assertEquals(len(bank_lines), 1)
bank_line = bank_lines[0]
self.assertEquals(bank_line.currency_id, self.eur_currency)
self.assertEquals(bank_line.communication_type, 'normal')
self.assertEquals(bank_line.communication, 'Inv1242-Inv1248')
self.assertEquals(
bank_line.partner_bank_id, invoice1.partner_bank_id)
action = self.payment_order.open2generated()
self.assertEquals(self.payment_order.state, 'generated')
attachment = self.attachment_model.browse(action.get('attachment_id',
action['res_id']))
self.assertEquals(attachment.datas_fname[-4:], '.xml')
xml_file = attachment.datas.decode('base64')
xml_root = etree.fromstring(xml_file)
# print "xml_file=", etree.tostring(xml_root, pretty_print=True)
namespaces = xml_root.nsmap
namespaces['p'] = xml_root.nsmap[None]
namespaces.pop(None)
pay_method_xpath = xml_root.xpath(
'//p:PmtInf/p:PmtMtd', namespaces=namespaces)
self.assertEquals(
namespaces['p'],
'http://www.six-interbank-clearing.com/de/'
'pain.001.001.03.ch.02.xsd')
self.assertEquals(pay_method_xpath[0].text, 'TRF')
sepa_xpath = xml_root.xpath(
'//p:PmtInf/p:PmtTpInf/p:SvcLvl/p:Cd', namespaces=namespaces)
self.assertEquals(len(sepa_xpath), 0)
local_instrument_xpath = xml_root.xpath(
'//p:PmtInf/p:PmtTpInf/p:LclInstrm/p:Prtry', namespaces=namespaces)
self.assertEquals(len(local_instrument_xpath), 0)
debtor_acc_xpath = xml_root.xpath(
'//p:PmtInf/p:DbtrAcct/p:Id/p:IBAN', namespaces=namespaces)
self.assertEquals(
debtor_acc_xpath[0].text,
self.payment_order.company_partner_bank_id.sanitized_acc_number)
self.payment_order.generated2uploaded()
self.assertEquals(self.payment_order.state, 'uploaded')
for inv in [invoice1, invoice2]:
self.assertEquals(inv.state, 'paid')
return
def create_invoice(
self, partner_id, partner_bank_id, currency, price_unit,
ref_type, ref, type='in_invoice'):
invoice = self.invoice_model.create({
'partner_id': partner_id,
'reference_type': ref_type,
'reference': ref,
'currency_id': currency.id,
'name': 'test',
'account_id': self.account_payable.id,
'type': type,
'date_invoice': time.strftime('%Y-%m-%d'),
'payment_mode_id': self.payment_mode.id,
'partner_bank_id': partner_bank_id,
})
self.invoice_line_model.create({
'invoice_id': invoice.id,
'price_unit': price_unit,
'quantity': 1,
'name': 'Great service',
'account_id': self.account_expense.id,
})
invoice.invoice_validate()
invoice.action_move_create()
return invoice
|
CompassionCH/l10n-switzerland
|
l10n_ch_pain_credit_transfer/tests/test_ch_sct.py
|
Python
|
agpl-3.0
| 12,861
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-10-28 22:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0010_auto_20161026_2148'),
]
operations = [
migrations.AddField(
model_name='participationitem',
name='tags',
field=models.ManyToManyField(to='core.Tag'),
),
migrations.AddField(
model_name='userprofile',
name='tags',
field=models.ManyToManyField(to='core.Tag'),
),
]
|
better-dem/portal
|
core/migrations/0011_auto_20161028_2250.py
|
Python
|
agpl-3.0
| 620
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import superdesk
from flask import current_app as app
from datetime import timedelta, timezone, datetime
from werkzeug.exceptions import HTTPException
from superdesk.notification import push_notification
from superdesk.activity import ACTIVITY_EVENT, notify_and_add_activity
from superdesk.io import providers
from superdesk.celery_app import celery
from superdesk.celery_task_utils import get_lock_id, get_host_id
from superdesk.utc import utcnow, get_expiry_date
from superdesk.workflow import set_default_state
from superdesk.errors import ProviderError
from superdesk.stats import stats
from superdesk.upload import url_for_media
from superdesk.media.media_operations import download_file_from_url, process_file
from superdesk.media.renditions import generate_renditions
from superdesk.io.iptc import subject_codes
from superdesk.metadata.item import GUID_NEWSML, GUID_FIELD, FAMILY_ID, ITEM_TYPE, CONTENT_TYPE
from superdesk.metadata.utils import generate_guid
from superdesk.lock import lock, unlock
UPDATE_SCHEDULE_DEFAULT = {'minutes': 5}
LAST_UPDATED = 'last_updated'
LAST_ITEM_UPDATE = 'last_item_update'
STATE_INGESTED = 'ingested'
IDLE_TIME_DEFAULT = {'hours': 0, 'minutes': 0}
logger = logging.getLogger(__name__)
superdesk.workflow_state(STATE_INGESTED)
superdesk.workflow_action(
name='ingest'
)
def is_valid_type(provider, provider_type_filter=None):
"""Test if given provider has valid type and should be updated.
:param provider: provider to be updated
:param provider_type_filter: active provider type filter
"""
provider_type = provider.get('type')
if provider_type not in providers:
return False
if provider_type_filter and provider_type != provider_type_filter:
return False
return True
def is_scheduled(provider):
"""Test if given provider should be scheduled for update.
:param provider: ingest provider
"""
now = utcnow()
last_updated = provider.get(LAST_UPDATED, now - timedelta(days=100)) # if never updated run now
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return last_updated + timedelta(**update_schedule) < now
def is_closed(provider):
"""Test if provider is closed.
:param provider: ingest provider
"""
return provider.get('is_closed', False)
def filter_expired_items(provider, items):
def is_not_expired(item):
if item.get('expiry') or item.get('versioncreated'):
expiry = item.get('expiry', item['versioncreated'] + delta)
if expiry.tzinfo:
return expiry > utcnow()
return False
try:
delta = timedelta(minutes=provider.get('content_expiry', app.config['INGEST_EXPIRY_MINUTES']))
return [item for item in items if is_not_expired(item)]
except Exception as ex:
raise ProviderError.providerFilterExpiredContentError(ex, provider)
def get_provider_rule_set(provider):
if provider.get('rule_set'):
return superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
def get_provider_routing_scheme(provider):
"""Returns the ingests provider's routing scheme configuration.
If provider has a routing scheme defined (i.e. scheme ID is not None), the
scheme is fetched from the database. If not, nothing is returned.
For all scheme rules that have a reference to a content filter defined,
that filter's configuration is fetched from the database as well and
embedded into the corresponding scheme rule.
:param dict provider: ingest provider configuration
:return: fetched provider's routing scheme configuration (if any)
:rtype: dict or None
"""
if not provider.get('routing_scheme'):
return None
schemes_service = superdesk.get_resource_service('routing_schemes')
filters_service = superdesk.get_resource_service('content_filters')
scheme = schemes_service.find_one(_id=provider['routing_scheme'], req=None)
# for those routing rules that have a content filter defined,
# get that filter from DB and embed it into the rule...
rules_filters = (
(rule, str(rule['filter']))
for rule in scheme['rules'] if rule.get('filter'))
for rule, filter_id in rules_filters:
content_filter = filters_service.find_one(_id=filter_id, req=None)
rule['filter'] = content_filter
return scheme
def get_task_ttl(provider):
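# Convert the provider's update schedule into a number of seconds; the result
# is used as the celery task expiry when update_provider is queued below.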
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return update_schedule.get('minutes', 0) * 60 + update_schedule.get('hours', 0) * 3600
def get_is_idle(provider):
last_item = provider.get(LAST_ITEM_UPDATE)
idle_time = provider.get('idle_time', IDLE_TIME_DEFAULT)
if isinstance(idle_time['hours'], datetime):
idle_hours = 0
else:
idle_hours = idle_time['hours']
if isinstance(idle_time['minutes'], datetime):
idle_minutes = 0
else:
idle_minutes = idle_time['minutes']
# there is an update time and the idle time is non-zero
if last_item and (idle_hours != 0 or idle_minutes != 0):
if utcnow() > last_item + timedelta(hours=idle_hours, minutes=idle_minutes):
return True
return False
def get_task_id(provider):
return 'update-ingest-{0}-{1}'.format(provider.get('name'), provider.get(superdesk.config.ID_FIELD))
def is_updatable(provider):
"""Test if given provider has service that can update it.
:param provider
"""
service = providers.get(provider.get('type'))
return hasattr(service, 'update')
class UpdateIngest(superdesk.Command):
"""Update ingest providers."""
option_list = (
superdesk.Option('--provider', '-p', dest='provider_type'),
)
def run(self, provider_type=None):
for provider in superdesk.get_resource_service('ingest_providers').get(req=None, lookup={}):
if (is_valid_type(provider, provider_type) and is_updatable(provider)
and is_scheduled(provider) and not is_closed(provider)):
kwargs = {
'provider': provider,
'rule_set': get_provider_rule_set(provider),
'routing_scheme': get_provider_routing_scheme(provider)
}
update_provider.apply_async(
expires=get_task_ttl(provider),
kwargs=kwargs)
@celery.task(soft_time_limit=1800, bind=True)
def update_provider(self, provider, rule_set=None, routing_scheme=None):
"""
Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
updates the provider.
"""
if provider.get('type') == 'search':
return
if not is_updatable(provider):
return
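# Take a distributed lock per provider so concurrent workers do not ingest
# the same provider twice; the expiry mirrors the task's soft_time_limit.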
lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])
host_name = get_host_id(self)
if not lock(lock_name, host_name, expire=1800):
return
try:
update = {
LAST_UPDATED: utcnow()
}
for items in providers[provider.get('type')].update(provider):
ingest_items(items, provider, rule_set, routing_scheme)
stats.incr('ingest.ingested_items', len(items))
if items:
update[LAST_ITEM_UPDATE] = utcnow()
ingest_service = superdesk.get_resource_service('ingest_providers')
ingest_service.system_update(provider[superdesk.config.ID_FIELD], update, provider)
if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
notify_and_add_activity(
ACTIVITY_EVENT,
'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
resource='ingest_providers',
user_list=ingest_service._get_administrators(),
name=provider.get('name'),
last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))
logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))
# Only push a notification if there has been an update
if LAST_ITEM_UPDATE in update:
push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
finally:
unlock(lock_name, host_name)
def process_anpa_category(item, provider):
try:
anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
if anpa_categories:
for item_category in item['anpa_category']:
for anpa_category in anpa_categories['items']:
if anpa_category['is_active'] is True \
and item_category['qcode'].lower() == anpa_category['qcode'].lower():
item_category['name'] = anpa_category['name']
# make the case of the qcode match what we hold in our dictionary
item_category['qcode'] = anpa_category['qcode']
break
except Exception as ex:
raise ProviderError.anpaError(ex, provider)
def derive_category(item, provider):
"""
Assuming that the item has at least one IPTC subject, use the vocabulary map to derive an ANPA category
:param item:
:return: An item with a category if possible
"""
try:
categories = []
subject_map = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='iptc_category_map')
if subject_map:
for entry in (map_entry for map_entry in subject_map['items'] if map_entry['is_active']):
for subject in item.get('subject', []):
if subject['qcode'] == entry['subject']:
if not any(c['qcode'] == entry['category'] for c in categories):
categories.append({'qcode': entry['category']})
if len(categories):
item['anpa_category'] = categories
process_anpa_category(item, provider)
except Exception as ex:
logger.exception(ex)
def process_iptc_codes(item, provider):
"""
Ensures that the higher level IPTC codes are present by inserting them if missing, for example
if given 15039001 (Formula One) make sure that 15039000 (motor racing) and 15000000 (sport) are there as well
:param item: A story item
:return: A story item with possible expanded subjects
"""
try:
def iptc_already_exists(code):
for entry in item['subject']:
if 'qcode' in entry and code == entry['qcode']:
return True
return False
for subject in item['subject']:
if 'qcode' in subject and len(subject['qcode']) == 8:
top_qcode = subject['qcode'][:2] + '000000'
if not iptc_already_exists(top_qcode):
item['subject'].append({'qcode': top_qcode, 'name': subject_codes[top_qcode]})
mid_qcode = subject['qcode'][:5] + '000'
if not iptc_already_exists(mid_qcode):
item['subject'].append({'qcode': mid_qcode, 'name': subject_codes[mid_qcode]})
except Exception as ex:
raise ProviderError.iptcError(ex, provider)
def derive_subject(item):
"""
Assuming that the item has an ANPA category, try to derive a subject using the ANPA category vocabulary
:param item:
:return:
"""
try:
category_map = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
if category_map:
for cat in item['anpa_category']:
map_entry = next(
(code for code in category_map['items'] if code['qcode'] == cat['qcode'] and code['is_active']),
None)
if map_entry and 'subject' in map_entry:
item['subject'] = [
{'qcode': map_entry.get('subject'), 'name': subject_codes[map_entry.get('subject')]}]
except Exception as ex:
logger.exception(ex)
def apply_rule_set(item, provider, rule_set=None):
"""
Applies the rule set to the item being ingested into the system. If there is
no rule set, the item is returned unchanged.
:param item: Item to be ingested
:param provider: provider object from whom the item was received
:return: item
"""
try:
if rule_set is None and provider.get('rule_set') is not None:
rule_set = superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
if rule_set and 'body_html' in item:
body = item['body_html']
for rule in rule_set['rules']:
body = body.replace(rule['old'], rule['new'])
item['body_html'] = body
return item
except Exception as ex:
raise ProviderError.ruleError(ex, provider)
def ingest_items(items, provider, rule_set=None, routing_scheme=None):
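# Ingestion happens in three passes: first collect the item references that
# belong to composite (package) items, then ingest all non-composite items,
# and finally ingest the packages themselves with their references resolved
# against the already ingested items.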
all_items = filter_expired_items(provider, items)
items_dict = {doc[GUID_FIELD]: doc for doc in all_items}
items_in_package = []
failed_items = set()
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE]:
items_in_package = [ref['residRef'] for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) != CONTENT_TYPE.COMPOSITE]:
ingested = ingest_item(item, provider, rule_set,
routing_scheme=routing_scheme if not item[GUID_FIELD] in items_in_package else None)
if not ingested:
failed_items.add(item[GUID_FIELD])
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE]:
for ref in [ref for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]:
if ref['residRef'] in failed_items:
failed_items.add(item[GUID_FIELD])
continue
ref.setdefault('location', 'ingest')
itemRendition = items_dict.get(ref['residRef'], {}).get('renditions')
if itemRendition:
ref.setdefault('renditions', itemRendition)
ref[GUID_FIELD] = ref['residRef']
if items_dict.get(ref['residRef']):
ref['residRef'] = items_dict.get(ref['residRef'], {}).get(superdesk.config.ID_FIELD)
if item[GUID_FIELD] in failed_items:
continue
ingested = ingest_item(item, provider, rule_set, routing_scheme)
if not ingested:
failed_items.add(item[GUID_FIELD])
app.data._search_backend('ingest').bulk_insert('ingest', [item for item in all_items
if item[GUID_FIELD] not in failed_items])
if failed_items:
logger.error('Failed to ingest the following items: %s', failed_items)
return failed_items
def ingest_item(item, provider, rule_set=None, routing_scheme=None):
try:
item.setdefault(superdesk.config.ID_FIELD, generate_guid(type=GUID_NEWSML))
item[FAMILY_ID] = item[superdesk.config.ID_FIELD]
providers[provider.get('type')].provider = provider
item['ingest_provider'] = str(provider[superdesk.config.ID_FIELD])
item.setdefault('source', provider.get('source', ''))
set_default_state(item, STATE_INGESTED)
item['expiry'] = get_expiry_date(provider.get('content_expiry', app.config['INGEST_EXPIRY_MINUTES']),
item.get('versioncreated'))
if 'anpa_category' in item:
process_anpa_category(item, provider)
if 'subject' in item:
process_iptc_codes(item, provider)
if 'anpa_category' not in item:
derive_category(item, provider)
elif 'anpa_category' in item:
derive_subject(item)
apply_rule_set(item, provider, rule_set)
ingest_service = superdesk.get_resource_service('ingest')
if item.get('ingest_provider_sequence') is None:
ingest_service.set_ingest_provider_sequence(item, provider)
old_item = ingest_service.find_one(guid=item[GUID_FIELD], req=None)
rend = item.get('renditions', {})
if rend:
baseImageRend = rend.get('baseImage') or next(iter(rend.values()))
if baseImageRend:
href = providers[provider.get('type')].prepare_href(baseImageRend['href'])
update_renditions(item, href, old_item)
new_version = True
if old_item:
# In case we already have the item, preserve the _id
item[superdesk.config.ID_FIELD] = old_item[superdesk.config.ID_FIELD]
ingest_service.put_in_mongo(item[superdesk.config.ID_FIELD], item)
# if the feed is versioned and this is not a new version
if 'version' in item and 'version' in old_item and item.get('version') == old_item.get('version'):
new_version = False
else:
try:
ingest_service.post_in_mongo([item])
except HTTPException as e:
logger.error("Exception while persisting item in ingest collection", e)
if routing_scheme and new_version:
routed = ingest_service.find_one(_id=item[superdesk.config.ID_FIELD], req=None)
superdesk.get_resource_service('routing_schemes').apply_routing_scheme(routed, provider, routing_scheme)
except Exception as ex:
logger.exception(ex)
try:
superdesk.app.sentry.captureException()
except:
pass
return False
return True
def update_renditions(item, href, old_item):
"""
If the old_item already has renditions uploaded to media, the old rendition
details are assigned to the item; this avoids repeatedly downloading the same
image and leaving the media entries orphaned.
If there is no old_item, the original is downloaded and renditions are
generated.
:param item: parsed item from source
:param href: reference to original
:param old_item: the item that we have already ingested, if it exists
:return: item with renditions
"""
inserted = []
try:
# If there is an existing set of renditions we keep those
if old_item:
media = old_item.get('renditions', {}).get('original', {}).get('media', {})
if media:
item['renditions'] = old_item['renditions']
item['mimetype'] = old_item.get('mimetype')
item['filemeta'] = old_item.get('filemeta')
return
content, filename, content_type = download_file_from_url(href)
file_type, ext = content_type.split('/')
metadata = process_file(content, file_type)
file_guid = app.media.put(content, filename, content_type, metadata)
inserted.append(file_guid)
rendition_spec = app.config.get('RENDITIONS', {}).get('picture', {})
renditions = generate_renditions(content, file_guid, inserted, file_type,
content_type, rendition_spec, url_for_media)
item['renditions'] = renditions
item['mimetype'] = content_type
item['filemeta'] = metadata
except Exception:
for file_id in inserted:
app.media.delete(file_id)
raise
superdesk.command('ingest:update', UpdateIngest())
|
plamut/superdesk-core
|
superdesk/io/commands/update_ingest.py
|
Python
|
agpl-3.0
| 20,004
|
# This file is part of Bika LIMS
#
# Copyright 2011-2016 by its authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from AccessControl import getSecurityManager
from Products.CMFPlone.utils import safe_unicode
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.bika_listing import BikaListingView
from bika.lims.content.analysisrequest import schema as AnalysisRequestSchema
from bika.lims.permissions import *
from bika.lims.utils import to_utf8
from bika.lims.workflow import doActionFor
from DateTime import DateTime
from Products.Archetypes import PloneMessageFactory as PMF
from Products.CMFCore.utils import getToolByName
import plone
class AnalysisRequestPublishedResults(BikaListingView):
""" View of published results
Lists the published PDF files together with each publication date, the user
responsible for the publication, and the addressees' email addresses and/or
client contact names, along with the publication mode used (pdf, email, etc.)
"""
# I took IViewView away, because transitions selected in the edit-bar
# cause errors due to wrong context, when invoked from this view, and I
# don't know why.
# implements(IViewView)
def __init__(self, context, request):
super(AnalysisRequestPublishedResults, self).__init__(context, request)
self.catalog = "bika_catalog"
self.contentFilter = {'portal_type': 'ARReport',
'sort_order': 'reverse'}
self.context_actions = {}
self.show_select_column = True
self.show_workflow_action_buttons = False
self.form_id = 'published_results'
self.icon = self.portal_url + "/++resource++bika.lims.images/report_big.png"
self.title = self.context.translate(_("Published results"))
self.columns = {
'Title': {'title': _('File')},
'FileSize': {'title': _('Size')},
'Date': {'title': _('Date')},
'PublishedBy': {'title': _('Published By')},
'Recipients': {'title': _('Recipients')},
}
self.review_states = [
{'id': 'default',
'title': 'All',
'contentFilter': {},
'columns': ['Title',
'FileSize',
'Date',
'PublishedBy',
'Recipients']},
]
def __call__(self):
ar = self.context
workflow = getToolByName(ar, 'portal_workflow')
# If this is a retracted AR, show the link to the child AR and show a warning message
if workflow.getInfoFor(ar, 'review_state') == 'invalid':
childar = hasattr(ar, 'getChildAnalysisRequest') \
and ar.getChildAnalysisRequest() or None
childid = childar and childar.getRequestID() or None
message = _('This Analysis Request has been withdrawn and is '
'shown for traceability purposes only. Retest: '
'${retest_child_id}.',
mapping={'retest_child_id': safe_unicode(childid) or ''})
self.context.plone_utils.addPortalMessage(
self.context.translate(message), 'warning')
# If this is an AR automatically generated due to a retraction, show its
# parent AR information
if hasattr(ar, 'getParentAnalysisRequest') \
and ar.getParentAnalysisRequest():
par = ar.getParentAnalysisRequest()
message = _('This Analysis Request has been '
'generated automatically due to '
'the retraction of the Analysis '
'Request ${retracted_request_id}.',
mapping={'retracted_request_id': par.getRequestID()})
self.context.plone_utils.addPortalMessage(
self.context.translate(message), 'info')
template = BikaListingView.__call__(self)
return template
def contentsMethod(self, contentFilter):
"""
ARReport objects associated with the current Analysis Request.
If the user does not have one of the allowed roles (Manager, LabManager,
LabClerk or Client), no items are displayed.
"""
allowedroles = ['Manager', 'LabManager', 'Client', 'LabClerk']
pm = getToolByName(self.context, "portal_membership")
member = pm.getAuthenticatedMember()
roles = member.getRoles()
allowed = [a for a in allowedroles if a in roles]
return self.context.objectValues('ARReport') if allowed else []
def folderitem(self, obj, item, index):
obj_url = obj.absolute_url()
pdf = obj.getPdf()
filesize = 0
title = _('Download')
anchor = "<a href='%s/at_download/Pdf'>%s</a>" % \
(obj_url, _("Download"))
try:
filesize = pdf.get_size()
filesize = filesize / 1024 if filesize > 0 else 0
except:
# POSKeyError: 'No blob file'
# Show the record, but not the link
title = _('Not available')
anchor = title
item['Title'] = title
item['FileSize'] = '%sKb' % filesize
fmt_date = self.ulocalized_time(obj.created(), long_format=1)
item['Date'] = fmt_date
item['PublishedBy'] = self.user_fullname(obj.Creator())
recip = []
for recipient in obj.getRecipients():
email = recipient['EmailAddress']
val = recipient['Fullname']
if email:
val = "<a href='mailto:%s'>%s</a>" % (email, val)
recip.append(val)
item['replace']['Recipients'] = ', '.join(recip)
item['replace']['Title'] = anchor
return item
|
rockfruit/bika.lims
|
bika/lims/browser/analysisrequest/published_results.py
|
Python
|
agpl-3.0
| 5,766
|
# -*- coding: utf-8; -*-
"""Test suite for `parsers.rescue_bag` subpackage."""
import json
from pathlib import Path
from django.test import TestCase
from django.core.management import call_command
from django.conf import settings
from django.db.models.fields.files import ImageFieldFile
from cerberus import Validator, TypeDefinition
from pharmaship.core.utils import log
from pharmaship.gui.view import GlobalParameters
from pharmaship.inventory import models
from pharmaship.inventory.parsers import rescue_bag
class ParserMethodTestCase(TestCase):
"""Tests for `inventory.parsers.rescue_bag` methods."""
def setUp(self): # noqa: D102
self.assets = Path(settings.BASE_DIR) / "tests/inventory/assets"
# call_command("loaddata", self.assets / "test.dump.yaml")
call_command(
"loaddata",
self.assets / "parsers" / "rescue_bag.yaml"
)
self.params = GlobalParameters()
def test_get_required(self):
output = rescue_bag.get_required(self.params)
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"get_required.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
result = validator.validate(output)
if not result:
log.error(validator.errors)
log.debug(output)
self.assertTrue(result)
def test_create_molecule(self):
required = rescue_bag.get_required(self.params)
molecule = models.Molecule.objects.get(id=3)
output = rescue_bag.create_molecule(molecule, required["molecules"])
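# Register a custom Cerberus type so the validator accepts Django
# ImageFieldFile values wherever the schema declares "image_field".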
image_field = TypeDefinition(
name='image_field',
included_types=(ImageFieldFile,),
excluded_types=()
)
Validator.types_mapping['image_field'] = image_field
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"single_item.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
result = validator.validate(output)
if not result:
log.error(validator.errors)
log.debug(output)
self.assertTrue(result)
def test_create_equipment(self):
required = rescue_bag.get_required(self.params)
equipment = models.Equipment.objects.get(id=2)
output = rescue_bag.create_equipment(equipment, required["equipments"])
image_field = TypeDefinition(
name='image_field',
included_types=(ImageFieldFile,),
excluded_types=()
)
Validator.types_mapping['image_field'] = image_field
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"single_item.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
result = validator.validate(output)
if not result:
log.error(validator.errors)
log.debug(output)
self.assertTrue(result)
def test_create_molecules(self):
required = rescue_bag.get_required(self.params)
output = rescue_bag.create_molecules(
required["molecules"].keys(),
required["molecules"]
)
image_field = TypeDefinition(
name='image_field',
included_types=(ImageFieldFile,),
excluded_types=()
)
Validator.types_mapping['image_field'] = image_field
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"single_item.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
for item in output:
result = validator.validate(output[item])
if not result:
log.error(validator.errors)
log.debug(output[item])
self.assertTrue(result)
schema = {
"data": {
"type": "dict",
"keysrules": {
"type": "integer"
}
}
}
validator = Validator(schema)
self.assertTrue(validator.validate({"data": output}))
def test_create_equipments(self):
required = rescue_bag.get_required(self.params)
output = rescue_bag.create_equipments(
required["equipments"].keys(),
required["equipments"]
)
image_field = TypeDefinition(
name='image_field',
included_types=(ImageFieldFile,),
excluded_types=()
)
Validator.types_mapping['image_field'] = image_field
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"single_item.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
for item in output:
result = validator.validate(output[item])
if not result:
log.error(validator.errors)
log.debug(output[item])
self.assertTrue(result)
schema = {
"data": {
"type": "dict",
"keysrules": {
"type": "integer"
}
}
}
validator = Validator(schema)
self.assertTrue(validator.validate({"data": output}))
def test_get_transactions(self):
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"get_transactions.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
# Test for medicines
content_type = self.params.content_types["medicine"]
items = models.Medicine.objects.filter(used=False).values_list("id", flat=True)
output = rescue_bag.get_transactions(content_type, items)
result = validator.validate({"data": output})
if not result:
log.error(validator.errors)
log.debug(output)
self.assertTrue(result)
# Test for articles
content_type = self.params.content_types["article"]
items = models.Article.objects.filter(used=False).values_list("id", flat=True)
output = rescue_bag.get_transactions(content_type, items)
result = validator.validate({"data": output})
if not result:
log.error(validator.errors)
log.debug(output)
self.assertTrue(result)
def test_get_medicines(self):
required = rescue_bag.get_required(self.params)
output = rescue_bag.get_medicines(
self.params,
required["molecules"],
[100,]
)
image_field = TypeDefinition(
name='image_field',
included_types=(ImageFieldFile,),
excluded_types=()
)
Validator.types_mapping['image_field'] = image_field
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"single_item.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
for item in output:
result = validator.validate(output[item])
if not result:
log.error(validator.errors)
log.debug(output[item])
self.assertTrue(result)
schema = {
"data": {
"type": "dict",
"keysrules": {
"type": "integer"
}
}
}
validator = Validator(schema)
self.assertTrue(validator.validate({"data": output}))
def test_get_articles(self):
required = rescue_bag.get_required(self.params)
output = rescue_bag.get_articles(
self.params,
required["equipments"],
[100,]
)
image_field = TypeDefinition(
name='image_field',
included_types=(ImageFieldFile,),
excluded_types=()
)
Validator.types_mapping['image_field'] = image_field
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"single_item.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
for item in output:
result = validator.validate(output[item])
if not result:
log.error(validator.errors)
log.debug(output[item])
self.assertTrue(result)
schema = {
"data": {
"type": "dict",
"keysrules": {
"type": "integer"
}
}
}
validator = Validator(schema)
self.assertTrue(validator.validate({"data": output}))
def test_merge_bags(self):
required = rescue_bag.get_required(self.params)
equipments = rescue_bag.get_articles(
self.params,
required["equipments"],
[110, 111]
)
molecules = rescue_bag.get_medicines(
self.params,
required["molecules"],
[110, 111]
)
bags = models.RescueBag.objects.all()
output = rescue_bag.merge_bags(bags, molecules, equipments)
image_field = TypeDefinition(
name='image_field',
included_types=(ImageFieldFile,),
excluded_types=()
)
Validator.types_mapping['image_field'] = image_field
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"merged_bags.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
result = validator.validate({"data": output})
if not result:
log.error(validator.errors)
log.debug(output)
self.assertTrue(result)
def test_parser(self):
output = rescue_bag.parser(self.params)
image_field = TypeDefinition(
name='image_field',
included_types=(ImageFieldFile,),
excluded_types=()
)
Validator.types_mapping['image_field'] = image_field
schema_path = settings.VALIDATOR_PATH.joinpath(
"parsers",
"rescue_bag",
"rescue_bag.json"
)
schema = json.loads(schema_path.read_text())
validator = Validator(schema)
result = validator.validate({"data": output})
if not result:
log.error(validator.errors)
log.debug(output)
self.assertTrue(result)
|
tuxite/pharmaship
|
pharmaship/tests/inventory/test_parsers_rescue_bag.py
|
Python
|
agpl-3.0
| 10,995
|
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from zope.interface import Interface
from flumotion.transcoder import errors
class DataSourceError(errors.TranscoderError):
def __init__(self, *args, **kwargs):
errors.TranscoderError.__init__(self, *args, **kwargs)
class InitializationError(DataSourceError):
def __init__(self, *args, **kwargs):
DataSourceError.__init__(self, *args, **kwargs)
class StoringError(DataSourceError):
def __init__(self, *args, **kwargs):
DataSourceError.__init__(self, *args, **kwargs)
class DeletionError(DataSourceError):
def __init__(self, *args, **kwargs):
DataSourceError.__init__(self, *args, **kwargs)
class ResetError(DataSourceError):
def __init__(self, *args, **kwargs):
DataSourceError.__init__(self, *args, **kwargs)
class RetrievalError(DataSourceError):
def __init__(self, *args, **kwargs):
DataSourceError.__init__(self, *args, **kwargs)
class DataNotFoundError(RetrievalError):
def __init__(self, *args, **kwargs):
RetrievalError.__init__(self, *args, **kwargs)
class ReadOnlyDataError(StoringError):
def __init__(self, *args, **kwargs):
StoringError.__init__(self, *args, **kwargs)
class DuplicatedDataError(StoringError):
def __init__(self, *args, **kwargs):
StoringError.__init__(self, *args, **kwargs)
class DataDependencyError(StoringError):
def __init__(self, *args, **kwargs):
StoringError.__init__(self, *args, **kwargs)
class IDataSource(Interface):
"""
The data source allows the retrieval, creation, insertion and deletion
of "container" objects in an abstract source.
The given containers are unspecified, apart from their fields,
the equality operator, and an identifier field that uniquely
and persistently identifies a "record" and that is None when not stored.
The equality operator compares whether the objects represent the same
element in the source, not whether they have the same field values.
Two new elements that have not been stored in the source are never equal.
If an element is retrieved and modified more than once before
being stored, all modifications but the last stored one are lost
without warning. THERE IS NO CONCURRENT MODIFICATION PROTECTION.
"""
def initialize(self):
"""
Return a deferred.
Initialize the data source.
"""
def store(self, *data):
"""
Returns a deferred.
Stores all the specified container objects.
The objects must have been created by the store.
All the objects are stored atomically if the
store supports it.
"""
def reset(self, *data):
"""
Returns a deferred.
Resets the values of the specified container objects
to their original values from the data source.
If a specified container was never stored,
its values are not changed.
"""
def delete(self, *data):
"""
Return a deferred.
Deletes all the specified container objects.
The objects must have been created by the store.
All the objects are deleted atomically if the
store supports it.
Deletion is not an operation that can be
reversed by calling reset.
"""
class IReportsSource(IDataSource):
"""
The reports source holds the results of transcoding activities.
"""
def newTranscodeReport(self):
"""
Creates a new transcoding report container object.
"""
class IInformationSource(IDataSource):
"""
The information source holds all the information that is not held
by the reports source. This includes customers, profiles,
notifications, etc.
"""
def waitReady(self, timeout=None):
"""
Returns a deferred that fires when the source is ready to provide
data, when the source fails to initialize, or when the specified
timeout is reached.
"""
def retrieveDefaults(self):
"""
Returns a deferred.
The result on success is a "container" object
with the following fields:
outputMediaTemplate (str) can be None
outputThumbTemplate (str) can be None
linkFileTemplate (str) can be None
configFileTemplate (str) can be None
reportFileTemplate (str) can be None
accessForceGroup (str) can be None
accessForceUser (str) can be None
accessForceDirMode (int) can be None
accessForceFileMode (int) can be None
monitoringPeriod (int) can be None:
Gives the default period used to monitor the filesystem.
processPriority (int) can be None:
Gives the default process priority for the transcoding job
transcodingPriority (int) can be None:
Gives the default scheduler priority of the transcoding jobs.
transcodingTimeout (int) can be None:
Gives the default timeout of the transcoding jobs.
postprocessTimeout (int) can be None:
Gives the default timeout of the post-processing.
preprocessTimeout (int) can be None:
Gives the default timeout of the pre-processing.
mailSubjectTemplate (str) can be None:
Gives the default template for the mail notifications subject.
mailBodyTemplate (str) can be None:
Gives the default template for the mail notifications body.
mailTimeout (int) can be None:
Gives the default timeout for mail notifications.
mailRetryCount (int) can be None:
Gives the default retry count for mail notifications.
mailRetrySleep (int) can be None:
Gives the default time between retry for mail notifications.
HTTPRequestTimeout (int) can be None:
Gives the default timeout for HTTP request notifications.
HTTPRequestRetryCount (int) can be None:
Gives the default retry count for HTTP request notifications.
HTTPRequestRetrySleep (int) can be None:
Gives the default time between retry
for HTTP request notifications.
sqlTimeout (int) can be None:
Gives the default timeout for sql notifications.
sqlRetryCount (int) can be None:
Gives the default retry count for sql notifications.
sqlRetrySleep (int) can be None:
Gives the default time between retry for sql notifications.
"""
def retrieveCustomers(self):
"""
Returns a deferred.
The result on success is a list of "container" objects
with the following fields:
name (str) : The customer name used by the transcoder.
subdir (str) can be None : The sub-directory where the transcoder
root is. If not specified, it will be deduced from the customer name.
Overriding fields:
inputDir (str) can be None
outputDir (str) can be None
failedDir (str) can be None
doneDir (str) can be None
linkDir (str) can be None
workDir (str) can be None
configDir (str) can be None
tempRepDir (str) can be None
failedRepDir (str) can be None
doneRepDir (str) can be None
outputMediaTemplate (str)
outputThumbTemplate (str)
linkFileTemplate (str)
configFileTemplate (str)
reportFileTemplate (str)
linkTemplate (str) can be None
linkURLPrefix (str) can be None
enablePostprocessing (bool) can be None
enablePreprocessing (bool) can be None
enableLinkFiles (bool) can be None
customerPriority (int) can be None
transcodingPriority (int) can be None
processPriority (int) can be None
preprocessCommand (str) can be None
postprocessCommand (str) can be None
preprocessTimeout (int) can be None
postprocessTimeout (int) can be None
transcodingTimeout (int) can be None
monitoringPeriod (int) can be None
accessForceGroup (str) can be None
accessForceUser (str) can be None
accessForceDirMode (int) can be None
accessForceFileMode (int) can be None
"""
def retrieveCustomerInfo(self, custData):
"""
Returns a deferred.
The result on success is a "container" objects
with the following READ ONLY fields:
name (str) can be None
contact (str) can be None
addresses (str[]) maximum size of 3, can be empty
phone (str) can be None
email (str) can be None
"""
def retrieveProfiles(self, custData):
"""
Returns a deferred.
The result on success is a list of "container" objects
with the following fields:
name (str)
subdir (str) can be None
Overriding fields:
inputDir (str) can be None
outputDir (str) can be None
failedDir (str) can be None
doneDir (str) can be None
linkDir (str) can be None
workDir (str) can be None
configDir (str) can be None
tempRepDir (str) can be None
failedRepDir (str) can be None
doneRepDir (str) can be None
outputMediaTemplate (str) can be None
outputThumbTemplate (str) can be None
linkFileTemplate (str) can be None
configFileTemplate (str) can be None
reportFileTemplate (str) can be None
linkTemplate (str) can be None
linkURLPrefix (str) can be None
enablePostprocessing (bool) can be None
enablePreprocessing (bool) can be None
enableLinkFiles (bool) can be None
transcodingPriority (int) can be None
processPriority (int) can be None
preprocessCommand (str) can be None
postprocessCommand (str) can be None
preprocesstimeout (int) can be None
postprocessTimeout (int) can be None
transcodingTimeout (int) can be None
monitoringPeriod (int) can be None
"""
def retrieveTargets(self, profData):
"""
Returns a deferred.
The result on success is a list of "container" objects
with the following fields:
name (str)
extension (str)
subdir (str) can be None
Overriding fields:
linkTemplate (str) can be None
linkURLPrefix (str) can be None
outputDir (str) can be None
linkDir (str) can be None
workDir (str) can be None
outputFileTemplate (str) can be None
linkFileTemplate (str) can be None
enablePostprocessing (bool) can be None
enableLinkFiles (bool) can be None
postprocessCommand (str) can be None
postprocessTimeout (int) can be None
"""
def retrieveTargetConfig(self, targData):
"""
Returns a deferred.
The result on success is a "container" objects
that depend of the target type.
For all:
type (TargetTypeEnum)
        For Audio and Audio/Video targets, it has the following fields:
muxer (str)
audioEncoder (str)
audioResampler (str)
audioRate (str)
audioChannels (str)
        For Video and Audio/Video targets, it has the following fields:
muxer (str)
videoEncoder (str)
videoWidth (int)
videoHeight (int)
videoMaxWidth (int)
videoMaxHeight (int)
videoWidthMultiple (int)
videoHeightMultiple (int)
videoPAR (int[2])
videoFramerate (int[2])
videoScaleMethod (VideoScaleMethodEnum)
For Audio/Video targets, it has the following additional fields:
tolerance (AudioVideoToleranceEnum)
        For Thumbnails targets, it has the following fields:
thumbsWidth (int)
thumbsHeight (int)
periodValue (int)
periodUnit (PeriodUnitEnum)
maxCount (int)
format (ThumbOutputTypeEnum)
ensureOne (bool)
"""
def retrieveGlobalNotifications(self):
"""
Returns a deferred.
The returned list contains all global notifications.
The result on success is a list of "container" objects
with the following fields depending on the notification type:
For all:
type (NotificationTypeEnum)
triggers (set of NotificationTriggerEnum)
timeout (int) can be None
retryMax (int) can be None
retrySleep (int) can be None
For type == NotificationTypeEnum.email:
subjectTemplate (str) can be None
bodyTemplate (str) can be None
attachments (set of DocumentTypeEnum)
recipients dict with MailAddressTypeEnum as keys
of list of tuple with (name, email)
where name can be None
For type == NotificationTypeEnum.http_request:
urlTemplate (str)
For type == NotificationTypeEnum.sql:
databaseURI (str)
sqlTemplate (str)
"""
def retrieveCustomerNotifications(self, custData):
"""
Returns a deferred.
        The returned list contains all of the specified customer's notifications.
See retrieveGlobalNotifications for result specifications.
"""
def retrieveProfileNotifications(self, profData):
"""
Returns a deferred.
        The returned list contains all of the specified profile's notifications.
See retrieveGlobalNotifications for result specifications.
"""
def retrieveTargetNotifications(self, targData):
"""
Returns a deferred.
        The returned list contains all of the specified target's notifications.
See retrieveGlobalNotifications for result specifications.
"""
def retrieveActivities(self, type, states=None):
"""
Returns a deferred.
The result on success is a list of the activities
        of the specified type whose state is in the specified
        list of states (if that list is not None or empty),
        as "container" objects with the following fields:
type (ActivityTypeEnum)
subtype (TranscodingTypeEnum or NotificationTypeEnum)
state (ActivityStateEnum)
startTime (datetime)
lastTime (dateTime)
customerIdentifier (str)
profileIdentifier (str)
targetIdentifier (str)
For type == transcoding, reference is a data container:
inputRelPath (str)
For type == notification:
trigger (NotificationTriggerEnum)
timeout (int)
retryCount (int)
retryMax (int)
retrySleep (int)
data (dict)
"""
def newActivity(self, type, subtype):
"""
Creates a new activity container of a specified type and subtype.
"""
def newCustomer(self, custId):
"""
Creates a new customer container.
        It is not added to the store; it should be
        filled and then the store method should be called.
"""
def newProfile(self, custData):
"""
Creates a new profile container for the specified customer.
        It is not added to the store; it should be
        filled and then the store method should be called.
"""
def newNotification(self, type, data):
"""
Creates a new notification container
of the specified type (NotificationTypeEnum).
The specified data must be customer data,
profile data, target data or None.
None: apply to all customers transcoding
Customer data: apply to all profiles transcoding
of the specified customer
Profile data: apply to a specific customer's
profile transcoding
Target data: apply to a specific target of a profile
        It is not added to the store; it should be
        filled and then the store method should be called.
"""
def newTarget(self, profData):
"""
Creates a new target container object.
"""
def newTargetConfig(self, targData):
"""
Creates a new target config container object.
"""
def newReport(self, profData):
"""
Creates a new report container object.
"""
def newTargetReport(self, repData):
"""
Creates a new target report container object.
"""
def newNotificationReport(self, repData, notifData):
"""
Creates a new notification report container object.
"""
|
osiloke/Flumotion-Transcoder
|
flumotion/transcoder/admin/datasource/datasource.py
|
Python
|
lgpl-2.1
| 17,877
|
# -*- coding: utf-8 -*-
#
# OpenLMI Storage Provider documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 4 10:22:18 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../providers'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
plantuml_output_format='svg'
plantuml_latex_output_format='pdf'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenLMI Storage Provider'
copyright = u'2012-2013, Red Hat Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5.1'
# The full version, including alpha/beta/rc tags.
release = '0.5.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openlmitheme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenLMIStorageProviderdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OpenLMIStorageProvider.tex', u'OpenLMI Storage Provider Documentation',
u'Jan Safranek', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openlmistorageprovider', u'OpenLMI Storage Provider Documentation',
[u'Jan Safranek'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenLMIStorageProvider', u'OpenLMI Storage Provider Documentation',
u'Jan Safranek', 'OpenLMIStorageProvider', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
jsafrane/openlmi-storage
|
doc/admin/conf.py
|
Python
|
lgpl-2.1
| 8,036
|
#!/usr/bin/env python
import os
import codecs
class TorqueAcct(object):
""" Process accounting files from torque """
def __init__(self, acct_file, host_name_ext):
self.ncpus = 0
self.nodes = 0
self.fieldmapping = {
"account": ["account", str],
"queue": ["partition", str],
"session": ["session", str],
"owner": ["username", str],
"group": ["group", str],
"exec_host": ["host_list", self.parsehostlist],
"jobname": ["job_name", str],
"user": ["user", str],
"Exit_status": ["status", int],
"Error_Path": ["error_path", str],
"Output_Path": ["output_path", str],
"ctime": ["submit", int],
"etime": ["eligible_time", int],
"qtime": ["queue_time", int],
"start": ["start_time", int],
"end": ["end_time", int],
"Resource_List.ncpus": ["requested_cpus", int],
"Resource_List.walltime": ["requested_walltime", str],
"Resource_List.nodect": ["requested_node", int],
"Resource_List.nodes": ["requested_nodelist", str],
"Resource_List.procs": ["requested_tasks", str],
"Resource_List.host": ["requested_host", str],
"Resource_List.tpn": ["requested_taskspernode", str],
"Resource_List.neednodes": ["requested_neednodes", str],
"Resource_List.mem": ["requested_memory", str],
"Resource_List.pmem": ["requested_vmemory", str],
"Resource_List.cput": ["requested_cpu_time", str],
"Resource_List.pvmem": ["requested_pvmem", str],
"Resource_List.vmem": ["requested_vmem", str],
"resources_used.cput": ["cpu_time", str],
"resources_used.mem": ["mem_used", str],
"resources_used.vmem": ["vmem_used", str],
"resources_used.walltime": ["wall_time", str]
}
self.batch_kind = 'TORQUE'
self.acct_file = acct_file
if len(host_name_ext) > 0:
self.name_ext = '.'+host_name_ext
else:
self.name_ext = ""
def get_host_list_path(self,acct,host_list_dir):
return None
def reader(self,start_time=0, end_time=9223372036854775807L, seek=0):
""" The file format of the Torque logs is sufficently different from the
others to warrant its own reader implmentation
"""
filelist = []
if os.path.isdir(self.acct_file):
for dir_name, subdir_list, file_list in os.walk(self.acct_file):
for fname in file_list:
filelist.append( os.path.join(self.acct_file,dir_name,fname) )
else:
filelist = [ self.acct_file ]
for fname in filelist:
filep = codecs.open(fname, "r", "utf-8", errors="replace")
if seek:
filep.seek(seek, os.SEEK_SET)
for line in filep:
acct = self.parseline(line)
                if acct is not None and start_time <= acct['end_time'] and acct['end_time'] < end_time:
yield acct
def parseline(self, line):
tokens = line.split(";")
if len(tokens) < 4:
return None
timestamp = tokens[0]
recordtype = tokens[1]
jobid = tokens[2]
record = ";".join(tokens[3:]).strip()
if recordtype != "E":
return None
parts = jobid.split(".")
acct = {"local_jobid": int(parts[0]), "id": jobid}
jobrecs = record.split(" ")
for jobrec in jobrecs:
items = jobrec.split("=")
if len(items) == 2:
try:
mapping = self.fieldmapping[items[0]]
acct[mapping[0]] = mapping[1](items[1])
except KeyError as e:
print line
raise e
except ValueError as e:
print line
raise e
acct['ncpus'] = self.ncpus
acct['nodes'] = self.nodes
return acct
def parsehostlist(self, hostlist):
self.ncpus = 0
hosts = {}
for item in hostlist.split("+"):
tokens = item.split("/")
if len(tokens) == 2:
hosts[tokens[0]] = 1
self.ncpus += 1
self.nodes = len(hosts)
return hosts.keys()
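# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the accounting line format that parseline() expects.
# The sample line below is synthetic (all values are made up) and only uses
# keys present in `fieldmapping`, since unknown keys raise a KeyError. The
# accounting path passed to TorqueAcct is illustrative; the constructor does
# not touch the filesystem, only reader() does.
if __name__ == "__main__":
    sample = ("04/15/2014 10:00:01;E;12345.server;"
              "user=alice group=users jobname=test queue=batch "
              "ctime=1397550000 qtime=1397550001 etime=1397550001 start=1397550010 "
              "exec_host=node01/0+node01/1 Resource_List.nodect=1 session=9999 "
              "end=1397553610 Exit_status=0 resources_used.walltime=01:00:00")
    acct = TorqueAcct("/var/spool/torque/server_priv/accounting", "")
    record = acct.parseline(sample)
    print("job id: %r" % record["local_jobid"])
    print("hosts: %r, cores used: %r" % (record["host_list"], record["ncpus"]))
    # To stream complete records from real accounting files instead:
    # for rec in acct.reader(start_time=0):
    #     print(rec["id"])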
|
ubccr/tacc_stats
|
pickler/torque_acct.py
|
Python
|
lgpl-2.1
| 4,606
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
#####################################################################
#Created :15/02/2005
#Author :KOVALTCHUK Alexey
#GUI test scenario :PAL-MESH-035 (geometry part)
#####################################################################
#
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
#Box creation (2.2)
Box_1 = geompy.MakeBoxDXDYDZ(200, 400, 300)
geompy.addToStudy(Box_1, "Box_1")
#Cylinder creation (2.8)
Cylinder_1 = geompy.MakeCylinderRH(100, 300)
geompy.addToStudy(Cylinder_1, "Cylinder_1")
#Cone creation (2.13)
Cone_1 = geompy.MakeConeR1R2H(100, 50, 200)
geompy.addToStudy(Cone_1, "Cone_1")
#Explode box, cone and cylinder on faces and vertices(2.18)
ListOfFaces_Box_1 = geompy.SubShapeAll(Box_1, geompy.ShapeType["FACE"])
NbExplodedFaces_Box_1 = len(ListOfFaces_Box_1)
for i in range (0, NbExplodedFaces_Box_1):
name = "Face_" + str(i+1)
geompy.addToStudyInFather(Box_1, ListOfFaces_Box_1[i], name)
ListOfVertices_Box_1 = geompy.SubShapeAll(Box_1, geompy.ShapeType["VERTEX"])
NbExplodedVertices_Box_1 = len(ListOfVertices_Box_1)
for i in range (0, NbExplodedVertices_Box_1):
name = "Vertex_" + str(i+1)
geompy.addToStudyInFather(Box_1, ListOfVertices_Box_1[i], name)
ListOfFaces_Cylinder_1 = geompy.SubShapeAll(Cylinder_1, geompy.ShapeType["FACE"])
NbExplodedFaces_Cylinder_1 = len(ListOfFaces_Cylinder_1)
for i in range (0, NbExplodedFaces_Cylinder_1):
name = "Face_" + str(NbExplodedFaces_Box_1+i+1)
geompy.addToStudyInFather(Cylinder_1, ListOfFaces_Cylinder_1[i], name)
ListOfFaces_Cone_1 = geompy.SubShapeAll(Cone_1, geompy.ShapeType["FACE"])
NbExplodedFaces_Cone_1 = len(ListOfFaces_Cone_1)
for i in range (0, NbExplodedFaces_Cone_1):
name = "Face_" + str(NbExplodedFaces_Box_1+NbExplodedFaces_Cylinder_1+i+1)
geompy.addToStudyInFather(Cone_1, ListOfFaces_Cone_1[i], name)
#Plane creation (2.32)
Plane_1 = geompy.MakePlaneThreePnt(ListOfVertices_Box_1[0], ListOfVertices_Box_1[1], ListOfVertices_Box_1[3], 600)
geompy.addToStudy(Plane_1, "Plane_1")
#Partition (2.32)
compound = geompy.MakeCompound([ListOfFaces_Cylinder_1[0], ListOfFaces_Cone_1[0]])
Partition_1 = geompy.MakeHalfPartition(compound, Plane_1)
geompy.addToStudy(Partition_1, "Partition_1")
#Explode partition on faces and vertices(2.38)
ListOfFaces_Partition_1 = geompy.SubShapeAll(Partition_1, geompy.ShapeType["FACE"])
NbExplodedFaces_Partition_1 = len(ListOfFaces_Partition_1)
for i in range (0, NbExplodedFaces_Partition_1):
name = "Face_" + str(NbExplodedFaces_Box_1+NbExplodedFaces_Cylinder_1+NbExplodedFaces_Cone_1+i+1)
geompy.addToStudyInFather(Partition_1, ListOfFaces_Partition_1[i], name)
#Explode faces on vertices(2.43)
ListOfVertices_Face_7 = geompy.SubShapeAll(ListOfFaces_Cylinder_1[0], geompy.ShapeType["VERTEX"])
NbExplodedVertices_Face_7 = len(ListOfVertices_Face_7)
for i in range (0, NbExplodedVertices_Face_7):
name = "Vertex_" + str(NbExplodedVertices_Box_1+i+1)
geompy.addToStudyInFather(ListOfFaces_Cylinder_1[0], ListOfVertices_Face_7[i], name)
ListOfVertices_Face_10 = geompy.SubShapeAll(ListOfFaces_Cone_1[0], geompy.ShapeType["VERTEX"])
NbExplodedVertices_Face_10 = len(ListOfVertices_Face_10)
for i in range (0, NbExplodedVertices_Face_10):
name = "Vertex_" + str(NbExplodedVertices_Box_1+NbExplodedVertices_Face_7+i+1)
geompy.addToStudyInFather(ListOfFaces_Cone_1[0], ListOfVertices_Face_10[i], name)
ListOfVertices_Face_15 = geompy.SubShapeAll(ListOfFaces_Partition_1[2], geompy.ShapeType["VERTEX"])
NbExplodedVertices_Face_15 = len(ListOfVertices_Face_15)
for i in range (0, NbExplodedVertices_Face_15):
name = "Vertex_" + str(NbExplodedVertices_Box_1+NbExplodedVertices_Face_7+NbExplodedVertices_Face_10+i+1)
geompy.addToStudyInFather(ListOfFaces_Partition_1[2], ListOfVertices_Face_15[i], name)
ListOfVertices_Face_18 = geompy.SubShapeAll(ListOfFaces_Partition_1[NbExplodedFaces_Partition_1-1], geompy.ShapeType["VERTEX"])
NbExplodedVertices_Face_18 = len(ListOfVertices_Face_18)
for i in range (0, NbExplodedVertices_Face_18):
name = "Vertex_" + str(NbExplodedVertices_Box_1+NbExplodedVertices_Face_7+NbExplodedVertices_Face_10+NbExplodedVertices_Face_15+i+1)
geompy.addToStudyInFather(ListOfFaces_Partition_1[NbExplodedFaces_Partition_1-1], ListOfVertices_Face_18[i], name)
salome.sg.updateObjBrowser(1)
|
FedoraScientific/salome-geom
|
src/GEOM_SWIG/PAL_MESH_035_geometry.py
|
Python
|
lgpl-2.1
| 5,433
|
# RobotMain - Simon Lees simon@simotek.net
# Copyright (C) 2015 Simon Lees
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from PyLibs.uiserver import UiServer, UiServerCallbacks
from PyLibs.arduinointerface import ArduinoInterface, ArduinoInterfaceCallbacks
import argparse
import time
uiServer = None
# only used when serial is stubbed out
def onDriveMotor(args):
print("On DriveMotor")
uiServer.announceLeftMotorSpeed(args[0])
uiServer.announceRightMotorSpeed(args[1])
if __name__ == '__main__':
parser = argparse.ArgumentParser("Main Robot control app")
#parser.add_argument("-s", "--no-serial", type=str, required=False, help="Stub out serial")
parser.add_argument('--no-serial', dest='noserial', action='store_true')
args = parser.parse_args()
serverCallbacks = UiServerCallbacks()
arduinoCallbacks = ArduinoInterfaceCallbacks()
uiServer = UiServer(serverCallbacks)
# hook up stub callbacks
if args.noserial:
serverCallbacks.sendDriveMotor.register(onDriveMotor)
else:
arduinoCallbacks.annLeftDriveMotor.register(uiServer.announceLeftMotorSpeed)
arduinoCallbacks.annRightDriveMotor.register(uiServer.announceRightMotorSpeed)
arduinoInterface = ArduinoInterface(arduinoCallbacks)
servercallbacks = uiServer.getCallbacks()
servercallbacks.sendDriveMotor.register(arduinoInterface.sendDriveMotorSpeed)
uiServer.setCallbacks(servercallbacks)
# Send Ready for status LED
ArduinoInterface.sendReady()
# Main app event loop
while True:
uiServer.processMessages()
arduinoInterface.processMessages()
time.sleep(0.01)
|
simotek/tanko-bot
|
src/robotmain.py
|
Python
|
lgpl-2.1
| 2,285
|
from enthought.tvtk.api import tvtk
import math
import numpy
import scipy.linalg
# All returned arrays are cast into either numpy or numarray arrays
arr=numpy.array
class vtu:
"""Unstructured grid object to deal with VTK unstructured grids."""
def __init__(self, filename):
"""Creates a vtu object by reading the specified file."""
gridreader=tvtk.XMLUnstructuredGridReader(file_name=filename)
gridreader.update()
self.ugrid=gridreader.output
self.filename=filename
def GetScalarField(self, name):
"""Returns an array with the values of the specified scalar field."""
return arr(self.ugrid.point_data.get_array(name))
def GetVectorField(self, name):
"""Returns an array with the values of the specified vector field."""
return arr(self.ugrid.point_data.get_array(name))
def GetVectorNorm(self, name):
"""Return the field with the norm of the specified vector field."""
v = self.GetVectorField(name)
n = []
norm = scipy.linalg.norm
for node in range(self.ugrid.number_of_points):
n.append(norm(v[node]))
return arr(n)
def GetField(self,name):
"""Returns an array with the values of the specified field."""
pointdata=self.ugrid.point_data
vtkdata=pointdata.get_array(name)
nc=vtkdata.number_of_components
nt=vtkdata.number_of_tuples
array=arr(vtkdata)
if nc==9:
return array.reshape(nt,3,3)
elif nc==4:
return array.reshape(nt,2,2)
else:
return array.reshape(nt,nc)
def Write(self, filename=[]):
"""Writes the grid to a vtu file.
If no filename is specified it will use the name of the file originally
read in, thus overwriting it!
"""
if filename==[]:
filename=self.filename
gridwriter=tvtk.XMLUnstructuredGridWriter(file_name=filename, input=self.ugrid)
gridwriter.write()
def AddScalarField(self, name, array):
"""Adds a scalar field with the specified name using the values from the array."""
# In vtktools.py the following used SetNumberOfValues=len(array)
data = tvtk.FloatArray(number_of_tuples=len(array), name=name)
for i in range(len(array)):
data.set_value(i, array[i])
pointdata=self.ugrid.point_data
pointdata.add_array(data)
pointdata.set_active_scalars(name)
def AddVectorField(self, name, array):
"""Adds a vector field with the specified name using the values from the array."""
n=array.size
# In vtktools.py the following used SetNumberOfValues=n
data = tvtk.FloatArray(number_of_components=array.shape[1], number_of_tuples=n, name=name)
for i in range(n):
data.set_value(i, array.reshape(n)[i])
pointdata=self.ugrid.point_data
pointdata.add_array(data)
pointdata.set_active_vectors(name)
def AddField(self, name, array):
"""Adds a field with arbitrary number of components under the specified name using."""
n=array.size
sh=arr(array.shape)
# number of tuples is sh[0]
# number of components is the product of the rest of sh
    data = tvtk.FloatArray(number_of_components=sh[1:].prod(), number_of_tuples=n, name=name)
flatarray=array.reshape(n)
for i in range(n):
data.set_value(i, flatarray[i])
pointdata=self.ugrid.point_data
pointdata.add_array(data)
def ApplyProjection(self, projection_x, projection_y, projection_z):
"""Applys a projection to the grid coordinates. This overwrites the existing values."""
npoints = self.ugrid.number_of_points
for i in range (npoints):
(x,y,z) = self.ugrid.get_point(i)
new_x = eval (projection_x)
new_y = eval (projection_y)
new_z = eval (projection_z)
self.ugrid.points.set_point(i, new_x, new_y, new_z)
def ProbeData(self, coordinates, name):
"""Interpolate field values at these coordinates."""
# Initialise locator
bbox = self.ugrid.bounds
locator = tvtk.PointLocator(data_set=self.ugrid, tolerance=10.0)
locator.update()
# Initialise probe
points = tvtk.Points()
ilen, jlen = coordinates.shape
for i in range(ilen):
points.insert_next_point(coordinates[i][0], coordinates[i][1], coordinates[i][2])
polydata = tvtk.PolyData(points=points)
probe = tvtk.ProbeFilter(input=polydata, source=self.ugrid)
probe.update()
# Reposition invalid nodes at nearest mesh vertices
    valid_ids = probe.valid_points
valid_points = tvtk.Points()
valid_loc = 0
for i in range(ilen):
if valid_ids.get_tuple1(valid_loc) == i:
valid_points.insert_next_point(coordinates[i][0], coordinates[i][1], coordinates[i][2])
valid_loc = valid_loc + 1
else:
nearest = locator.find_closest_point([coordinates[i][0], coordinates[i][1], coordinates[i][2]])
point = self.ugrid.points.get_point(nearest)
valid_points.insert_next_point(point[0], point[1], point[2])
polydata.points=valid_points
probe.input=polydata
probe.update()
# Get final updated values
pointdata=probe.output.point_data
vtkdata=pointdata.get_array(name)
    nc=vtkdata.number_of_components
    nt=vtkdata.number_of_tuples
array = arr(vtkdata)
array.shape = (nt,nc)
return array
def RemoveField(self, name):
"""Removes said field from the unstructured grid."""
self.ugrid.point_data.remove_array(name)
def GetLocations(self):
"""Returns an array with the locations of the nodes."""
return arr(self.ugrid.points.data)
def GetCellPoints(self, id):
"""Returns an array with the node numbers of each cell (ndglno)."""
idlist=tvtk.IdList()
self.ugrid.get_cell_points(id, idlist)
return arr([idlist.get_id(i) for i in range(idlist.number_of_ids)])
def GetFieldNames(self):
"""Returns the names of the available fields."""
pointdata=self.ugrid.point_data
return [pointdata.get_array_name(i) for i in range(pointdata.number_of_arrays)]
def GetPointCells(self, id):
"""Return an array with the elements which contain a node."""
idlist=tvtk.IdList()
self.ugrid.get_point_cells(id, idlist)
    return arr([idlist.get_id(i) for i in range(idlist.number_of_ids)])
def GetPointPoints(self, id):
"""Return the nodes connecting to a given node."""
cells = self.GetPointCells(id)
lst = []
for cell in cells:
lst = lst + list(self.GetCellPoints(cell))
s = set(lst) # remove duplicates
return arr(list(s)) # make into a list again
def GetDistance(self, x, y):
"""Return the distance in physical space between x and y."""
posx = self.ugrid.get_point(x)
posy = self.ugrid.get_point(y)
return math.sqrt(sum([(posx[i] - posy[i])**2 for i in range(len(posx))]))
def Crop(self, min_x, max_x, min_y, max_y, min_z, max_z):
"""Trim off the edges defined by a bounding box."""
trimmer = tvtk.ExtractUnstructuredGrid(input=self.ugrid, extent=(min_x, max_x, min_y, max_y, min_z, max_z))
trimmer.update()
trimmed_ug = trimmer.output
self.ugrid = trimmed_ug
def StructuredPointProbe(self, nx, ny, nz, bounding_box=None):
""" Probe the unstructured grid dataset using a structured points dataset. """
bbox = [0.0,0.0, 0.0,0.0, 0.0,0.0]
    if bounding_box is None:
bbox = self.ugrid.bounds
else:
bbox = bounding_box
spacing = [0.0, 0.0, 0.0]
if nx>1: spacing[0] = (bbox[1]-bbox[0])/(nx-1.0)
if ny>1: spacing[1] = (bbox[3]-bbox[2])/(ny-1.0)
if nz>1: spacing[2] = (bbox[5]-bbox[4])/(nz-1.0)
sgrid = tvtk.StructuredPoints(dimensions=(nx, ny, nz), origin=[bbox[0],bbox[2],bbox[4]], spacing=spacing)
probe = tvtk.ProbeFilter (source=self.ugrid, input=sgrid)
probe.update()
return probe.output
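# --- Hedged usage sketch (not part of the original module) ---
# Short illustration of the vtu wrapper above. "mesh.vtu" is a placeholder
# name for an existing VTK unstructured-grid file; the derived field added
# here (distance of each node from the origin) is purely illustrative.
if __name__ == "__main__":
  ug = vtu("mesh.vtu")                                # read the grid from disk
  print("available fields: %s" % ug.GetFieldNames())  # list point-data arrays
  coords = ug.GetLocations()                          # node coordinates, shape (npoints, 3)
  radius = numpy.sqrt((coords ** 2).sum(axis=1))      # per-node distance from the origin
  ug.AddScalarField("Radius", radius)
  ug.Write("mesh_with_radius.vtu")                    # write a copy with the new field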
|
FluidityProject/multifluids
|
tools/tvtktools.py
|
Python
|
lgpl-2.1
| 7,765
|
# -*- coding: utf-8 -*-
import logging
from geomsmesh import geompy
from extractionOrientee import extractionOrientee
from getSubshapeIds import getSubshapeIds
# -----------------------------------------------------------------------------
# --- TORUS
# --- toric faces and volumes of the torus
def facesVolumesToriques(tore, plan, facesDefaut):
"""
    Extraction of the two faces and volumes of the partitioned torus that follow the elliptical generatrix.
    @param tore : the partitioned and cut torus
    @param plan : the cutting plane
    @return (facetore1, facetore2, volumeTore1, volumeTore2) : the two faces along the generatrix and the two volumes of the torus
"""
logging.info("start")
centre = geompy.MakeVertexOnSurface(plan, 0.5, 0.5)
normal = geompy.GetNormal(plan, centre)
reference = geompy.MakeTranslationVector(centre, normal)
[facesInPlan, facesOutPlan, facesOnPlan] = extractionOrientee(plan, tore, reference, "FACE", 1.e-2, "faceTorePlan_")
[facesInSide, facesOutSide, facesOnSide] = extractionOrientee(facesDefaut, tore, reference, "FACE", 1.e-2, "faceTorePeau_")
facesIdInPlan = getSubshapeIds(tore, facesInPlan)
facesIdOutPlan = getSubshapeIds(tore, facesOutPlan)
facesIdOnSide = getSubshapeIds(tore, facesOnSide)
facesIdInSide = getSubshapeIds(tore, facesInSide)
facesIdOutSide = getSubshapeIds(tore, facesOutSide)
#facesIdInOutSide = facesIdInSide + facesIdOutSide
facetore1 = None
    facetore2 = None
for i, faceId in enumerate(facesIdInPlan):
if faceId not in facesIdOnSide:
facetore1 = facesInPlan[i]
break
for i, faceId in enumerate(facesIdOutPlan):
if faceId not in facesIdOnSide:
facetore2 = facesOutPlan[i]
break
#[facetore1,facetore2] = geompy.GetShapesOnShape(pipe0, tore, geompy.ShapeType["FACE"], GEOM.ST_ON)
geompy.addToStudyInFather( tore, facetore1, 'facetore1' )
geompy.addToStudyInFather( tore, facetore2, 'facetore2' )
[volumeTore1, volumeTore2] = geompy.ExtractShapes(tore, geompy.ShapeType["SOLID"], True)
geompy.addToStudyInFather( tore, volumeTore1, 'volumeTore1' )
geompy.addToStudyInFather( tore, volumeTore2, 'volumeTore2' )
return facetore1, facetore2, volumeTore1, volumeTore2
|
FedoraScientific/salome-smesh
|
src/Tools/blocFissure/gmu/facesVolumesToriques.py
|
Python
|
lgpl-2.1
| 2,145
|
"""
pyparticleprobe.dsd_calcs.zr
===============================
A grouping of functions for calculations of a Z-R relationship from a drop
size distribution.
Adapted by Nick Guy.
"""
# HISTORY::
# 28 Feb 2014 - Nick Guy. NRC, NOAA/NSSL (nick.guy@noaa.gov)
# Converted NCL functions below to Python
# FUNCTIONS::
# linreg - Least squares linear regression fit
# regfit_powerlaw - Create a fit line from the regression data
# SD_filter - Filter a variable given a standard deviation
# regfit_abcd - Calculate a,b,c,d power law coefficients from regression data
# save_stats - Save a number of statistics into a text file for documentation
# get_zr_linreg - Solve for a,b,c,d coefficient and exponents in ZR relationship
#-------------------------------------------------------------------
# Load the needed packages
import numpy as np
from scipy import stats
#-------------------------------------------------------------------
# Define various constants that may be used for calculations
#
#===============================================================
# BEGIN FUNCTIONS
#**===============================================================
def linreg(Z,R):
"""Calculate the linear regression of two variables
INPUT::
Z = Reflectivity [mm^6 m^-3]
R = Rainfall rate [mm h^-1]
OUTPUT::
slope = Slope of the regression line
intercept = Intercept of the regression line
rVal = Correlation coefficient
pVal = Two-sided P-value test (null hypothesis slope = 0)
std_err = Standard Error of estimate
USAGE::
slope, intercept, r_value, p_value, std_err = linreg(x,y)
NOTES::
    The masked stats function is used so that masked points are ignored; otherwise
    the regression would be performed on all data.
    Note that both variables are put in log-base10 space to keep the fit linear and
    because the vast majority of the data is generally weighted toward lower values.
"""
#---------------------------------------
# Use the Scipy linear regression algorithm to calculate
slope, intercept, rVal, pVal, std_err = stats.mstats.linregress(np.ma.log10(Z),np.ma.log10(R))
return slope, intercept, rVal, pVal, std_err
#**====================================================
def regfit_powerlaw(Z,slope,intercept,limLo=1E-2,limHi=1E6,Rtest=False):
"""Calculate a fit line to the linearly regressed data by
Create a linear array of independent reflectivity values. Calculate the dependent
Rainfall rate array using a power law distribution.
Optionally return a "test" array calculating the rainfall from the actual
reflectivity values via power law distribution.
This can then be used (with the SD_filter function).
to remove outlier data.
INPUT::
Z = Reflectivity [mm^6 m^-3]
slope = Slope of regression line
intercept = Intercept of the regression line
OPTIONAL
limLo = Lower limit of line
limHi = Upper limit of line
Rtest = Set True to return Rcalc "test" array
OUTPUT::
Zfit = Fit line of Z as the independent variable
Rfit = Fit line of R as the dependent variable
Rcalc = Optionally returned array of rainfall rate based upon input Z
USAGE::
Zfit, Rfit, [Rtest] = linreg(x,y)
NOTES::
The masked stats function is used, otherwise the regression is performed on
all data.
"""
#---------------------------------------
# Create the fit lines
Zfit = np.linspace(limLo,limHi,len(Z))
Rfit = (10.**intercept) * (Zfit**slope)
if Rtest:
Rcalc = (10.**intercept) * (Z**slope)
return Zfit,Rfit,Rcalc
else:
return Zfit,Rfit
#**====================================================
def SD_filter(Var,R,Rfit,Xsd):
"""Applies a filter to data at each point using the standard deviation
of the entire data series.
INPUT::
Var = Variable to be filtered
    R = Rainfall rate [mm h^-1]
    Rfit = Power-law fit of the rainfall rate [mm h^-1]
    Xsd = Multiplication factor of Std Dev
OUTPUT::
VarFilt = Filtered input variable (masked)
USAGE::
    VarOut = SD_filter(VarIn,R,Rfit,Xsd)
NOTES::
This module assumes that the variable to be filtered is the same
dimensionality as the Rainfall rate variable.
"""
#---------------------------------------
# Find the standard deviation of scatter to remove outliers
sigDev = Xsd * R.std()
# Create the array for the filtered data
VarFilt = Var.copy()
# Apply the condition to mask the data
VarFilt = np.ma.masked_where((R <= (Rfit-sigDev)) | (R >= (Rfit+sigDev)),VarFilt,
copy=False)
return VarFilt
#**====================================================
def regfit_abcd(slope,intercept):
"""Calculate the a, b, c, d coefficients give the interept and slope,
assuming that reflectivity is the dependent variable
INPUT::
slope = Slope of regression line
intercept = Intercept of the regression line
OUTPUT::
a = a coefficient in Z = aR^b power law
b = b coefficient in Z = aR^b power law
c = c coefficient in R = cZ^d power law
d = d coefficient in R = cZ^d power law
USAGE::
a,b,c,d = fit_abcd(slope,intercept)
NOTES::
This method assumes that reflectivity (Z) was the independent variable and
Rainfall rate (R) was the dependent variable during linear regression.
"""
#---------------------------------------
# Calculate a, b, c, and d coefficients
c = 10.**intercept
d = slope
a = (1./c)**(1./d)
b = (1./d)
return a,b,c,d
#**====================================================
def get_zr_linreg(Z,R,filter=False,SDmult=1.,limLo=1E-2,limHi=1E6):
"""Use linear regression to solve find a Z-R relationship
INPUT::
Z = Reflectivity [mm^6 m^-3]
R = Rainfall rate [mm h^-1]
OPTIONAL::
filter = Set True to also return filtered a,b,c,d
SDmult = Multiplier for Standard deviation filter (if 3; then 3 * Std Dev)
limLo = Lower limit of line
limHi = Upper limit of line
See the printout for details of inputs
OUTPUT::
a = a coefficient in Z = aR^b power law
b = b exponent in Z = aR^b power law
c = c coefficient in R = cZ^d power law
d = d exponent in R = cZ^d power law
USAGE::
    result = zr.get_zr_linreg(Z,R,[**args])
"""
#---------------------------------------
# Calculate a least squares linear regression fit of log-log distribution
Regslp, RegInt, rVal, pVal, stdErr = linreg(Z,R)
# Assign values from linear regression for coefficients
a_all,b_all,c_all,d_all = regfit_abcd(Regslp,RegInt)
# Apply a filter if requested
if filter:
# Line fits for independent Z (linear array) and dependent R via power law relationship
# The Rtest = true returns the test array (power law) for next filtering step
Zfit, RegFit, RRtest = regfit_powerlaw(Z,Regslp,RegInt,limLo=limLo,limHi=limHi,Rtest=True)
#mask = (R >= (RRtest-sigDev)) & (R <= (RRtest+sigDev))
# Filter the arrays within specified std deviation
Z_filt = SD_filter(Z,R,RRtest,SDmult)
R_filt = SD_filter(R,R,RRtest,SDmult)
# Calculate the least squares linear regression fit of log-log distribution of filtered data
RegslpFilt, RegIntFilt, rValFilt, pValFilt, stdErrFilt = linreg(Z_filt,R_filt)
# Create an array for line fit using power law for filtered data
ZfitFilt, RegFitFilt = regfit_powerlaw(Z,RegslpFilt,RegIntFilt,Rtest=False)
del ZfitFilt
a_filt,b_filt,c_filt,d_filt = regfit_abcd(RegslpFilt,RegIntFilt)
# Find the number of elements in the filtered array
nPts = R_filt.count()
Info = 'Filtered'
a, b, c, d, nPts = a_filt, b_filt, c_filt, d_filt, nPts
return a_filt,b_filt,c_filt,d_filt,nPts
else:
# Find the number of elements in the array
nPts = len(R)
Info = 'NonFiltered'
a, b, c, d, nPts = a_all, b_all, c_all, d_all, nPts
# Create a dictionary to transfer the data
data = {'Mode' : Info,
'a' : a,
'b' : b,
'c' : c,
            'd' : d,
'number_pts' : nPts
}
return data
#**====================================================
def save_stats(fname,title=None,Conc=None,nPtsAll=None,cFactAll=None,dFactAll=None,aFactAll=None,
bFactAll=None,nPtsFilt=None,cFactFilt=None,dFactFilt=None,aFactFilt=None,
bFactFilt=None,rValAll=None,pValAll=None,stdErrAll=None,rValFilt=None,
pValFilt=None,stdErrFilt=None,Nw=None,
D0=None,Nw_D0_cst=None,W=None):
"""Save a text file with output stats calculated from Z-R relationship calculations
INPUT::
fname = Name out output file
title = Title information to identify statistics
OPTIONAL::
See the printout for details of inputs
OUTPUT::
fname = Text file
USAGE::
zr.save_stats(fname,[**args])
"""
#---------------------------------------
# Create a single element needed to save file
empty = [0.]
ZRstatsTex = "=======================================\n"
ZRstatsTex += "**** "+fname+" ****\n"
ZRstatsTex += "DROP SIZE DISTRIBUTION CONCENTRATION\n"
ZRstatsTex += "min Conc = " + str(Conc.min())+"\n"
ZRstatsTex += "max Conc = " + str(Conc.max())+"\n"
ZRstatsTex += "=======================================\n"
ZRstatsTex += " \n"
ZRstatsTex += title+"\n"
ZRstatsTex += "========================================\n"
ZRstatsTex += "PREFACTOR AND EXPONENT ESTIMATION\n"
ZRstatsTex += "All Data: R=cZ^d:: c = "+str(cFactAll)+" , d = "+str(dFactAll)+"\n"
ZRstatsTex += " Z=aR^b:: a = "+str(aFactAll)+" , b = "+str(bFactAll)+"\n"
ZRstatsTex += " Correlation = "+str(rValAll)+" p = "+str(pValAll)+" StdErr = "+str(stdErrAll)+"\n"
ZRstatsTex += " # Points = "+str(nPtsAll)+"\n"
ZRstatsTex += " -----------------\n"
ZRstatsTex += "Filtered Data: R=cZ^d:: c = "+str(cFactFilt)+" , d = "+str(dFactFilt)+"\n"
ZRstatsTex += " Z=aR^b:: a = "+str(aFactFilt)+" , b = "+str(bFactFilt)+"\n"
ZRstatsTex += " Correlation = "+str(rValFilt)+" p = "+str(pValFilt)+" StdErr = "+str(stdErrFilt)+"\n"
ZRstatsTex += " # Points = "+str(nPtsFilt)+"\n"
ZRstatsTex += "=========================================\n"
ZRstatsTex += " \n"
ZRstatsTex += "==============================================\n"
ZRstatsTex += " \n"
ZRstatsTex += "Bringi et al. 2009, Conv-Strat-Trans\n"
ZRstatsTex += "Stratiform: "+str(len(Nw_D0_cst[Nw_D0_cst == 1]))+" points"+"\n"
ZRstatsTex += "Mean Nw = "+str(np.log10(Nw[Nw_D0_cst == 1].mean()))+", SD = "+str(np.log10(Nw[Nw_D0_cst == 1].std()))+"\n"
ZRstatsTex += "Mean D0 = "+str(D0[Nw_D0_cst == 1].mean())+", SD = "+str(D0[Nw_D0_cst == 1].std())+"\n"
ZRstatsTex += "================================\n"
ZRstatsTex += "Convective: "+str(len(Nw_D0_cst[Nw_D0_cst == 2]))+" points"+"\n"
ZRstatsTex += "Mean Nw = "+str(np.log10(Nw[Nw_D0_cst == 2].mean()))+", SD = "+str(np.log10(Nw[Nw_D0_cst == 2].std()))+"\n"
ZRstatsTex += "Mean D0 = "+str(D0[Nw_D0_cst == 2].mean())+", SD = "+str(D0[Nw_D0_cst == 2].std())+"\n"
ZRstatsTex += "=================================\n"
ZRstatsTex += "Transition: "+str(len(Nw_D0_cst[Nw_D0_cst == 3]))+" points"+"\n"
ZRstatsTex += "Mean Nw = "+str(np.log10(Nw[Nw_D0_cst == 3].mean()))+", SD = "+str(np.log10(Nw[Nw_D0_cst == 3].std()))+"\n"
ZRstatsTex += "Mean D0 = "+str(D0[Nw_D0_cst == 3].mean())+", SD = "+str(D0[Nw_D0_cst == 3].std())+"\n"
ZRstatsTex += "=================================\n"
# ZRstatsTex += "Mean W = "+str(W[Nw_D0_cs == 2].mean())+", SD = "+str(W[Nw_D0_cs == 2].std())+"\n"
ZRstatsTex += "==============================================\n"
ZRstatsTex += " \n"
# Save the file
np.savetxt(fname, empty, header=ZRstatsTex)
#====================================================
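# --- Hedged usage sketch (not part of the original module) ---
# Synthetic check of the coefficient algebra above: build rain rates R, create
# Z = a R^b exactly (a = 300 and b = 1.4 are illustrative values only), and
# confirm that get_zr_linreg() recovers them from the log-log regression.
if __name__ == "__main__":
    R_syn = np.linspace(0.1, 50.0, 200)   # rainfall rate [mm h^-1]
    Z_syn = 300.0 * R_syn ** 1.4          # reflectivity [mm^6 m^-3]
    result = get_zr_linreg(Z_syn, R_syn, filter=False)
    print("recovered a = %.2f (expected 300)" % result['a'])
    print("recovered b = %.3f (expected 1.4)" % result['b'])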
|
nguy/pyparticleprobe
|
pyparticleprobe/dsd_calcs/zr.py
|
Python
|
lgpl-2.1
| 12,340
|
#!/usr/bin/env python
"""Handle perspective-distorted sinusoidal gratings (server-side)"""
# Copyright (c) 2002-2003 Andrew Straw. Distributed under the terms
# of the GNU Lesser General Public License (LGPL).
import VisionEgg, string
import sys, os, math
import VisionEgg.Core
import VisionEgg.FlowControl
import VisionEgg.SphereMap
import VisionEgg.PyroHelpers
import Pyro.core
from VisionEgg.PyroApps.ScreenPositionServer import ScreenPositionMetaController
from VisionEgg.PyroApps.ScreenPositionGUI import ScreenPositionParameters
from VisionEgg.PyroApps.SphereGratingGUI import SphereGratingMetaParameters
class SphereGratingExperimentMetaController( Pyro.core.ObjBase ):
def __init__(self,screen,presentation,stimuli):
# get stimuli
assert( stimuli[0][0] == '3d_perspective')
assert( stimuli[1][0] == '3d_perspective')
sphere_grating = stimuli[0][1]
sphere_window = stimuli[1][1]
Pyro.core.ObjBase.__init__(self)
self.meta_params = SphereGratingMetaParameters()
if not isinstance(screen,VisionEgg.Core.Screen):
raise ValueError("Expecting instance of VisionEgg.Core.Screen")
if not isinstance(presentation,VisionEgg.FlowControl.Presentation):
raise ValueError("Expecting instance of VisionEgg.FlowControl.Presentation")
if not isinstance(sphere_grating,VisionEgg.SphereMap.SphereGrating):
raise ValueError("Expecting instance of VisionEgg.SphereMap.SphereGrating")
if not isinstance(sphere_window,VisionEgg.SphereMap.SphereWindow):
raise ValueError("Expecting instance of VisionEgg.SphereMap.SphereWindow")
self.p = presentation
self.stim = sphere_grating
self.window = sphere_window
screen.parameters.bgcolor = (0.5, 0.5, 0.5, 0.0)
self.p.add_controller(self.stim,'on',VisionEgg.FlowControl.FunctionController(
during_go_func=self.on_function_during_go,
between_go_func=self.on_function_between_go))
def __del__(self):
self.p.remove_controller(self.stim,'on')
Pyro.core.ObjBase.__del__(self) # call base class
def on_function_during_go(self,t):
"""Compute when the grating is on"""
if t <= self.meta_params.pre_stim_sec:
return 0 # not on yet
elif t <= (self.meta_params.pre_stim_sec + self.meta_params.stim_sec):
return 1 # on
else:
return 0 # off again
def on_function_between_go(self):
"""Compute when the grating is off"""
return 0 # off again
def get_parameters(self):
return self.meta_params
def set_parameters(self, new_parameters):
if isinstance(new_parameters, SphereGratingMetaParameters):
self.meta_params = new_parameters
else:
raise ValueError("Argument to set_parameters must be instance of SphereGratingMetaParameters")
# self.meta_params = new_parameters
self.update()
def update(self):
stim_params = self.stim.parameters # shorthand
window_params = self.window.parameters # shorthand
meta_params = self.meta_params # shorthand
stim_params.contrast = meta_params.contrast
stim_params.orientation = meta_params.orient
stim_params.spatial_freq_cpd = meta_params.sf
stim_params.temporal_freq_hz = meta_params.tf
stim_params.grating_center_azimuth = meta_params.window_az
stim_params.grating_center_elevation = meta_params.window_el
self.p.parameters.go_duration = ( meta_params.pre_stim_sec + meta_params.stim_sec + meta_params.post_stim_sec, 'seconds')
window_params.window_shape = meta_params.window_func
window_params.window_shape_radius_parameter = meta_params.window_radius
window_params.window_center_azimuth = meta_params.window_az
window_params.window_center_elevation = meta_params.window_el
def go(self):
self.p.parameters.enter_go_loop = 1
def quit_server(self):
self.p.parameters.quit = 1
def get_meta_controller_class():
return SphereGratingExperimentMetaController
def make_stimuli():
stimulus = VisionEgg.SphereMap.SphereGrating(radius=1.0,
spatial_freq_cpd=1.0/9.0,
temporal_freq_hz = 1.0)
mask = VisionEgg.SphereMap.SphereWindow(radius=0.95)
return [('3d_perspective',stimulus),('3d_perspective',mask)]
def get_meta_controller_stimkey():
return "sphere_grating_server"
# Don't do anything unless this script is being run
if __name__ == '__main__':
pyro_server = VisionEgg.PyroHelpers.PyroServer()
screen = VisionEgg.Core.Screen.create_default()
# get Vision Egg stimulus ready to go
stimuli = make_stimuli()
stimulus = stimuli[0][1]
mask = stimuli[1][1]
temp = ScreenPositionParameters()
left = temp.left
right = temp.right
bottom = temp.bottom
top = temp.top
near = temp.near
far = temp.far
projection = VisionEgg.Core.PerspectiveProjection(left,
right,
bottom,
top,
near,
far)
viewport = VisionEgg.Core.Viewport(screen=screen,stimuli=[stimulus,mask],projection=projection)
p = VisionEgg.FlowControl.Presentation(viewports=[viewport])
# now hand over control of projection to ScreenPositionMetaController
projection_controller = ScreenPositionMetaController(p,projection)
pyro_server.connect(projection_controller,"projection_controller")
# now hand over control of grating and mask to SphereGratingExperimentMetaController
meta_controller = SphereGratingExperimentMetaController(screen,p,stimuli)
pyro_server.connect(meta_controller,get_meta_controller_stimkey())
# get listener controller and register it
p.add_controller(None,None, pyro_server.create_listener_controller())
# enter endless loop
p.run_forever()
|
visionegg/visionegg
|
VisionEgg/PyroApps/SphereGratingServer.py
|
Python
|
lgpl-2.1
| 6,202
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
What it does
============
This tool concatenates trr and edr files for unrestrained nodes as well as multistart nodes.
Please note: This functionality has also been integrated into zgf_mdrun. This tool is merely meant to provide this function for older node pools.
How it works
============
At the command line, type::
$ zgf_concatentate_stuff
"""
from ZIBMolPy.utils import check_call
from ZIBMolPy.pool import Pool
from ZIBMolPy.ui import Option, OptionsList
from ZIBMolPy.io.trr import TrrFile
import sys
import os
import re
import numpy as np
from subprocess import Popen, PIPE
options_desc = OptionsList([
Option("t", "trr", "bool", "concatenate trr files", default=False),
Option("e", "edr", "bool", "concatenate edr files", default=False),
])
sys.modules[__name__].__doc__ += options_desc.epytext() # for epydoc
def is_applicable():
pool = Pool()
return( len(pool.where("state == 'merge-able'")) > 0 )
#===============================================================================
def main():
options = options_desc.parse_args(sys.argv)[0]
pool = Pool()
needy_nodes = pool.where("state == 'merge-able'").multilock()
if(len(needy_nodes) == 0):
return
# find out about trr time step
dt = 0
nodeDir = needy_nodes[0].dir.split('/')[-1]
for fn in os.listdir(needy_nodes[0].dir):
if re.match("^"+nodeDir+".+run\d+\.trr", fn):
trr = TrrFile(needy_nodes[0].dir+"/"+fn)
dt = trr.first_frame.next().t - trr.first_frame.t
trr.close()
break
# dt is sometimes noisy in the final digits (three digits is femtosecond step = enough)
dt = np.around(dt, decimals=3)
for n in needy_nodes:
if(options.trr):
# merge sampling trajectories
trr_fns = sorted([ fn for fn in os.listdir(n.dir) if re.match("[^#].+run\d+.trr", fn) ])
cmd = ["trjcat", "-f"]
cmd += trr_fns
cmd += ["-o", "../../"+n.trr_fn, "-cat"]
print("Calling: %s"%" ".join(cmd))
check_call(cmd, cwd=n.dir)
if(options.edr):
# merge edr files
# get list of edr-files
edr_fnames = sorted([n.dir+"/"+fn for fn in os.listdir(n.dir) if re.match("[^#].+run\d+.edr", fn)])
assert( len(edr_fnames) == n.extensions_counter+1 )
assert( len(edr_fnames) == n.extensions_max+1 )
time_offset = n.sampling_length+dt
for edr_fn in edr_fnames[1:]:
# adapt edr starting times
cmd = ["eneconv", "-f", edr_fn, "-o", edr_fn, "-settime"]
print("Calling: "+(" ".join(cmd)))
p = Popen(cmd, stdin=PIPE)
p.communicate(input=(str(time_offset)+"\n"))
assert(p.wait() == 0)
time_offset += n.extensions_length+dt
# concatenate edr files with adapted starting times
cmd = ["eneconv", "-f"] + edr_fnames + ["-o", n.dir+"/ener.edr"]
print("Calling: "+(" ".join(cmd)))
p = Popen(cmd)
retcode = p.wait()
assert(retcode == 0)
needy_nodes.unlock()
#===============================================================================
if(__name__ == "__main__"):
main()
#EOF
|
CMD-at-ZIB/ZIBMolPy
|
tools/zgf_concatenate_stuff.py
|
Python
|
lgpl-3.0
| 2,993
|
import sys
import os.path
here = os.path.dirname(__file__) or os.curdir
sys.path.insert(0, os.path.join(here, '../../src'))
sys.path.insert(0, os.path.join(here, '..'))
# make sure sys.path does not contain a relative path before you import a module inside it
sys.path = [os.path.abspath(p) for p in sys.path]
from testtool import *
def has_executable_in_path(executable: str) -> bool:
import shutil
p = shutil.which(executable)
    return p is not None and os.path.basename(p) == executable
|
dlu-ch/dlb
|
test/dlb_contrib/testenv.py
|
Python
|
lgpl-3.0
| 482
|
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
def tensor_save(fun, directory, filename):
pass
|
mathLab/RBniCS
|
rbnics/backends/basic/wrapping/tensor_save.py
|
Python
|
lgpl-3.0
| 182
|
# SPDX-License-Identifier: LGPL-3.0-or-later
# dlb - a Pythonic build tool
# Copyright (C) 2020 Daniel Lutz <dlu-ch@users.noreply.github.com>
"""Input dependency classes for tools."""
import re
import dataclasses
from typing import Dict, Pattern, Union
from . import _depend
class RegularFile(_depend.NonDirectoryMixin, _depend.InputDependency):
pass
class NonRegularFile(_depend.NonDirectoryMixin, _depend.InputDependency):
pass
class Directory(_depend.DirectoryMixin, _depend.InputDependency):
pass
class EnvVar(_depend.InputDependency):
@dataclasses.dataclass(frozen=True, eq=True)
class Value:
name: str
raw: str
groups: Dict[str, str]
def __init__(self, *, name: str, pattern: Union[str, Pattern], example: str, **kwargs):
super().__init__(**kwargs)
if not isinstance(name, str):
raise TypeError("'name' must be a str")
if not name:
raise ValueError("'name' must not be empty")
if isinstance(pattern, str):
pattern = re.compile(pattern)
if not isinstance(pattern, Pattern):
raise TypeError("'pattern' must be regular expression (compiled or str)")
if not isinstance(example, str):
raise TypeError("'example' must be a str")
if not pattern.fullmatch(example):
raise ValueError(f"'example' is not matched by 'pattern': {example!r}")
if self.multiplicity is not None:
raise ValueError("must not have a multiplicity")
self._name = name
self._pattern: Pattern = pattern
self._example = example
@property
def name(self) -> str:
return self._name
@property
def pattern(self) -> Pattern:
return self._pattern
@property
def example(self) -> str:
return self._example
def compatible_and_no_less_restrictive(self, other) -> bool:
if not super().compatible_and_no_less_restrictive(other):
return False
return self.name == other.name and self.pattern == other.pattern # ignore example
def validate_single(self, value) -> 'EnvVar.Value':
        # value is used to define the content of a (future) environment variable
value = super().validate_single(value)
if not isinstance(value, str):
raise TypeError("'value' must be a str")
m = self._pattern.fullmatch(value)
if not m:
raise ValueError(f"value {value!r} is not matched by validation pattern {self._pattern.pattern!r}")
# noinspection PyCallByClass
return EnvVar.Value(name=self.name, raw=value, groups=m.groupdict())
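def _envvar_usage_sketch() -> 'EnvVar.Value':
    # Hedged usage sketch (not part of the original module): shows what
    # validate_single() yields for a matching value. The LANG pattern and the
    # value 'de_CH' are illustrative, and it is assumed here that the base
    # dependency class needs no further constructor arguments. Because of the
    # relative import above, call this from code that imports this module as
    # part of the dlb package rather than running the file directly.
    lang_var = EnvVar(
        name='LANG',
        pattern=r'(?P<language>[a-z]{2})_(?P<territory>[A-Z]{2})',
        example='sv_SE')
    value = lang_var.validate_single('de_CH')
    # -> EnvVar.Value(name='LANG', raw='de_CH',
    #                 groups={'language': 'de', 'territory': 'CH'})
    return value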
|
dlu-ch/dlb
|
src/dlb/ex/input.py
|
Python
|
lgpl-3.0
| 2,667
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-27 21:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('adyengo', '0006_auto_20160527_2051'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='currency_code',
field=models.CharField(choices=[('EUR', 'Euro')], default='EUR', max_length=3),
),
migrations.AlterField(
model_name='notification',
name='event_code',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='notification',
name='event_date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='notification',
name='ip_address',
field=models.CharField(blank=True, max_length=45, null=True),
),
migrations.AlterField(
model_name='notification',
name='merchant_account_code',
field=models.CharField(blank=True, max_length=150, null=True),
),
migrations.AlterField(
model_name='notification',
name='merchant_reference',
field=models.CharField(blank=True, max_length=128, null=True),
),
migrations.AlterField(
model_name='notification',
name='operations',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='notification',
name='original_reference',
field=models.CharField(blank=True, max_length=150, null=True),
),
migrations.AlterField(
model_name='notification',
name='payment_method',
field=models.CharField(blank=True, choices=[('bankTransfer_DE', 'German Banktransfer'), ('directEbanking', 'SofortUberweisung'), ('paypal', 'PayPal'), ('amex', 'Amex'), ('bankTransfer', 'All banktransfers'), ('mc', 'Master Card'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('visa', 'Visa'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('directdebit_NL', 'Direct Debit (Netherlands)')], max_length=50, null=True),
),
migrations.AlterField(
model_name='notification',
name='psp_reference',
field=models.CharField(blank=True, max_length=150, null=True),
),
migrations.AlterField(
model_name='notification',
name='reason',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AlterField(
model_name='notification',
name='session',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='adyengo.Session'),
),
migrations.AlterField(
model_name='recurringpaymentresult',
name='result_code',
field=models.CharField(choices=[('Error', 'Error'), ('Refused', 'Refused'), ('Authorised', 'Authorised')], max_length=30),
),
migrations.AlterField(
model_name='session',
name='country_code',
field=models.CharField(blank=True, choices=[('BE', 'Belgium'), ('GB', 'United Kingdom'), ('DE', 'Germany'), ('NL', 'Netherlands')], max_length=2),
),
migrations.AlterField(
model_name='session',
name='currency_code',
field=models.CharField(choices=[('EUR', 'Euro')], default='EUR', max_length=3),
),
migrations.AlterField(
model_name='session',
name='page_type',
field=models.CharField(choices=[('multiple', 'Multiple'), ('skip', 'Skip'), ('single', 'Single')], default='multiple', max_length=15),
),
migrations.AlterField(
model_name='session',
name='recurring_contract',
field=models.CharField(blank=True, choices=[('ONECLICK', 'One click'), ('RECURRING,ONECLICK', 'Recurring and One click (user chooses)'), ('RECURRING', 'Recurring')], max_length=50),
),
migrations.AlterField(
model_name='session',
name='session_type',
field=models.CharField(choices=[('api_recurring', 'API Recurring'), ('hpp_regular', 'HPP Regular'), ('hpp_recurring', 'HPP Recurring')], max_length=25),
),
migrations.AlterField(
model_name='session',
name='shopper_locale',
field=models.CharField(blank=True, choices=[('nl_NL', 'Dutch (Holland)'), ('en_GB', 'English (United Kingdom)'), ('de_DE', 'German (Germany)'), ('nl_BE', 'Dutch (Belgium)'), ('fr_BE', 'French (Belgium)')], default='nl_NL', max_length=5),
),
migrations.AlterField(
model_name='session',
name='skin_code',
field=models.CharField(default='BCS1MHG2', max_length=10),
),
migrations.AlterField(
model_name='sessionallowedpaymentmethods',
name='method',
field=models.CharField(choices=[('bankTransfer_DE', 'German Banktransfer'), ('directEbanking', 'SofortUberweisung'), ('paypal', 'PayPal'), ('amex', 'Amex'), ('bankTransfer', 'All banktransfers'), ('mc', 'Master Card'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('visa', 'Visa'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('directdebit_NL', 'Direct Debit (Netherlands)')], max_length=50),
),
migrations.AlterField(
model_name='sessionblockedpaymentmethods',
name='method',
field=models.CharField(choices=[('bankTransfer_DE', 'German Banktransfer'), ('directEbanking', 'SofortUberweisung'), ('paypal', 'PayPal'), ('amex', 'Amex'), ('bankTransfer', 'All banktransfers'), ('mc', 'Master Card'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('visa', 'Visa'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('directdebit_NL', 'Direct Debit (Netherlands)')], max_length=50),
),
]
|
gitaarik/adyengo
|
adyengo/migrations/0007_auto_20160527_2341.py
|
Python
|
lgpl-3.0
| 6,260
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'fft_window.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FFTWindow(object):
def setupUi(self, FFTWindow):
FFTWindow.setObjectName("FFTWindow")
FFTWindow.resize(640, 480)
self.centralwidget = QtWidgets.QWidget(FFTWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.start_frequency = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.start_frequency.setDecimals(3)
self.start_frequency.setMinimum(0.001)
self.start_frequency.setMaximum(1000000.0)
self.start_frequency.setProperty("value", 1.0)
self.start_frequency.setObjectName("start_frequency")
self.verticalLayout.addWidget(self.start_frequency)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.end_frequency = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.end_frequency.setDecimals(3)
self.end_frequency.setMinimum(0.001)
self.end_frequency.setMaximum(1000000.0)
self.end_frequency.setProperty("value", 1000.0)
self.end_frequency.setObjectName("end_frequency")
self.verticalLayout.addWidget(self.end_frequency)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.frequency_step = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.frequency_step.setDecimals(3)
self.frequency_step.setMinimum(0.001)
self.frequency_step.setMaximum(1000000.0)
self.frequency_step.setProperty("value", 1.0)
self.frequency_step.setObjectName("frequency_step")
self.verticalLayout.addWidget(self.frequency_step)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout.addLayout(self.verticalLayout)
self.layout = QtWidgets.QVBoxLayout()
self.layout.setObjectName("layout")
self.horizontalLayout.addLayout(self.layout)
self.horizontalLayout.setStretch(1, 1)
FFTWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(FFTWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 21))
self.menubar.setObjectName("menubar")
FFTWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(FFTWindow)
self.statusbar.setObjectName("statusbar")
FFTWindow.setStatusBar(self.statusbar)
self.retranslateUi(FFTWindow)
QtCore.QMetaObject.connectSlotsByName(FFTWindow)
def retranslateUi(self, FFTWindow):
_translate = QtCore.QCoreApplication.translate
FFTWindow.setWindowTitle(_translate("FFTWindow", "MainWindow"))
self.label_2.setText(_translate("FFTWindow", "Start frequency"))
self.start_frequency.setSuffix(_translate("FFTWindow", "Hz"))
self.label.setText(_translate("FFTWindow", "End frequency"))
self.end_frequency.setSuffix(_translate("FFTWindow", "Hz"))
self.label_3.setText(_translate("FFTWindow", "Frequency step"))
self.frequency_step.setSuffix(_translate("FFTWindow", "Hz"))
|
danielhrisca/asammdf
|
asammdf/gui/ui/fft_window.py
|
Python
|
lgpl-3.0
| 4,071
|
"""Module for outputting data."""
import strata.dataformats as formats
# Set module handles for ftype
default_module = formats.simple.main
modules = {
'default': default_module,
'simple': formats.simple.main,
'simple_plain': formats.simple.main
}
def write(path, data, *args, **kwargs):
"""Output data to a path.
This function selects a default module to use for output and calls
its 'write_data' function. The module can be selected explicitly
using the keyword argument 'ftype'.
Arguments and keyword arguments are passed on to the functions.
Args:
path (str): Write to a file at this path.
data (dict): Data to write.
Keyword Args:
ftype (str, default='simple'): File type to write. Choices:
'simple' - Simple binary (strata.dataformats.simple)
'simple_plain' - Simple plaintext (strata.dataformats.simple)
Raises:
        KeyError: If a non-existent 'ftype' is specified.
"""
def write_simple(ftype):
if ftype == 'simple_plain':
kwargs.update({'binary': False})
modules[ftype].write_data(path, data, *args, **kwargs)
ftype = kwargs.pop('ftype', 'default')
    if ftype not in modules:
        raise KeyError("specified 'ftype' does not exist for writing.")
write_simple(ftype)
def flowdata_to_dict(flow):
"""Convert a FlowData object to write-compatible dictionary.
Args:
flow (FlowData): Object to convert.
Returns:
dict: Data to write.
"""
return {key: flow.data[key] for key in flow.data.dtype.names}
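# Usage sketch (illustrative; the field names below are assumptions, not part of
# the strata data model):
#     data = {'X': xs, 'Y': ys, 'M': mass}
#     write('slice_00001.dat', data)                        # default binary format
#     write('slice_00001.txt', data, ftype='simple_plain')  # plaintext variant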
|
pjohansson/flowtools-rewrite
|
strata/dataformats/write.py
|
Python
|
lgpl-3.0
| 1,682
|
# -*- coding: utf-8 -*-
# Copyright(C) 2011-2012 Romain Bignon, Laurent Bachelier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.audio import BaseAudio
from weboob.capabilities.radio import Radio
import itertools
class RadioFranceTest(BackendTest):
MODULE = 'radiofrance'
def test_ls_radios_and_selections(self):
l = list(self.backend.iter_resources(objs=[Radio], split_path=[]))
self.assertTrue(0 < len(l) < 30)
for radio in l:
name = radio.split_path[-1]
if name != 'francebleu':
streams = self.backend.get_radio(name).streams
self.assertTrue(len(streams) > 0)
l_sel = list(self.backend.iter_resources(objs=[BaseAudio], split_path=[name, 'selection']))
if len(l_sel) > 0:
self.assertTrue(len(l_sel[0].url) > 0)
l = list(self.backend.iter_resources(objs=[Radio], split_path=['francebleu']))
self.assertTrue(len(l) > 30)
for radio in l:
streams = self.backend.get_radio(radio.split_path[-1]).streams
self.assertTrue(len(streams) > 0)
l_sel1 = list(self.backend.iter_resources(objs=[BaseAudio],
split_path=['francebleu',
radio.split_path[-1]]))
if 'Selection' in [el.title for el in l_sel1]:
l_sel = list(self.backend.iter_resources(objs=[BaseAudio],
split_path=['francebleu',
radio.split_path[-1],
'selection']))
if len(l_sel) > 0:
self.assertTrue(len(l_sel[0].url) > 0)
def test_podcasts(self):
for key, item in self.backend._RADIOS.items():
if 'podcast' in item:
emissions = list(self.backend.iter_resources(objs=[BaseAudio], split_path=[key, 'podcasts']))
self.assertTrue(len(emissions) > 0)
podcasts = list(self.backend.iter_resources(objs=[BaseAudio], split_path=emissions[0].split_path))
self.assertTrue(len(podcasts) > 0)
podcast = self.backend.get_audio(podcasts[0].id)
self.assertTrue(podcast.url)
def test_search_radio(self):
l = list(self.backend.iter_radios_search('bleu'))
self.assertTrue(len(l) > 0)
self.assertTrue(len(l[0].streams) > 0)
def test_search_get_audio(self):
l = list(itertools.islice(self.backend.search_audio('jou'), 0, 20))
self.assertTrue(len(l) > 0)
a = self.backend.get_audio(l[0].id)
self.assertTrue(a.url)
|
laurentb/weboob
|
modules/radiofrance/test.py
|
Python
|
lgpl-3.0
| 3,556
|
# Copyright (C) 2015 Kevin Ross, Optiv, Inc. (brad.spengler@optiv.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class BrowserSecurity(Signature):
name = "browser_security"
description = "Attempts to modify browser security settings"
severity = 3
categories = ["browser", "clickfraud", "banker"]
authors = ["Kevin Ross", "Optiv"]
minimum = "1.2"
def run(self):
reg_indicators = [
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Privacy\\\\EnableInPrivateMode$",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\PhishingFilter\\\\.*",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\Zones\\\\[0-4]\\\\.*",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\ZoneMap\\\\Domains\\\\.*",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\ZoneMap\\\\EscDomains\\\\.*",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\ZoneMap\\\\EscRanges\\\\.*",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\ZoneMap\\\\IEHarden$",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet\\ Settings\\\\CertificateRevocation$",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Main\\\\NoUpdateCheck$",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Security\\\\.*",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Internet\\ Explorer\\\\Main\\\\FeatureControl\\\\.*",
]
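        # e.g. the first indicator matches writes to keys such as
        # HKEY_CURRENT_USER\SOFTWARE\Microsoft\Internet Explorer\Privacy\EnableInPrivateMode
        # (illustrative example; the hive prefix is covered by the leading '.*')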
for indicator in reg_indicators:
if self.check_write_key(pattern=indicator, regex=True):
return True
return False
|
lixiangning888/whole_project
|
modules/signatures_orginal_20151110/browser_security.py
|
Python
|
lgpl-3.0
| 2,590
|
import json
from . import TestCase
class TestSimulationBase(TestCase):
def setUp(self):
self.simulations = {}
        logic_types = ['repressilator', 'toggle_switch_1', 'toggle_switch_2',
                       'inverter', 'simple', 'and_gate', 'or_gate']
        for logic_type in logic_types:
with open('tests/preprocess_%s.json' % logic_type) as fobj:
self.simulations[logic_type] = json.load(fobj)
class TestSimulationPreprocess(TestSimulationBase):
def test_preprocess_repressilator(self):
circuits = json.dumps([
{'inputs': [{'id': 1, 'promoter_id': 17, 'receptor_id': 1}],
'logics': [1], 'outputs': []}
])
result = self.client.post('/simulation/preprocess', data=circuits).json
self.assertDictContainsRecursively(result,
self.simulations['repressilator'])
def test_preprocess_toggle_switch_1(self):
circuits = json.dumps([
{'inputs': [{'id': 3, 'promoter_id': 9, 'receptor_id': 4},
{'id': 4, 'promoter_id': 20, 'receptor_id': 5}],
'logics': [17], 'outputs': [1]}
])
result = self.client.post('/simulation/preprocess', data=circuits).json
self.assertDictContainsRecursively(result,
self.simulations['toggle_switch_1'])
def test_preprocess_toggle_switch_2(self):
circuits = json.dumps([
{'inputs': [{'id': 4, 'promoter_id': 20, 'receptor_id': 5}],
'logics': [18], 'outputs': [1, 2]}
])
result = self.client.post('/simulation/preprocess', data=circuits).json
self.assertDictContainsRecursively(result,
self.simulations['toggle_switch_2'])
def test_preprocess_inverter(self):
circuits = json.dumps([
{'inputs': [{'id': 1, 'promoter_id': 17, 'receptor_id': 1}],
'logics': [7], 'outputs': [1]}
])
result = self.client.post('/simulation/preprocess', data=circuits).json
self.assertDictContainsRecursively(result,
self.simulations['inverter'])
def test_preprocess_simple(self):
circuits = json.dumps([
{'inputs': [{'id': 1, 'promoter_id': 17, 'receptor_id': 1}],
'logics': [20], 'outputs': [1]}
])
result = self.client.post('/simulation/preprocess', data=circuits).json
self.assertDictContainsRecursively(result,
self.simulations['simple'])
def test_preprocess_and_gate(self):
circuits = json.dumps([
{'inputs': [{'id': 8, 'promoter_id': 1, 'receptor_id': 12},
{'id': 9, 'promoter_id': 17, 'receptor_id': 13}],
'logics': [21], 'outputs': [1]}
])
result = self.client.post('/simulation/preprocess', data=circuits).json
self.assertDictContainsRecursively(result,
self.simulations['and_gate'])
def test_preprocess_or_gate(self):
circuits = json.dumps([
{'inputs': [{'id': 8, 'promoter_id': 1, 'receptor_id': 12},
{'id': 9, 'promoter_id': 17, 'receptor_id': 13}],
'logics': [23], 'outputs': [1]}
])
result = self.client.post('/simulation/preprocess', data=circuits).json
self.assertDictContainsRecursively(result,
self.simulations['or_gate'])
class TestSimulationSimulate(TestSimulationBase):
def test_simulation_dynamic_and_gate(self):
s = self.simulations['and_gate']
s['x0'] = {'Zinc ions': 2e-2, 'PAI': 1e-2}
s['t'] = 100
result = self.client.post('/simulation/simulate/dynamic',
data=json.dumps(s)).json
with open('tests/simulation_dynamic_and_gate.json') as fobj:
desired = json.load(fobj)
self.assertItemsAlmostEqual(result, desired)
def test_simulation_dynamic_simple(self):
s = self.simulations['simple']
s['x0'] = {'Mercury ions': 1e-2}
s['t'] = 100
result = self.client.post('/simulation/simulate/dynamic',
data=json.dumps(s)).json
with open('tests/simulation_dynamic_simple.json') as fobj:
desired = json.load(fobj)
self.assertItemsAlmostEqual(result, desired)
def test_simulation_dynamic_toggle_switch_1(self):
s = self.simulations['toggle_switch_1']
s['x0'] = {'Arsenic ions': 1e-2, 'aTc': 2e-2}
s['t'] = 100
result = self.client.post('/simulation/simulate/dynamic',
data=json.dumps(s)).json
with open('tests/simulation_dynamic_toggle_switch_1.json') as fobj:
desired = json.load(fobj)
self.assertItemsAlmostEqual(result, desired)
def test_simulation_static_and_gate(self):
s = self.simulations['and_gate']
s['c_static'] = 1.0
s['t'] = 100
result = self.client.post('/simulation/simulate/static',
data=json.dumps(s)).json
with open('tests/simulation_static_and_gate.json') as fobj:
desired = json.load(fobj)
self.assertItemsAlmostEqual(result, desired)
def test_simulation_static_simple(self):
s = self.simulations['simple']
s['c_static'] = 1.0
s['t'] = 100
result = self.client.post('/simulation/simulate/static',
data=json.dumps(s)).json
with open('tests/simulation_static_simple.json') as fobj:
desired = json.load(fobj)
self.assertItemsAlmostEqual(result, desired)
def test_simulation_static_toggle_switch_1(self):
s = self.simulations['toggle_switch_1']
s['c_static'] = 1.0
s['t'] = 100
result = self.client.post('/simulation/simulate/static',
data=json.dumps(s)).json
with open('tests/simulation_static_toggle_switch_1.json') as fobj:
desired = json.load(fobj)
self.assertItemsAlmostEqual(result, desired)
|
igemsoftware/SYSU-Software_2014
|
tests/test_simulation.py
|
Python
|
lgpl-3.0
| 6,266
|
import unittest
from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner
class StringDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = DecisionRunner('kwargs_parameter.dmn', debug='DEBUG')
def test_string_decision_string_output1(self):
res = self.runner.decide(Gender='m')
self.assertEqual(res.description, 'm Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(StringDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
knipknap/SpiffWorkflow
|
tests/SpiffWorkflow/dmn/KwargsParameterTest.py
|
Python
|
lgpl-3.0
| 672
|
import pygame
from vec2d import vec2d
from random import choice
ITEM_TYPES = ['weapon', 'armor', 'flask']
WEAPONS_ID = ['sword', 'axe', 'staff', 'bow']
ARMORS_ID = [
'chest', 'shoulders', 'shield', 'gloves', 'boots', 'pants', 'mantle',
'helmet', 'skirt']
FLASKS_ID = ['health', 'armor']
SWORD_IMG = list()
AXES_IMG = list()
STAFF_IMG = list()
BOW_IMG = list()
SWORDS = dict()
for i in range(47):
SWORD_IMG.append(pygame.image.load('sw'+str(i+1)+'.png'))
AXES_IMG.append(pygame.image.load('sw'+str(i+1)+'.png'))
STAFF_IMG.append(pygame.image.load('sw'+str(i+1)+'.png'))
BOW_IMG.append(pygame.image.load('sw'+str(i+1)+'.png'))
class Item(pygame.sprite.Sprite):
(DROPPED, GETTED) = range(2)
def __init__(self, screen, pos, img_file, item_type,
price, attack_power, deffence_power, armor, name, level):
pygame.sprite.Sprite.__init__(self)
self.pos = vec2d(pos)
self.screen = screen
self.base_image = img_file
self.image = self.base_image
self.state = self.DROPPED
self.type = item_type
self.attack_power = attack_power
self.deffence_power = deffence_power
self.armor = armor
self.price = price
self.name = name
self.level_required = level
def drop(self, pos):
self.state = self.DROPPED
def get(self):
self.state = self.GETTED
def draw(self):
item_rect = self.image.get_rect().move(
self.pos.x - self.image.get_size()[0] / 2,
self.pos.y - self.image.get_size()[1] / 2)
self.screen.blit(self.image, item_rect)
class Cell(pygame.sprite.Sprite):
def __init__(self, screen, pos, item = None, background = None):
pygame.sprite.Sprite.__init__(self)
self.pos = vec2d(pos)
self.item = item
self.screen = screen
self.background = background
self.side = 50
def is_empty(self):
return self.item == None
    def draw(self):
        # fall back to a plain black square if no background surface was given
        if self.background is None:
            self.background = pygame.Surface((self.side, self.side))
            self.background.fill(pygame.Color('black'))
        cell_rect = self.background.get_rect().move(
            self.pos.x - self.background.get_size()[0] / 2,
            self.pos.y - self.background.get_size()[1] / 2)
        self.screen.blit(self.background, cell_rect)
class Inventory:
(WEAPON, SHIELD, CHEST, SHOULDERS, SHIELD, GLOVES,
BOOTS, PANTS, MANTLE, HELMET, SKIRT,) = range(11)
def __init__(self, screen, texture):
self.texture = texture
self.left_top = vec2d(0, 540)
self.hight = 60
self.wide = 600
self.screen = screen
        self.bag_pages = 0  # index of the current bag page (self.bag starts with one empty page)
self.bag = [[]]
self.start_pos_bag = vec2d(0, 0)
# initial of bag
self.inverntory = list()
self.start_pos_inventory = vec2d(0, 0)
for i in range(11):
current_pos = self.start_pos_inventory
self.inverntory.append(Cell(self.screen, current_pos))
current_pos.x += 60
self.bar = []
self.start_pos_bar = vec2d(5, 545)
for i in range(9):
current_pos = self.start_pos_bar
self.bar.append(Cell(self.screen, current_pos))
current_pos.x += 60
    def add_item_bag(self, item):
        # start a new bag page when the current one is full (at most four pages)
        if len(self.bag[self.bag_pages]) == 9:
            if self.bag_pages >= 3:
                return
            self.bag_pages += 1
            self.bag.append([])
        item.state = item.GETTED
        cell_position = (self.bag_pages, len(self.bag[self.bag_pages]))
        self.bag[self.bag_pages].append(
            Cell(self.screen, cell_position, item))
def remove_item_bag(self, item):
deleting_pos = (item.pos.x, item.pos.y)
self.bag[deleting_pos[0]].pop(deleting_pos[1])
item.state = item.DROPPED
    def add_item_inventory(self, pos_inventory, item):
        if self.inverntory[pos_inventory] is None:
            self.inverntory[pos_inventory] = item
        else:
            self.remove_item_inventory(pos_inventory)
            self.inverntory[pos_inventory] = item
def remove_item_inventory(self, pos_inventory):
if self.inverntory[pos_inventory] != None:
removed_item = self.inverntory[pos_inventory]
self.inverntory[pos_inventory] = None
self.add_item_bag(removed_item)
def draw(self):
step_h, step_w = self.texture.get_size()
step_h_counter = 0
step_w_counter = 0
while step_w_counter <= self.wide:
while step_h_counter <= self.hight:
self.screen.blit(self.texture,
self.left_top + vec2d(step_w_counter, step_h_counter))
step_h_counter += step_h
step_w_counter += step_w
step_h_counter = 0
    def load_items(self, level=1):
        # 'level' is exposed as a parameter here (default 1) because the original
        # code referenced an undefined 'level' name; treat this as an assumption.
        i = 0
        for img_file in SWORD_IMG:
            i += 1
            SWORDS[str(i) + "_sword"] = type(str(i) + "_sword", (Item,), {
                'pos' : vec2d(0, 0), 'screen' : self.screen,
                'base_image' : img_file, 'image' : img_file,
                'state' : 0, 'item_type' : ITEM_TYPES[0],
                'level_required' : level,
                'name' : str(i) + "_sword",
                'attack_power' : level*20,
                'deffence_power' : level*5,
                'price' : level*100, 'armor' : level*100 })
# load more items..
    def draw_inventory(self):
        for cell in self.inverntory:
            cell.draw()
            if cell.item is not None:
                cell.item.draw()
    def draw_bar(self):
        for cell in self.bar:
            cell.draw()
            if cell.item is not None:
                cell.item.draw()
def draw_bag(self):
pass
|
lepovica/Lost-Nero
|
inventory.py
|
Python
|
lgpl-3.0
| 5,940
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import neighbors
from GridCal.Engine import PowerFlowOptions, FileOpen, SolverType, ReactivePowerControlMode, \
TapsControlMode, BranchImpedanceMode, TimeSeries, PtdfTimeSeries, CDF, LatinHypercubeSampling
def knn_interp(X, Y, perc):
k_split = int(X.shape[0] * perc)
X_train = X[:k_split]
Y_train = Y[:k_split]
X_test = X[k_split:]
Y_test = Y[k_split:]
n_neighbors = 5
model = neighbors.KNeighborsRegressor(n_neighbors)
print('Fitting...')
model.fit(X_train, Y_train)
print('Predicting...')
Y_predict = model.predict(X_test)
print('Scoring...')
score = model.score(X_test, Y_test)
print('Score:', score)
    return Y_predict, score
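# Illustrative usage sketch (not in the original script): given a feature matrix
# X of shape (n_samples, n_features) and targets Y, something like
#     Y_pred, score = knn_interp(X, Y, perc=0.8)
# fits on the first 80 % of the rows and scores on the remaining 20 %.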
def run(fname):
circuit = FileOpen(fname).open()
pf_options = PowerFlowOptions(solver_type=SolverType.NR,
retry_with_other_methods=False,
verbose=False,
initialize_with_existing_solution=False,
tolerance=1e-6,
max_iter=5,
max_outer_loop_iter=10,
control_q=ReactivePowerControlMode.NoControl,
control_taps=TapsControlMode.NoControl,
multi_core=False,
dispatch_storage=False,
control_p=False,
apply_temperature_correction=False,
branch_impedance_tolerance_mode=BranchImpedanceMode.Specified,
q_steepness_factor=30,
distributed_slack=False,
ignore_single_node_islands=False,
correction_parameter=1e-4)
nc = circuit.compile_time_series()
ts_driver = TimeSeries(circuit, pf_options)
ts_driver.run()
ptdf_driver = PtdfTimeSeries(circuit, pf_options, power_delta=10)
ptdf_driver.run()
npoints = int(len(circuit.time_profile) * 1)
lhs_driver = LatinHypercubeSampling(circuit, pf_options, sampling_points=npoints)
lhs_driver.run()
P = nc.get_power_injections().real.T
Q = nc.get_power_injections().imag.T
Pbr_ts = ts_driver.results.Sbranch.real
Pbr_ptdf = ptdf_driver.results.Sbranch.real
P_lhs = lhs_driver.results.S_points.real
Q_lhs = lhs_driver.results.S_points.imag
Pbr_lhs = lhs_driver.results.Sbr_points.real
# KNN
n_neighbors = 3
model = neighbors.KNeighborsRegressor(n_neighbors)
# model.fit(P[:40], Pbr_ts[:40])
# model.fit(P_lhs, Pbr_lhs) # just the LHS for training
# X = np.r_[np.c_[P_lhs, Q], np.c_[P, Q]]
# Y = np.r_[Pbr_lhs, Pbr_ts]
X = np.c_[P, Q][:60]
Y = Pbr_ts[:60]
model.fit(X, Y) # LHS + TS for training ("dreaming")
Pbr_knn = model.predict(np.c_[P, Q])
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
i = 10 # branch index
ax.plot(Pbr_ts[i, :], label='Real flow', linewidth=5, c='orange')
ax.plot(Pbr_ptdf[i, :], label='PTDF', c='b', linestyle='--')
ax.plot(Pbr_knn[i, :], label='KNN', c='k', linestyle=':')
ax.set_xlabel('Time')
ax.set_ylabel('MW')
fig.legend()
plt.show()
if __name__ == '__main__':
run(r'/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE_30_new.xlsx')
|
SanPen/GridCal
|
src/research/ptdf_ts.py
|
Python
|
lgpl-3.0
| 3,540
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import EffectMode
from eos import ModuleHigh
from eos import State
from eos.const.eve import EffectCategoryId
from tests.integration.effect_mode.testcase import EffectModeTestCase
class TestFullComplianceOverload(EffectModeTestCase):
def test_started_on_add(self):
effect = self.mkeffect(
category_id=EffectCategoryId.overload,
modifiers=[self.modifier])
item = ModuleHigh(
self.mktype(
attrs={self.tgt_attr.id: 10, self.src_attr.id: 2},
effects=[effect]).id,
state=State.overload)
item.set_effect_mode(effect.id, EffectMode.full_compliance)
# Action
self.fit.modules.high.append(item)
# Verification
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_started_on_state_switch(self):
effect = self.mkeffect(
category_id=EffectCategoryId.overload,
modifiers=[self.modifier])
item = ModuleHigh(
self.mktype(
attrs={self.tgt_attr.id: 10, self.src_attr.id: 2},
effects=[effect]).id,
state=State.active)
item.set_effect_mode(effect.id, EffectMode.full_compliance)
self.fit.modules.high.append(item)
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
# Action
item.state = State.overload
# Verification
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_started_on_mode_switch(self):
effect = self.mkeffect(
category_id=EffectCategoryId.overload,
modifiers=[self.modifier])
item = ModuleHigh(
self.mktype(
attrs={self.tgt_attr.id: 10, self.src_attr.id: 2},
effects=[effect]).id,
state=State.overload)
item.set_effect_mode(effect.id, EffectMode.force_stop)
self.fit.modules.high.append(item)
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
# Action
item.set_effect_mode(effect.id, EffectMode.full_compliance)
# Verification
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_stopped_insufficient_state_on_add(self):
effect = self.mkeffect(
category_id=EffectCategoryId.overload,
modifiers=[self.modifier])
item = ModuleHigh(
self.mktype(
attrs={self.tgt_attr.id: 10, self.src_attr.id: 2},
effects=[effect]).id,
state=State.active)
item.set_effect_mode(effect.id, EffectMode.full_compliance)
# Action
self.fit.modules.high.append(item)
# Verification
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_stopped_insufficient_state_on_state_switch(self):
effect = self.mkeffect(
category_id=EffectCategoryId.overload,
modifiers=[self.modifier])
item = ModuleHigh(
self.mktype(
attrs={self.tgt_attr.id: 10, self.src_attr.id: 2},
effects=[effect]).id,
state=State.overload)
item.set_effect_mode(effect.id, EffectMode.full_compliance)
self.fit.modules.high.append(item)
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
# Action
item.state = State.active
# Verification
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_stopped_insufficient_state_on_mode_switch(self):
effect = self.mkeffect(
category_id=EffectCategoryId.overload,
modifiers=[self.modifier])
item = ModuleHigh(
self.mktype(
attrs={self.tgt_attr.id: 10, self.src_attr.id: 2},
effects=[effect]).id,
state=State.active)
item.set_effect_mode(effect.id, EffectMode.force_run)
self.fit.modules.high.append(item)
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 12)
# Action
item.set_effect_mode(effect.id, EffectMode.full_compliance)
# Verification
self.assertAlmostEqual(item.attrs[self.tgt_attr.id], 10)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
|
pyfa-org/eos
|
tests/integration/effect_mode/full_compliance/test_overload.py
|
Python
|
lgpl-3.0
| 5,775
|
import testimport1
import testimport2
|
wh20160213/WuhuaLearnToPython
|
python_study_level1/package2/__init__.py
|
Python
|
lgpl-3.0
| 44
|
import threading
from asyncio import get_event_loop
from pony import orm
from pony.orm import db_session, desc, raw_sql, select
from tribler_core.modules.metadata_store.orm_bindings.channel_node import LEGACY_ENTRY, TODELETE
from tribler_core.modules.metadata_store.orm_bindings.torrent_metadata import NULL_KEY_SUBST
from tribler_core.modules.metadata_store.serialization import METADATA_NODE, MetadataNodePayload
from tribler_core.utilities.unicode import hexlify
def define_binding(db):
class MetadataNode(db.ChannelNode):
"""
This ORM class extends ChannelNode by adding metadata-storing attributes such as "title" and "tags".
It implements methods for indexed text search based on the "title" field.
It is not intended for direct use. Instead, other classes should derive from it.
"""
_discriminator_ = METADATA_NODE
# Serializable
title = orm.Optional(str, default='', index=True)
tags = orm.Optional(str, default='', index=True)
# FIXME: ACHTUNG! PONY BUG! This is a workaround for Pony not caching attributes from multiple inheritance!
# Its real home is CollectionNode, but we are forced to put it here so it is loaded by default on all queries.
# When Pony fixes it, we must move it back to CollectionNode for clarity.
num_entries = orm.Optional(int, size=64, default=0, index=True)
# Special class-level properties
_payload_class = MetadataNodePayload
payload_arguments = _payload_class.__init__.__code__.co_varnames[
: _payload_class.__init__.__code__.co_argcount
][1:]
nonpersonal_attributes = db.ChannelNode.nonpersonal_attributes + ('title', 'tags')
@classmethod
def search_keyword(cls, query, lim=100):
# Requires FTS5 table "FtsIndex" to be generated and populated.
# FTS table is maintained automatically by SQL triggers.
# BM25 ranking is embedded in FTS5.
# Sanitize FTS query
if not query or query == "*":
return []
# !!! FIXME !!! Fix GROUP BY for entries without infohash !!!
# TODO: optimize this query by removing unnecessary select nests (including Pony-manages selects)
fts_ids = raw_sql(
"""SELECT rowid FROM ChannelNode WHERE rowid IN (SELECT rowid FROM FtsIndex WHERE FtsIndex MATCH $query
ORDER BY bm25(FtsIndex) LIMIT $lim) GROUP BY infohash"""
)
return cls.select(lambda g: g.rowid in fts_ids)
@classmethod
@db_session
def get_entries_query(
cls,
metadata_type=None,
channel_pk=None,
exclude_deleted=False,
hide_xxx=False,
exclude_legacy=False,
origin_id=None,
sort_by=None,
sort_desc=True,
txt_filter=None,
subscribed=None,
category=None,
attribute_ranges=None,
id_=None,
):
"""
This method implements REST-friendly way to get entries from the database. It is overloaded by the higher
level classes to add some more conditions to the query.
:return: PonyORM query object corresponding to the given params.
"""
# Warning! For Pony magic to work, iteration variable name (e.g. 'g') should be the same everywhere!
pony_query = cls.search_keyword(txt_filter, lim=1000) if txt_filter else select(g for g in cls)
if metadata_type is not None:
try:
pony_query = pony_query.where(lambda g: g.metadata_type in metadata_type)
except TypeError:
pony_query = pony_query.where(lambda g: g.metadata_type == metadata_type)
pony_query = (
pony_query.where(public_key=(b"" if channel_pk == NULL_KEY_SUBST else channel_pk))
if channel_pk is not None
else pony_query
)
if attribute_ranges is not None:
for attr, left, right in attribute_ranges:
getattr(cls, attr) # Check against code injection
if left is not None:
pony_query = pony_query.where(f"g.{attr} >= left")
if right is not None:
pony_query = pony_query.where(f"g.{attr} < right")
# origin_id can be zero, for e.g. root channel
pony_query = pony_query.where(id_=id_) if id_ is not None else pony_query
pony_query = pony_query.where(origin_id=origin_id) if origin_id is not None else pony_query
pony_query = pony_query.where(lambda g: g.subscribed) if subscribed is not None else pony_query
pony_query = pony_query.where(lambda g: g.tags == category) if category else pony_query
pony_query = pony_query.where(lambda g: g.status != TODELETE) if exclude_deleted else pony_query
pony_query = pony_query.where(lambda g: g.xxx == 0) if hide_xxx else pony_query
pony_query = pony_query.where(lambda g: g.status != LEGACY_ENTRY) if exclude_legacy else pony_query
# Sort the query
if sort_by == "HEALTH":
pony_query = (
pony_query.sort_by("(desc(g.health.seeders), desc(g.health.leechers))")
if sort_desc
else pony_query.sort_by("(g.health.seeders, g.health.leechers)")
)
elif sort_by == "size" and not issubclass(cls, db.ChannelMetadata):
# TODO: optimize this check to skip cases where size field does not matter
# When querying for mixed channels / torrents lists, channels should have priority over torrents
sort_expression = "desc(g.num_entries), desc(g.size)" if sort_desc else "g.num_entries, g.size"
pony_query = pony_query.sort_by(sort_expression)
elif sort_by:
sort_expression = "g." + sort_by
sort_expression = desc(sort_expression) if sort_desc else sort_expression
pony_query = pony_query.sort_by(sort_expression)
return pony_query
@classmethod
async def get_entries_threaded(cls, **kwargs):
def _get_results():
result = cls.get_entries(**kwargs)
if not isinstance(threading.current_thread(), threading._MainThread):
db.disconnect()
return result
return await get_event_loop().run_in_executor(None, _get_results)
@classmethod
@db_session
def get_entries(cls, first=1, last=None, **kwargs):
"""
Get some torrents. Optionally sort the results by a specific field, or filter the channels based
on a keyword/whether you are subscribed to it.
:return: A list of class members
"""
pony_query = cls.get_entries_query(**kwargs)
return pony_query[(first or 1) - 1 : last]
@classmethod
@db_session
def get_total_count(cls, **kwargs):
"""
Get total count of torrents that would be returned if there would be no pagination/limits/sort
"""
for p in ["first", "last", "sort_by", "sort_desc"]:
kwargs.pop(p, None)
return cls.get_entries_query(**kwargs).count()
@classmethod
@db_session
def get_entries_count(cls, **kwargs):
for p in ["first", "last"]:
kwargs.pop(p, None)
return cls.get_entries_query(**kwargs).count()
@classmethod
def get_auto_complete_terms(cls, keyword, max_terms, limit=10):
if not keyword:
return []
with db_session:
result = cls.search_keyword("\"" + keyword + "\"*", lim=limit)[:]
titles = [g.title.lower() for g in result]
# Copy-pasted from the old DBHandler (almost) completely
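            # e.g. for keyword 'ubun' and a title 'ubuntu desktop iso', the term
            # collected below is 'ubuntu' (illustrative example)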
all_terms = set()
for line in titles:
if len(all_terms) >= max_terms:
break
i1 = line.find(keyword)
i2 = line.find(' ', i1 + len(keyword))
term = line[i1:i2] if i2 >= 0 else line[i1:]
if term != keyword:
all_terms.add(term)
return list(all_terms)
def to_simple_dict(self):
"""
Return a basic dictionary with information about the channel.
"""
simple_dict = {
"type": self._discriminator_,
"id": self.id_,
"origin_id": self.origin_id,
"public_key": hexlify(self.public_key),
"name": self.title,
"category": self.tags,
"status": self.status,
}
return simple_dict
return MetadataNode
|
hbiyik/tribler
|
src/tribler-core/tribler_core/modules/metadata_store/orm_bindings/metadata_node.py
|
Python
|
lgpl-3.0
| 9,088
|
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.misc import to_unicode
from weboob.tools.compat import StrConv
class BrowserIncorrectPassword(Exception):
pass
class BrowserForbidden(Exception):
pass
class BrowserBanned(BrowserIncorrectPassword):
pass
class BrowserUnavailable(Exception):
pass
class BrowserInteraction(Exception):
pass
class BrowserQuestion(BrowserInteraction, StrConv):
"""
When raised by a browser,
"""
def __init__(self, *fields):
self.fields = fields
def __str__(self):
return ", ".join("{}: {}".format(
field.id or field.label, field.description) for field in self.fields
)
def __unicode__(self):
return ", ".join(
u"{}: {}".format(
to_unicode(field.id) or to_unicode(field.label),
to_unicode(field.description)
) for field in self.fields
)
class DecoupledValidation(BrowserInteraction):
def __init__(self, message='', resource=None, *values):
super(DecoupledValidation, self).__init__(*values)
self.message = message
self.resource = resource
def __str__(self):
return self.message
class AppValidation(DecoupledValidation):
pass
class AppValidationError(Exception):
pass
class AppValidationCancelled(AppValidationError):
pass
class AppValidationExpired(AppValidationError):
pass
class BrowserRedirect(BrowserInteraction):
def __init__(self, url, resource=None):
self.url = url
# Needed for transfer redirection
self.resource = resource
def __str__(self):
return 'Redirecting to %s' % self.url
class CaptchaQuestion(Exception):
"""Site requires solving a CAPTCHA (base class)"""
# could be improved to pass the name of the backendconfig key
def __init__(self, type=None, **kwargs):
super(CaptchaQuestion, self).__init__("The site requires solving a captcha")
self.type = type
for key, value in kwargs.items():
setattr(self, key, value)
class WrongCaptchaResponse(Exception):
"""when website tell us captcha response is not good"""
def __init__(self, message=None):
super(WrongCaptchaResponse, self).__init__(message or "Captcha response is wrong")
class ImageCaptchaQuestion(CaptchaQuestion):
type = 'image_captcha'
image_data = None
def __init__(self, image_data):
super(ImageCaptchaQuestion, self).__init__(self.type, image_data=image_data)
class NocaptchaQuestion(CaptchaQuestion):
type = 'g_recaptcha'
website_key = None
website_url = None
def __init__(self, website_key, website_url):
super(NocaptchaQuestion, self).__init__(self.type, website_key=website_key, website_url=website_url)
class RecaptchaQuestion(CaptchaQuestion):
type = 'g_recaptcha'
website_key = None
website_url = None
def __init__(self, website_key, website_url):
super(RecaptchaQuestion, self).__init__(self.type, website_key=website_key, website_url=website_url)
class RecaptchaV3Question(CaptchaQuestion):
type = 'g_recaptcha'
website_key = None
website_url = None
action = None
def __init__(self, website_key, website_url, action=None):
super(RecaptchaV3Question, self).__init__(self.type, website_key=website_key, website_url=website_url)
self.action = action
class FuncaptchaQuestion(CaptchaQuestion):
type = 'funcaptcha'
website_key = None
website_url = None
sub_domain = None
def __init__(self, website_key, website_url, sub_domain=None):
super(FuncaptchaQuestion, self).__init__(
self.type, website_key=website_key, website_url=website_url, sub_domain=sub_domain)
class BrowserHTTPNotFound(Exception):
pass
class BrowserHTTPError(BrowserUnavailable):
pass
class BrowserHTTPSDowngrade(Exception):
pass
class BrowserSSLError(BrowserUnavailable):
pass
class ParseError(Exception):
pass
class FormFieldConversionWarning(UserWarning):
"""
A value has been set to a form's field and has been implicitly converted.
"""
class NoAccountsException(Exception):
pass
class ModuleInstallError(Exception):
pass
class ModuleLoadError(Exception):
def __init__(self, module_name, msg):
super(ModuleLoadError, self).__init__(msg)
self.module = module_name
class ActionNeeded(Exception):
pass
class AuthMethodNotImplemented(ActionNeeded):
pass
class BrowserPasswordExpired(ActionNeeded):
pass
class NeedInteractive(Exception):
pass
class NeedInteractiveForRedirect(NeedInteractive):
"""
An authentication is required to connect and credentials are not supplied
"""
pass
class NeedInteractiveFor2FA(NeedInteractive):
"""
A 2FA is required to connect, credentials are supplied but not the second factor
"""
pass
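# Illustrative sketch (not part of weboob): how an application might react to a
# captcha challenge raised by a module; the image bytes are a placeholder.
if __name__ == '__main__':
    try:
        raise ImageCaptchaQuestion(b'<png bytes>')
    except CaptchaQuestion as exc:
        print('captcha of type %r must be solved by the user' % exc.type)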
|
laurentb/weboob
|
weboob/exceptions.py
|
Python
|
lgpl-3.0
| 5,643
|
from generator.analysis.verifier_tools import *
def after_SystemStateFlow(analysis):
# Find all three systemcall handlers
(Handler11, Handler12, Handler13, Idle, StartOS) = \
get_functions(analysis.system_graph, ["Handler11", "Handler12", "Handler13", "Idle", "StartOS"])
t = RunningTaskToolbox(analysis)
# Handler11 has higher priority than Handler12
t.reachability(StartOS, "StartOS", [], # =>
[Handler11])
# Handler11 has higher priority than Handler12
t.reachability(Handler11, "ActivateTask", [Handler12], # =>
[Handler11])
# Handler13 is directly started
t.reachability(Handler11, "ActivateTask", [Handler13], # =>
[Handler13])
# Handler12 is always activated afterwards
t.reachability(Handler13, "TerminateTask", [], # =>
[Handler11])
# Handler12 is always activated afterwards
t.reachability(Handler11, "TerminateTask", [], # =>
[Handler12])
# Handler12 is always activated afterwards
t.reachability(Handler12, "TerminateTask", [], # =>
[Idle])
# Idle handler is never left
t.reachability(Idle, "Idle", [], # =>
[Idle])
t.promise_all_syscalls_checked()
|
danceos/dosek
|
app/bcc1/task1/verify_a.py
|
Python
|
lgpl-3.0
| 1,285
|
# Copyright (c) 2018 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Version import Version
from unittest import TestCase
import pytest
major_versions = [Version("1"), Version(b"1"), Version(1), Version([1]), Version(["1"]), Version("1."), Version("MOD-1"), Version("1B"), Version(Version("1"))]
major_minor_versions = [Version("1.2"), Version("1-2"), Version("1_2"), Version(b"1.2"), Version("1.2BETA"), Version([1, 2]), Version(["1", "2"]), Version([1, "2"]), Version([1, b"2"])]
major_minor_revision_versions = [Version("1.2.3"), Version("1.2.3BETA"), Version("1_2-3"), Version(b"1.2.3"), Version([1, 2, 3]), Version(["1", "2", "3"]), Version([1, "2", 3]), Version("MOD-1.2.3"), Version(["1", 2, b"3"])]
def check_version_equals(first_version: Version, second_version: Version):
assert first_version == second_version
assert first_version.getMajor() == second_version.getMajor()
assert first_version.getMinor() == second_version.getMinor()
assert first_version.getRevision() == second_version.getRevision()
@pytest.mark.parametrize("first_version", major_versions)
@pytest.mark.parametrize("second_version", major_versions)
def test_major_equals(first_version, second_version):
check_version_equals(first_version, second_version)
@pytest.mark.parametrize("first_version", major_minor_versions)
@pytest.mark.parametrize("second_version", major_minor_versions)
def test_major_and_minor_equals(first_version, second_version):
check_version_equals(first_version, second_version)
@pytest.mark.parametrize("first_version", major_minor_revision_versions)
@pytest.mark.parametrize("second_version", major_minor_revision_versions)
def test_major_minor_revision_equals(first_version, second_version):
check_version_equals(first_version, second_version)
@pytest.mark.parametrize("first_version", major_versions)
@pytest.mark.parametrize("second_version", major_minor_versions)
def test_check_version_smaller(first_version, second_version):
assert first_version < second_version
# Just to be on the really safe side
assert first_version != second_version
assert not first_version > second_version
@pytest.mark.parametrize("first_version", major_minor_versions)
@pytest.mark.parametrize("second_version", major_minor_revision_versions)
def test_check_version_smaller_2(first_version, second_version):
assert first_version < second_version
# Just to be on the really safe side
assert first_version != second_version
assert not first_version > second_version
def test_versionPostfix():
version = Version("1.2.3-alpha.4")
assert version.getPostfixType() == "alpha"
assert version.getPostfixVersion() == 4
assert version.hasPostFix()
assert not Version("").hasPostFix()
assert version <= Version("1.2.3-alpha.5")
assert version < Version("1.2.3-alpha.5")
def test_versionWeirdCompares():
version = Version("1.2.3-alpha.4")
assert not version == 12
def test_wrongType():
version = Version(None)
assert version == Version("0")
def test_compareStrings():
version_string = "1.0.0"
version = Version(version_string)
assert version == version_string
assert version >= version_string
assert version < "2.0.0"
assert version <= "2.0.0"
assert "0" < version
assert Version("1.0.0") > Version("1.0.0-alpha.7")
# Defend people from ignoring the typing.
assert not version > None
assert not version < None
def test_compareBeta():
normal_version = Version("1.0.0")
beta_version = Version("1.0.0-BETA")
assert normal_version > beta_version
def test_comparePostfixVersion():
assert Version("1.0.0-alpha.1") < Version("1.0.0-alpha.2")
|
Ultimaker/Uranium
|
tests/TestVersion.py
|
Python
|
lgpl-3.0
| 3,738
|
#! /usr/bin/env python
# -*- coding: utf8 -*-
import openturns as ot
import sys
import shutil
import os
import argparse
import threading
import tempfile
# multicore example:
# ./t_distributed_python_wrapper.py --sample-size 10 --test local
#
# multihost example (need working ssh server):
# ./t_distributed_python_wrapper.py --sample-size 100 --test remote --hosts host1 host2
# todo: add ctrl-c test
parser = argparse.ArgumentParser(description="test openturns "
"distributed-python-wrapper",
add_help=False)
parser.add_argument('--help', action='store_const', const=[],
help='show this help message and exit')
parser.add_argument('--debug', '-d', action='store_true',
help='print openturns debug\'log')
parser.add_argument('--hosts', '-h', nargs='+')
parser.add_argument('--test', '-t', nargs=1,
help='test_type (remote, local)')
parser.add_argument('--point', action='store_true',
help='test with one point rather than a sample')
parser.add_argument('--analytical', action='store_true',
help='test without separate workdir')
parser.add_argument('--cleanup', '-c', nargs=1,
help='cleanup workdirs')
parser.add_argument('--error', '-e', action='store_true',
help='make error')
parser.add_argument('--tmpdir', '-w', nargs=1,
help='tmpdir')
parser.add_argument('--sample-size', '-s', nargs=1,
help='number of points to compute')
parser.add_argument('--work-time', '-p', nargs=1,
help='number of second of computing per point')
parser.add_argument('--nb-output', '-n', nargs=1,
help='number of output variable')
args = parser.parse_args()
#print "args: " + str(args)
# print help if asked
if args.help is not None:
    parser.print_help()
    sys.exit(0)
if args.debug:
ot.Log.Show( ot.Log.Flags() | ot.Log.DBG )
hosts = None
if args.hosts != None:
hosts = args.hosts
test_type = "local"
if args.test != None:
test_type = args.test[0]
test_point = False
if args.point:
test_point = True
test_analytical = False
if args.analytical:
test_analytical = True
cleanup = "ok"
if args.cleanup != None:
cleanup = args.cleanup[0]
make_error = args.error
tmpdir = ""
if args.tmpdir != None:
tmpdir = args.tmpdir[0]
sample_size = 5
if args.sample_size != None:
sample_size = int(args.sample_size[0])
work_time = 1
if args.work_time != None:
work_time = float(args.work_time[0])
nb_output = 1
if args.nb_output != None:
nb_output = int(args.nb_output[0])
print( "test_type:" + test_type + ", test_point:" + str(test_point) + \
", test_analytical:" + str(test_analytical) + \
", cleanup:" + cleanup + ", make_error:" + \
str(make_error) + ", tmpdir:" + tmpdir + ", sample_size:" + \
str(sample_size) + ", work_time:" + str(work_time) + ", hosts:" + \
str(hosts) + ", nb_output:" + str(nb_output))
# uncomment following line to show wanted logs
#ot.Log.Show( ot.Log.ALL )
#
#ot.Log.Show( ot.Log.Flags() | ot.Log.WRAPPER )
#ot.Log.Show( ot.Log.Flags() | ot.Log.DBG )
# print compute progression :
ot.Log.Show( ot.Log.Flags() | ot.Log.INFO )
# set number of thread = number of job concurrently started
#ResourceMap.Set("parallel-threads", "6")
#print "Nb of thread of localhost: ", ot.ResourceMap.Get("parallel-threads")
script_dir = os.path.dirname( os.path.realpath( __file__ ) )
program_wrapper = script_dir + os.sep + "dummy_program_wrapper.py"
func_wrapper = script_dir + os.sep + "dummy_func_wrapper.py"
program = script_dir + os.sep + "dummy_program.py"
dist_func = ot.OpenTURNSDistributedPythonFunction(n_input=4,
n_output=nb_output,
wrapper_file=program_wrapper,
hosts=hosts,
cleanup=cleanup,
files_to_send=[program],
tmpdir=tmpdir)
if test_analytical:
dist_func.set_separate_workdir(False)
if 'win' not in sys.platform:
# change group pid in order to avoid wrapper_launcher destroying parent process
# when interrupting
os.setpgid(0, 0)
model = ot.NumericalMathFunction( dist_func )
# create sample
inS = ot.NumericalSample(sample_size, 4)
if not make_error:
F=2
else:
F=666
for i in range(sample_size):
inS[i,0] = i + 1
inS[i,1] = F
inS[i,2] = work_time
inS[i,3] = nb_output
print( 'Compute' )
if make_error:
try:
outS = model( inS )
except:
print( '====== An error raised, that\'s ok ======' )
else:
Exception("ERROR: no exception!")
else:
if not test_point:
outS = model( inS )
else:
outS = []
outS.append( model( inS[0] ) )
sample_size = 1
print( 'Results' )
if sample_size < 64 and nb_output <= 2:
print( outS )
# 01 comes from deterministic value, check output values are ok
sum = 0
ok = 1
for i in range(sample_size):
z = outS[i][0]
valid_value = (i + 1) * F
if z != valid_value:
ok = 0
print( 'point ' + str(i) + ' incorrect, got value: ' + str(z) + \
', valid value is ' + str(valid_value) )
if ok:
print( 'Results are OK.' )
else:
print ('!!!!!!!!!!!!!!!ERROR!!!!!!!!!!!!!!!!!' )
exit(1)
# check existing or not workdir
check_workdir_beg = None
check_workdir_end = None
# guess workdir
workdir = dist_func.wd_hosts_in.remote_tmpdir
if not workdir:
workdir = tempfile.gettempdir()
workdir += os.sep + dist_func.wd_hosts_in.workdir_basename
if cleanup == "no":
check_workdir_beg = 0
check_workdir_end = sample_size
elif cleanup == "ok" and sample_size > 2 and make_error:
check_workdir_beg = 2
check_workdir_end = 2
if check_workdir_beg != None:
# todo: check that 0..n directory are there on remote nodes too?
if test_type == "local":
workdirs = range(check_workdir_beg, check_workdir_end)
dirs = os.listdir(workdir)
for i in workdirs:
if str(i) not in dirs:
err_msg = "The directory " + workdir + os.sep +\
str(i) + " was not found!"
print (err_msg)
raise Exception(err_msg)
shutil.rmtree(workdir)
print( 'Workdir found. Cleaned.' )
else:
if os.path.exists(workdir):
raise Exception('The workdir was not cleaned!')
print( 'Workdir not found: ok.' )
# print an empty line in order to detect exception
print ('')
|
dbarbier/privot
|
python/test/t_distributed_python_wrapper.py
|
Python
|
lgpl-3.0
| 6,819
|
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2020, 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy.types import PropertyGroup
from bpy.props import (
PointerProperty,
StringProperty,
EnumProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
CollectionProperty,
)
class COBieProperties(PropertyGroup):
cobie_ifc_file: StringProperty(default="", name="COBie IFC File")
cobie_types: StringProperty(default=".COBieType", name="COBie Types")
cobie_components: StringProperty(default=".COBie", name="COBie Components")
cobie_json_file: StringProperty(default="", name="COBie JSON File")
should_load_from_memory: BoolProperty(default=False, name="Load from Memory")
|
IfcOpenShell/IfcOpenShell
|
src/blenderbim/blenderbim/bim/module/cobie/prop.py
|
Python
|
lgpl-3.0
| 1,454
|
"""
Parsing for Tor network status documents. This supports both the v2 and v3
dir-spec. Documents can be obtained from a few sources...
* the 'cached-consensus' file in tor's data directory
* tor metrics, at https://metrics.torproject.org/data.html
* directory authorities and mirrors via their DirPort
... and contain the following sections...
* document header
* list of :class:`stem.descriptor.networkstatus.DirectoryAuthority`
* list of :class:`stem.descriptor.router_status_entry.RouterStatusEntry`
* document footer
Of these, the router status entry section can be quite large (on the order of
hundreds of kilobytes). As such we provide a couple of methods for reading
network status documents...
* :class:`stem.descriptor.networkstatus.NetworkStatusDocumentV3` constructor
If read time and memory aren't a concern then you can simply use the document
constructor. Router entries are assigned to its 'routers' attribute...
::
from stem.descriptor.networkstatus import NetworkStatusDocumentV3
# Reads the full consensus into memory twice (both for the parsed and
# unparsed contents).
consensus_file = open('.tor/cached-consensus', 'r')
consensus = NetworkStatusDocumentV3(consensus_file.read())
consensus_file.close()
for router in consensus.routers:
print router.nickname
* :func:`stem.descriptor.networkstatus.parse_file`
Alternatively, the :func:`~stem.descriptor.networkstatus.parse_file` function
provides an iterator for a document's routers. Those routers refer to a 'thin'
document, which doesn't have a 'routers' attribute. This allows for lower
memory usage and upfront runtime.
::
from stem.descriptor.networkstatus import parse_file
with open('.tor/cached-consensus', 'r') as consensus_file:
# Processes the routers as we read them in. The routers refer to a document
# with an unset 'routers' attribute.
for router in parse_file(consensus_file):
print router.nickname
**Module Overview:**
::
parse_file - parses a network status file, providing an iterator for its routers
NetworkStatusDocument - Network status document
|- NetworkStatusDocumentV2 - Version 2 network status document
+- NetworkStatusDocumentV3 - Version 3 network status document
DocumentSignature - Signature of a document by a directory authority
DirectoryAuthority - Directory authority as defined in a v3 network status document
"""
import datetime
import StringIO
import stem.descriptor
import stem.descriptor.router_status_entry
import stem.version
import stem.util.connection
import stem.util.tor_tools
# Version 2 network status document fields, tuples of the form...
# (keyword, is_mandatory)
NETWORK_STATUS_V2_FIELDS = (
("network-status-version", True),
("dir-source", True),
("fingerprint", True),
("contact", True),
("dir-signing-key", True),
("client-versions", False),
("server-versions", False),
("published", True),
("dir-options", False),
("directory-signature", True),
)
# Network status document are either a 'vote' or 'consensus', with different
# mandatory fields for each. Both though require that their fields appear in a
# specific order. This is an ordered listing of the following...
#
# (field, in_votes, in_consensus, is_mandatory)
HEADER_STATUS_DOCUMENT_FIELDS = (
("network-status-version", True, True, True),
("vote-status", True, True, True),
("consensus-methods", True, False, False),
("consensus-method", False, True, False),
("published", True, False, True),
("valid-after", True, True, True),
("fresh-until", True, True, True),
("valid-until", True, True, True),
("voting-delay", True, True, True),
("client-versions", True, True, False),
("server-versions", True, True, False),
("known-flags", True, True, True),
("params", True, True, False),
)
FOOTER_STATUS_DOCUMENT_FIELDS = (
("directory-footer", True, True, True),
("bandwidth-weights", False, True, False),
("directory-signature", True, True, True),
)
HEADER_FIELDS = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS]
FOOTER_FIELDS = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS]
AUTH_START = "dir-source"
ROUTERS_START = "r"
FOOTER_START = "directory-footer"
V2_FOOTER_START = "directory-signature"
DEFAULT_PARAMS = {
"bwweightscale": 10000,
"cbtdisabled": 0,
"cbtnummodes": 3,
"cbtrecentcount": 20,
"cbtmaxtimeouts": 18,
"cbtmincircs": 100,
"cbtquantile": 80,
"cbtclosequantile": 95,
"cbttestfreq": 60,
"cbtmintimeout": 2000,
"cbtinitialtimeout": 60000,
}
# KeyCertificate fields, tuple is of the form...
# (keyword, is_mandatory)
KEY_CERTIFICATE_PARAMS = (
('dir-key-certificate-version', True),
('dir-address', False),
('fingerprint', True),
('dir-identity-key', True),
('dir-key-published', True),
('dir-key-expires', True),
('dir-signing-key', True),
('dir-key-crosscert', False),
('dir-key-certification', True),
)
BANDWIDTH_WEIGHT_ENTRIES = (
"Wbd", "Wbe", "Wbg", "Wbm",
"Wdb",
"Web", "Wed", "Wee", "Weg", "Wem",
"Wgb", "Wgd", "Wgg", "Wgm",
"Wmb", "Wmd", "Wme", "Wmg", "Wmm",
)
def parse_file(document_file, validate = True, is_microdescriptor = False, document_version = 3):
"""
Parses a network status and iterates over the RouterStatusEntry in it. The
document that these instances reference has an empty 'routers' attribute to
allow for limited memory usage.
:param file document_file: file with network status document content
:param bool validate: checks the validity of the document's contents if
**True**, skips these checks otherwise
:param bool is_microdescriptor: **True** if this is for a microdescriptor
consensus, **False** otherwise
:param int document_version: network status document version
:returns: :class:`stem.descriptor.networkstatus.NetworkStatusDocument` object
:raises:
* **ValueError** if the document_version is unrecognized or the contents are
malformed and validate is **True**
* **IOError** if the file can't be read
"""
# getting the document without the routers section
header = stem.descriptor._read_until_keywords((ROUTERS_START, FOOTER_START, V2_FOOTER_START), document_file)
routers_start = document_file.tell()
stem.descriptor._read_until_keywords((FOOTER_START, V2_FOOTER_START), document_file, skip = True)
routers_end = document_file.tell()
footer = document_file.readlines()
document_content = "".join(header + footer)
if document_version == 2:
document_type = NetworkStatusDocumentV2
router_type = stem.descriptor.router_status_entry.RouterStatusEntryV3
elif document_version == 3:
document_type = NetworkStatusDocumentV3
if not is_microdescriptor:
router_type = stem.descriptor.router_status_entry.RouterStatusEntryV3
else:
router_type = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3
else:
raise ValueError("Document version %i isn't recognized (only able to parse v2 or v3)" % document_version)
desc_iterator = stem.descriptor.router_status_entry.parse_file(
document_file,
validate,
entry_class = router_type,
entry_keyword = ROUTERS_START,
start_position = routers_start,
end_position = routers_end,
extra_args = (document_type(document_content, validate),),
)
for desc in desc_iterator:
yield desc
class NetworkStatusDocument(stem.descriptor.Descriptor):
"""
Common parent for network status documents.
"""
def __init__(self, raw_content):
super(NetworkStatusDocument, self).__init__(raw_content)
self._unrecognized_lines = []
def get_unrecognized_lines(self):
return list(self._unrecognized_lines)
class NetworkStatusDocumentV2(NetworkStatusDocument):
"""
Version 2 network status document. These have been deprecated and are no
longer generated by Tor.
:var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
contained in the document
:var int version: **\*** document version
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var str fingerprint: **\*** authority's fingerprint
:var str contact: **\*** authority's contact information
:var str signing_key: **\*** authority's public signing key
:var list client_versions: list of recommended client tor version strings
:var list server_versions: list of recommended server tor version strings
:var datetime published: **\*** time when the document was published
:var list options: **\*** list of things that this authority decides
:var str signing_authority: **\*** name of the authority signing the document
:var str signature: **\*** authority's signature for the document
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
def __init__(self, raw_content, validate = True):
super(NetworkStatusDocumentV2, self).__init__(raw_content)
self.version = None
self.hostname = None
self.address = None
self.dir_port = None
self.fingerprint = None
self.contact = None
self.signing_key = None
self.client_versions = []
self.server_versions = []
self.published = None
self.options = []
self.signing_authority = None
self.signature = None
# Splitting the document from the routers. Unlike v3 documents we're not
# bending over backwards on the validation by checking the field order or
# that header/footer attributes aren't in the wrong section. This is a
# deprecated descriptor type - patches welcome if you want those checks.
document_file = StringIO.StringIO(raw_content)
document_content = "".join(stem.descriptor._read_until_keywords((ROUTERS_START, V2_FOOTER_START), document_file))
self.routers = tuple(stem.descriptor.router_status_entry.parse_file(
document_file,
validate,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV2,
entry_keyword = ROUTERS_START,
section_end_keywords = V2_FOOTER_START,
extra_args = (self,),
))
document_content += "\n" + document_file.read()
entries = stem.descriptor._get_descriptor_components(document_content, validate)
if validate: self._check_constraints(entries)
self._parse(entries, validate)
def _parse(self, entries, validate):
for keyword, values in entries.items():
value, block_contents = values[0]
line = "%s %s" % (keyword, value) # original line
if block_contents: line += "\n%s" % block_contents
if keyword == "network-status-version":
if not value.isdigit():
if not validate: continue
raise ValueError("Network status document has a non-numeric version: %s" % line)
self.version = int(value)
if validate and self.version != 2:
raise ValueError("Expected a version 2 network status document, got version '%s' instead" % self.version)
elif keyword == "dir-source":
dir_source_comp = value.split()
if len(dir_source_comp) < 3:
if not validate: continue
raise ValueError("The 'dir-source' line of a v2 network status document must have three values: %s" % line)
if validate:
if not dir_source_comp[0]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: %s" % line)
elif not stem.util.connection.is_valid_ip_address(dir_source_comp[1]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[1])
elif not stem.util.connection.is_valid_port(dir_source_comp[2], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[2])
elif not dir_source_comp[2].isdigit():
continue
self.hostname = dir_source_comp[0]
self.address = dir_source_comp[1]
self.dir_port = None if dir_source_comp[2] == '0' else int(dir_source_comp[2])
elif keyword == "fingerprint":
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError("Authority's fingerprint in a v2 network status document is malformed: %s" % line)
self.fingerprint = value
elif keyword == "contact":
self.contact = value
elif keyword == "dir-signing-key":
self.signing_key = block_contents
elif keyword in ("client-versions", "server-versions"):
# v2 documents existed while there were tor versions using the 'old'
# style, hence we aren't attempting to parse them
for version_str in value.split(","):
if keyword == 'client-versions':
self.client_versions.append(version_str)
elif keyword == 'server-versions':
self.server_versions.append(version_str)
elif keyword == "published":
try:
self.published = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
except ValueError:
if validate:
raise ValueError("Version 2 network status document's 'published' time wasn't parseable: %s" % value)
elif keyword == "dir-options":
self.options = value.split()
elif keyword == "directory-signature":
self.signing_authority = value
self.signature = block_contents
else:
self._unrecognized_lines.append(line)
# 'client-versions' and 'server-versions' are only required if "Versions"
# is among the options
if validate and "Versions" in self.options:
if not ('client-versions' in entries and 'server-versions' in entries):
raise ValueError("Version 2 network status documents must have a 'client-versions' and 'server-versions' when 'Versions' is listed among its dir-options:\n%s" % str(self))
def _check_constraints(self, entries):
required_fields = [field for (field, is_mandatory) in NETWORK_STATUS_V2_FIELDS if is_mandatory]
for keyword in required_fields:
if not keyword in entries:
raise ValueError("Network status document (v2) must have a '%s' line:\n%s" % (keyword, str(self)))
# all recognized fields can only appear once
single_fields = [field for (field, _) in NETWORK_STATUS_V2_FIELDS]
for keyword in single_fields:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("Network status document (v2) can only have a single '%s' line, got %i:\n%s" % (keyword, len(entries[keyword]), str(self)))
if 'network-status-version' != entries.keys()[0]:
raise ValueError("Network status document (v2) are expected to start with a 'network-status-version' line:\n%s" % str(self))
class NetworkStatusDocumentV3(NetworkStatusDocument):
"""
Version 3 network status document. This could be either a vote or consensus.
:var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
contained in the document
:var int version: **\*** document version
:var str version_flavor: **\*** flavor associated with the document (such as 'microdesc')
:var bool is_consensus: **\*** **True** if the document is a consensus
:var bool is_vote: **\*** **True** if the document is a vote
:var bool is_microdescriptor: **\*** **True** if this is a microdescriptor
flavored document, **False** otherwise
:var datetime valid_after: **\*** time when the consensus became valid
:var datetime fresh_until: **\*** time when the next consensus should be produced
:var datetime valid_until: **\*** time when this consensus becomes obsolete
:var int vote_delay: **\*** number of seconds allowed for collecting votes
from all authorities
:var int dist_delay: **\*** number of seconds allowed for collecting
signatures from all authorities
:var list client_versions: list of recommended client tor versions
:var list server_versions: list of recommended server tor versions
:var list known_flags: **\*** list of known router flags
:var dict params: **\*** dict of parameter(**str**) => value(**int**) mappings
:var list directory_authorities: **\*** list of :class:`~stem.descriptor.networkstatus.DirectoryAuthority`
objects that have generated this document
:var list signatures: **\*** :class:`~stem.descriptor.networkstatus.DocumentSignature`
of the authorities that have signed the document
**Consensus Attributes:**
:var int consensus_method: method version used to generate this consensus
:var dict bandwidth_weights: dict of weight(str) => value(int) mappings
**Vote Attributes:**
:var list consensus_methods: list of ints for the supported method versions
:var datetime published: time when the document was published
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as None if undefined
"""
def __init__(self, raw_content, validate = True, default_params = True):
"""
Parse a v3 network status document.
:param str raw_content: raw network status document data
:param bool validate: **True** if the document is to be validated, **False** otherwise
:param bool default_params: includes defaults in our params dict, otherwise
it just contains values from the document
:raises: **ValueError** if the document is invalid
"""
super(NetworkStatusDocumentV3, self).__init__(raw_content)
document_file = StringIO.StringIO(raw_content)
self._header = _DocumentHeader(document_file, validate, default_params)
# merge header attributes into us
for attr, value in vars(self._header).items():
if attr != "_unrecognized_lines":
setattr(self, attr, value)
else:
self._unrecognized_lines += value
self.directory_authorities = tuple(stem.descriptor.router_status_entry.parse_file(
document_file,
validate,
entry_class = DirectoryAuthority,
entry_keyword = AUTH_START,
section_end_keywords = (ROUTERS_START, FOOTER_START),
extra_args = (self._header.is_vote,),
))
if not self._header.is_microdescriptor:
router_type = stem.descriptor.router_status_entry.RouterStatusEntryV3
else:
router_type = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3
self.routers = tuple(stem.descriptor.router_status_entry.parse_file(
document_file,
validate,
entry_class = router_type,
entry_keyword = ROUTERS_START,
section_end_keywords = FOOTER_START,
extra_args = (self,),
))
self._footer = _DocumentFooter(document_file, validate, self._header)
# merge header attributes into us
for attr, value in vars(self._footer).items():
if attr != "_unrecognized_lines":
setattr(self, attr, value)
else:
self._unrecognized_lines += value
def meets_consensus_method(self, method):
"""
Checks if we meet the given consensus-method. This works for both votes and
consensuses, checking our 'consensus-method' and 'consensus-methods'
entries.
:param int method: consensus-method to check for
:returns: **True** if we meet the given consensus-method, and **False** otherwise
"""
return self._header.meets_consensus_method(method)
def __cmp__(self, other):
if not isinstance(other, NetworkStatusDocumentV3):
return 1
return str(self) > str(other)
class _DocumentHeader(object):
def __init__(self, document_file, validate, default_params):
self.version = None
self.version_flavor = None
self.is_consensus = True
self.is_vote = False
self.is_microdescriptor = False
self.consensus_methods = []
self.published = None
self.consensus_method = None
self.valid_after = None
self.fresh_until = None
self.valid_until = None
self.vote_delay = None
self.dist_delay = None
self.client_versions = []
self.server_versions = []
self.known_flags = []
self.params = dict(DEFAULT_PARAMS) if default_params else {}
self._unrecognized_lines = []
content = "".join(stem.descriptor._read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file))
entries = stem.descriptor._get_descriptor_components(content, validate)
self._parse(entries, validate)
# doing this validation afterward so we know our 'is_consensus' and
# 'is_vote' attributes
if validate:
_check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS)
_check_for_misordered_fields(entries, HEADER_FIELDS)
def meets_consensus_method(self, method):
return bool(self.consensus_method >= method or filter(lambda x: x >= method, self.consensus_methods))
def _parse(self, entries, validate):
for keyword, values in entries.items():
value, _ = values[0]
line = "%s %s" % (keyword, value)
# all known header fields can only appear once
if validate and len(values) > 1 and keyword in HEADER_FIELDS:
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if keyword == 'network-status-version':
# "network-status-version" version
if ' ' in value:
version, flavor = value.split(' ', 1)
else:
version, flavor = value, None
if not version.isdigit():
if not validate: continue
raise ValueError("Network status document has a non-numeric version: %s" % line)
self.version = int(version)
self.version_flavor = flavor
self.is_microdescriptor = flavor == 'microdesc'
if validate and self.version != 3:
raise ValueError("Expected a version 3 network status document, got version '%s' instead" % self.version)
elif keyword == 'vote-status':
# "vote-status" type
#
# The consensus-method and consensus-methods fields are optional since
# they weren't included in version 1. Setting a default now that we
# know if we're a vote or not.
if value == 'consensus':
self.is_consensus, self.is_vote = True, False
self.consensus_method = 1
elif value == 'vote':
self.is_consensus, self.is_vote = False, True
self.consensus_methods = [1]
elif validate:
raise ValueError("A network status document's vote-status line can only be 'consensus' or 'vote', got '%s' instead" % value)
elif keyword == 'consensus-methods':
# "consensus-methods" IntegerList
consensus_methods = []
for entry in value.split(" "):
if entry.isdigit():
consensus_methods.append(int(entry))
elif validate:
raise ValueError("A network status document's consensus-methods must be a list of integer values, but was '%s'" % value)
self.consensus_methods = consensus_methods
if validate and not (1 in self.consensus_methods):
raise ValueError("Network status votes must include consensus-method version 1")
elif keyword == 'consensus-method':
# "consensus-method" Integer
if value.isdigit():
self.consensus_method = int(value)
elif validate:
raise ValueError("A network status document's consensus-method must be an integer, but was '%s'" % value)
elif keyword in ('published', 'valid-after', 'fresh-until', 'valid-until'):
try:
date_value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
if keyword == 'published':
self.published = date_value
elif keyword == 'valid-after':
self.valid_after = date_value
elif keyword == 'fresh-until':
self.fresh_until = date_value
elif keyword == 'valid-until':
self.valid_until = date_value
except ValueError:
if validate:
raise ValueError("Network status document's '%s' time wasn't parseable: %s" % (keyword, value))
elif keyword == "voting-delay":
# "voting-delay" VoteSeconds DistSeconds
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit() and value_comp[1].isdigit():
self.vote_delay = int(value_comp[0])
self.dist_delay = int(value_comp[1])
elif validate:
raise ValueError("A network status document's 'voting-delay' line must be a pair of integer values, but was '%s'" % value)
elif keyword in ("client-versions", "server-versions"):
for entry in value.split(","):
try:
version_value = stem.version.Version(entry)
if keyword == 'client-versions':
self.client_versions.append(version_value)
elif keyword == 'server-versions':
self.server_versions.append(version_value)
except ValueError:
if validate:
raise ValueError("Network status document's '%s' line had '%s', which isn't a parseable tor version: %s" % (keyword, entry, line))
elif keyword == "known-flags":
# "known-flags" FlagList
# simply fetches the entries, excluding empty strings
self.known_flags = [entry for entry in value.split(" ") if entry]
elif keyword == "params":
# "params" [Parameters]
# Parameter ::= Keyword '=' Int32
# Int32 ::= A decimal integer between -2147483648 and 2147483647.
# Parameters ::= Parameter | Parameters SP Parameter
# should only appear in consensus-method 7 or later
if validate and not self.meets_consensus_method(7):
raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
# skip if this is a blank line
if value == "": continue
self.params.update(_parse_int_mappings(keyword, value, validate))
if validate:
self._check_params_constraints()
else:
self._unrecognized_lines.append(line)
def _check_params_constraints(self):
"""
Checks that the params we know about are within their documented ranges.
"""
for key, value in self.params.items():
# all parameters are constrained to int32 range
minimum, maximum = -2147483648, 2147483647
if key == "circwindow":
minimum, maximum = 100, 1000
elif key == "CircuitPriorityHalflifeMsec":
minimum = -1
elif key in ("perconnbwrate", "perconnbwburst"):
minimum = 1
elif key == "refuseunknownexits":
minimum, maximum = 0, 1
elif key == "bwweightscale":
minimum = 1
elif key == "cbtdisabled":
minimum, maximum = 0, 1
elif key == "cbtnummodes":
minimum, maximum = 1, 20
elif key == "cbtrecentcount":
minimum, maximum = 3, 1000
elif key == "cbtmaxtimeouts":
minimum, maximum = 3, 10000
elif key == "cbtmincircs":
minimum, maximum = 1, 10000
elif key == "cbtquantile":
minimum, maximum = 10, 99
elif key == "cbtclosequantile":
minimum, maximum = self.params.get("cbtquantile", minimum), 99
elif key == "cbttestfreq":
minimum = 1
elif key == "cbtmintimeout":
minimum = 500
elif key == "cbtinitialtimeout":
minimum = self.params.get("cbtmintimeout", minimum)
if value < minimum or value > maximum:
raise ValueError("'%s' value on the params line must be in the range of %i - %i, was %i" % (key, minimum, maximum, value))
class _DocumentFooter(object):
def __init__(self, document_file, validate, header):
self.signatures = []
self.bandwidth_weights = {}
self._unrecognized_lines = []
content = document_file.read()
if validate and content and not header.meets_consensus_method(9):
raise ValueError("Network status document's footer should only appear in consensus-method 9 or later")
elif not content and not header.meets_consensus_method(9):
return # footer is optional and there's nothing to parse
entries = stem.descriptor._get_descriptor_components(content, validate)
self._parse(entries, validate, header)
if validate:
_check_for_missing_and_disallowed_fields(header, entries, FOOTER_STATUS_DOCUMENT_FIELDS)
_check_for_misordered_fields(entries, FOOTER_FIELDS)
def _parse(self, entries, validate, header):
for keyword, values in entries.items():
value, block_contents = values[0]
line = "%s %s" % (keyword, value)
# all known footer fields can only appear once except...
# * 'directory-signature' in a consensus
if validate and len(values) > 1 and keyword in FOOTER_FIELDS:
if not (keyword == 'directory-signature' and header.is_consensus):
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if keyword == "directory-footer":
# nothing to parse, simply checking that we don't have a value
if validate and value:
raise ValueError("A network status document's 'directory-footer' line shouldn't have any content, got '%s'" % line)
elif keyword == "bandwidth-weights":
self.bandwidth_weights = _parse_int_mappings(keyword, value, validate)
if validate:
weight_keys = tuple(sorted(self.bandwidth_weights.keys()))
if weight_keys != BANDWIDTH_WEIGHT_ENTRIES:
expected_label = ', '.join(BANDWIDTH_WEIGHT_ENTRIES)
actual_label = ', '.join(weight_keys)
raise ValueError("A network status document's 'bandwidth-weights' entries should be '%s', got '%s'" % (expected_label, actual_label))
elif keyword == "directory-signature":
for sig_value, block_contents in values:
if not header.is_microdescriptor:
expected_spaces = 1
format_label = 'directory-signature FINGERPRINT KEY_DIGEST'
else:
expected_spaces = 2
format_label = 'directory-signature METHOD FINGERPRINT KEY_DIGEST'
if sig_value.count(" ") != expected_spaces or not block_contents:
if not validate: continue
raise ValueError("Authority signatures in a network status document are expected to be of the form '%s\\nSIGNATURE', got:\n%s\n%s" % (format_label, sig_value, block_contents))
if not header.is_microdescriptor:
method = None
fingerprint, key_digest = sig_value.split(" ", 1)
else:
method, fingerprint, key_digest = sig_value.split(" ", 2)
self.signatures.append(DocumentSignature(method, fingerprint, key_digest, block_contents, validate))
def _check_for_missing_and_disallowed_fields(header, entries, fields):
"""
Checks that we have mandatory fields for our type, and that we don't have
any fields exclusive to the other (ie, no vote-only fields appear in a
consensus or vice versa).
:param _DocumentHeader header: document header
:param dict entries: ordered keyword/value mappings of the header or footer
:param list fields: expected field attributes (either
**HEADER_STATUS_DOCUMENT_FIELDS** or **FOOTER_STATUS_DOCUMENT_FIELDS**)
:raises: **ValueError** if we're missing mandatory fields or have fields we shouldn't
"""
missing_fields, disallowed_fields = [], []
for field, in_votes, in_consensus, mandatory in fields:
if mandatory and ((header.is_consensus and in_consensus) or (header.is_vote and in_votes)):
# mandatory field, check that we have it
if not field in entries.keys():
missing_fields.append(field)
elif (header.is_consensus and not in_consensus) or (header.is_vote and not in_votes):
# field we shouldn't have, check that we don't
if field in entries.keys():
disallowed_fields.append(field)
if missing_fields:
raise ValueError("Network status document is missing mandatory field: %s" % ', '.join(missing_fields))
if disallowed_fields:
raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields))
def _check_for_misordered_fields(entries, expected):
"""
To be valid, a network status document's fields need to appear in a specific
order. Checks that known fields appear in that order (unrecognized fields
are ignored).
:param dict entries: ordered keyword/value mappings of the header or footer
:param list expected: ordered list of expected fields (either
**HEADER_FIELDS** or **FOOTER_FIELDS**)
:raises: **ValueError** if entries aren't properly ordered
"""
# Earlier validation has ensured that our fields either belong to our
# document type or are unknown. Remove the unknown fields since they
# reflect a spec change and can appear anywhere in the document.
actual = filter(lambda field: field in expected, entries.keys())
# Narrow the expected to just what we have. If the lists then match then the
# order's valid.
expected = filter(lambda field: field in actual, expected)
if actual != expected:
actual_label = ', '.join(actual)
expected_label = ', '.join(expected)
raise ValueError("The fields in a section of the document are misordered. It should be '%s' but was '%s'" % (actual_label, expected_label))
def _parse_int_mappings(keyword, value, validate):
# Parse a series of 'key=value' entries, checking the following:
# - values are integers
# - keys are sorted in lexical order
results, seen_keys = {}, []
for entry in value.split(" "):
try:
if not '=' in entry:
raise ValueError("must only have 'key=value' entries")
entry_key, entry_value = entry.split("=", 1)
try:
# the int() function accepts things like '+123', but we don't want to
if entry_value.startswith('+'):
raise ValueError()
entry_value = int(entry_value)
except ValueError:
raise ValueError("'%s' is a non-numeric value" % entry_value)
if validate:
# parameters should be in ascending order by their key
for prior_key in seen_keys:
if prior_key > entry_key:
raise ValueError("parameters must be sorted by their key")
results[entry_key] = entry_value
seen_keys.append(entry_key)
except ValueError, exc:
if not validate: continue
raise ValueError("Unable to parse network status document's '%s' line (%s): %s'" % (keyword, exc, value))
return results
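# Illustrative, hypothetical call (not part of the original module):
#   _parse_int_mappings("params", "circwindow=80 refuseunknownexits=1", True)
# returns {'circwindow': 80, 'refuseunknownexits': 1}; swapping the two entries
# raises a ValueError when validating, since keys must appear in sorted order.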
class DirectoryAuthority(stem.descriptor.Descriptor):
"""
Directory authority information obtained from a v3 network status document.
:var str nickname: **\*** authority's nickname
:var str fingerprint: **\*** authority's fingerprint
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var int or_port: **\*** authority's ORPort
:var str contact: **\*** contact information
**Consensus Attributes:**
:var str vote_digest: **\*** digest of the authority that contributed to the consensus
**Vote Attributes:**
:var str legacy_dir_key: fingerprint of an obsolete identity key
:var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\***
authority's key certificate
**\*** mandatory attribute
"""
def __init__(self, raw_content, validate = True, is_vote = False):
"""
Parse a directory authority entry in a v3 network status document.
:param str raw_content: raw directory authority entry information
:param bool validate: checks the validity of the content if True, skips
these checks otherwise
:param bool is_vote: True if this is for a vote, False if it's for a consensus
:raises: ValueError if the descriptor data is invalid
"""
super(DirectoryAuthority, self).__init__(raw_content)
self.nickname = None
self.fingerprint = None
self.hostname = None
self.address = None
self.dir_port = None
self.or_port = None
self.contact = None
self.vote_digest = None
self.legacy_dir_key = None
self.key_certificate = None
self._unrecognized_lines = []
self._parse(raw_content, validate, is_vote)
def _parse(self, content, validate, is_vote):
"""
Parses the given content and applies the attributes.
:param str content: descriptor content
:param bool validate: checks validity if True
:param bool is_vote: **True** if this is for a vote, **False** if it's for
a consensus
:raises: **ValueError** if a validity check fails
"""
# separate the directory authority entry from its key certificate
key_div = content.find('\ndir-key-certificate-version')
if key_div != -1:
key_cert_content = content[key_div + 1:]
content = content[:key_div + 1]
else:
key_cert_content = None
entries = stem.descriptor._get_descriptor_components(content, validate)
if validate and 'dir-source' != entries.keys()[0]:
raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content))
# check that we have mandatory fields
if validate:
required_fields, excluded_fields = ["dir-source", "contact"], []
if is_vote:
if not key_cert_content:
raise ValueError("Authority votes must have a key certificate:\n%s" % content)
excluded_fields += ["vote-digest"]
elif not is_vote:
if key_cert_content:
raise ValueError("Authority consensus entries shouldn't have a key certificate:\n%s" % content)
required_fields += ["vote-digest"]
excluded_fields += ["legacy-dir-key"]
for keyword in required_fields:
if not keyword in entries:
raise ValueError("Authority entries must have a '%s' line:\n%s" % (keyword, content))
for keyword in entries:
if keyword in excluded_fields:
type_label = "votes" if is_vote else "consensus entries"
raise ValueError("Authority %s shouldn't have a '%s' line:\n%s" % (type_label, keyword, content))
for keyword, values in entries.items():
value, _ = values[0]
line = "%s %s" % (keyword, value)
# all known attributes can only appear at most once
if validate and len(values) > 1 and keyword in ('dir-source', 'contact', 'legacy-dir-key', 'vote-digest'):
raise ValueError("Authority entries can only have a single '%s' line, got %i:\n%s" % (keyword, len(values), content))
if keyword == 'dir-source':
# "dir-source" nickname identity address IP dirport orport
dir_source_comp = value.split(" ")
if len(dir_source_comp) < 6:
if not validate: continue
raise ValueError("Authority entry's 'dir-source' line must have six values: %s" % line)
if validate:
if not stem.util.tor_tools.is_valid_nickname(dir_source_comp[0]):
raise ValueError("Authority's nickname is invalid: %s" % dir_source_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(dir_source_comp[1]):
raise ValueError("Authority's fingerprint is invalid: %s" % dir_source_comp[1])
elif not dir_source_comp[2]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: %s" % line)
elif not stem.util.connection.is_valid_ip_address(dir_source_comp[3]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[3])
elif not stem.util.connection.is_valid_port(dir_source_comp[4], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[4])
elif not stem.util.connection.is_valid_port(dir_source_comp[5]):
raise ValueError("Authority's ORPort is invalid: %s" % dir_source_comp[5])
elif not (dir_source_comp[4].isdigit() and dir_source_comp[5].isdigit()):
continue
self.nickname = dir_source_comp[0]
self.fingerprint = dir_source_comp[1]
self.hostname = dir_source_comp[2]
self.address = dir_source_comp[3]
self.dir_port = None if dir_source_comp[4] == '0' else int(dir_source_comp[4])
self.or_port = int(dir_source_comp[5])
elif keyword == 'contact':
# "contact" string
self.contact = value
elif keyword == 'legacy-dir-key':
# "legacy-dir-key" FINGERPRINT
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError("Authority has a malformed legacy directory key: %s" % line)
self.legacy_dir_key = value
elif keyword == 'vote-digest':
# "vote-digest" digest
# technically not a fingerprint, but has the same characteristics
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError("Authority has a malformed vote digest: %s" % line)
self.vote_digest = value
else:
self._unrecognized_lines.append(line)
if key_cert_content:
self.key_certificate = KeyCertificate(key_cert_content, validate)
def get_unrecognized_lines(self):
"""
Returns any unrecognized lines.
:returns: a list of unrecognized lines
"""
return self._unrecognized_lines
def __cmp__(self, other):
if not isinstance(other, DirectoryAuthority):
return 1
return str(self) > str(other)
class KeyCertificate(stem.descriptor.Descriptor):
"""
Directory key certificate for a v3 network status document.
:var int version: **\*** version of the key certificate
:var str address: authority's IP address
:var int dir_port: authority's DirPort
:var str fingerprint: **\*** authority's fingerprint
:var str identity_key: **\*** long term authority identity key
:var datetime published: **\*** time when this key was generated
:var datetime expires: **\*** time after which this key becomes invalid
:var str signing_key: **\*** directory server's public signing key
:var str crosscert: signature made using certificate's signing key
:var str certification: **\*** signature of this key certificate signed with
the identity key
**\*** mandatory attribute
"""
def __init__(self, raw_content, validate = True):
super(KeyCertificate, self).__init__(raw_content)
self.version = None
self.address = None
self.dir_port = None
self.fingerprint = None
self.identity_key = None
self.published = None
self.expires = None
self.signing_key = None
self.crosscert = None
self.certification = None
self._unrecognized_lines = []
self._parse(raw_content, validate)
def _parse(self, content, validate):
"""
Parses the given content and applies the attributes.
:param str content: descriptor content
:param bool validate: checks validity if **True**
:raises: **ValueError** if a validity check fails
"""
entries = stem.descriptor._get_descriptor_components(content, validate)
if validate:
if 'dir-key-certificate-version' != entries.keys()[0]:
raise ValueError("Key certificates must start with a 'dir-key-certificate-version' line:\n%s" % (content))
elif 'dir-key-certification' != entries.keys()[-1]:
raise ValueError("Key certificates must end with a 'dir-key-certification' line:\n%s" % (content))
# check that we have mandatory fields and that our known fields only
# appear once
for keyword, is_mandatory in KEY_CERTIFICATE_PARAMS:
if is_mandatory and not keyword in entries:
raise ValueError("Key certificates must have a '%s' line:\n%s" % (keyword, content))
entry_count = len(entries.get(keyword, []))
if entry_count > 1:
raise ValueError("Key certificates can only have a single '%s' line, got %i:\n%s" % (keyword, entry_count, content))
for keyword, values in entries.items():
value, block_contents = values[0]
line = "%s %s" % (keyword, value)
if keyword == 'dir-key-certificate-version':
# "dir-key-certificate-version" version
if not value.isdigit():
if not validate: continue
raise ValueError("Key certificate has a non-integer version: %s" % line)
self.version = int(value)
if validate and self.version != 3:
raise ValueError("Expected a version 3 key certificate, got version '%i' instead" % self.version)
elif keyword == 'dir-address':
# "dir-address" IPPort
if not ':' in value:
if not validate: continue
raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: %s" % line)
address, dirport = value.split(':', 1)
if validate:
if not stem.util.connection.is_valid_ip_address(address):
raise ValueError("Key certificate's address isn't a valid IPv4 address: %s" % line)
elif not stem.util.connection.is_valid_port(dirport):
raise ValueError("Key certificate's dirport is invalid: %s" % line)
elif not dirport.isdigit():
continue
self.address = address
self.dir_port = int(dirport)
elif keyword == 'fingerprint':
# "fingerprint" fingerprint
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError("Key certificate's fingerprint is malformed: %s" % line)
self.fingerprint = value
elif keyword in ('dir-key-published', 'dir-key-expires'):
# "dir-key-published" YYYY-MM-DD HH:MM:SS
# "dir-key-expires" YYYY-MM-DD HH:MM:SS
try:
date_value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
if keyword == 'dir-key-published':
self.published = date_value
elif keyword == 'dir-key-expires':
self.expires = date_value
except ValueError:
if validate:
raise ValueError("Key certificate's '%s' time wasn't parseable: %s" % (keyword, value))
elif keyword in ('dir-identity-key', 'dir-signing-key', 'dir-key-crosscert', 'dir-key-certification'):
# "dir-identity-key" NL a public key in PEM format
# "dir-signing-key" NL a key in PEM format
# "dir-key-crosscert" NL CrossSignature
# "dir-key-certification" NL Signature
if validate and not block_contents:
raise ValueError("Key certificate's '%s' line must be followed by a key block: %s" % (keyword, line))
if keyword == 'dir-identity-key':
self.identity_key = block_contents
elif keyword == 'dir-signing-key':
self.signing_key = block_contents
elif keyword == 'dir-key-crosscert':
self.crosscert = block_contents
elif keyword == 'dir-key-certification':
self.certification = block_contents
else:
self._unrecognized_lines.append(line)
def get_unrecognized_lines(self):
"""
Returns any unrecognized lines.
:returns: **list** of unrecognized lines
"""
return self._unrecognized_lines
def __cmp__(self, other):
if not isinstance(other, KeyCertificate):
return 1
return str(self) > str(other)
class DocumentSignature(object):
"""
Directory signature of a v3 network status document.
:var str method: method used to make the signature, this only appears in
microdescriptor consensuses
:var str identity: fingerprint of the authority that made the signature
:var str key_digest: digest of the signing key
:var str signature: document signature
:param bool validate: checks validity if **True**
:raises: **ValueError** if a validity check fails
"""
def __init__(self, method, identity, key_digest, signature, validate = True):
# Checking that these attributes are valid. Technically the key
# digest isn't a fingerprint, but it has the same characteristics.
if validate:
if not stem.util.tor_tools.is_valid_fingerprint(identity):
raise ValueError("Malformed fingerprint (%s) in the document signature" % (identity))
if not stem.util.tor_tools.is_valid_fingerprint(key_digest):
raise ValueError("Malformed key digest (%s) in the document signature" % (key_digest))
# TODO: The method field is undocumented so I'm just guessing how we should
# handle it. Ticket for clarification...
# https://trac.torproject.org/7072
self.method = method
self.identity = identity
self.key_digest = key_digest
self.signature = signature
def __cmp__(self, other):
if not isinstance(other, DocumentSignature):
return 1
for attr in ("identity", "key_digest", "signature"):
if getattr(self, attr) > getattr(other, attr): return 1
elif getattr(self, attr) < getattr(other, attr): return -1
return 0
|
eoinof/stem
|
stem/descriptor/networkstatus.py
|
Python
|
lgpl-3.0
| 49,888
|
#!/usr/bin/env python3
import curses
from CursesMenu import CursesMenu
def foo():
print('baaaang!')
return 0
if __name__ == '__main__':
s = curses.initscr()
m = CursesMenu(s, 'menu')
m.add('one', foo)
m.add('two', foo)
m.add('three', foo)
try:
m.run()
except Exception:
curses.endwin()
raise
else:
curses.endwin()
|
reverendhomer/ANUS-Python-Menu
|
l.py
|
Python
|
unlicense
| 389
|
# Generated by Django 2.1.1 on 2019-01-01 19:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('led', '0002_auto_20181229_2037'),
]
operations = [
migrations.RemoveField(
model_name='currentledstate',
name='blue',
),
migrations.RemoveField(
model_name='currentledstate',
name='green',
),
migrations.RemoveField(
model_name='currentledstate',
name='red',
),
]
|
piotr-worotnicki/raspberry-pi-rgb-led-controller
|
led/migrations/0003_auto_20190101_2024.py
|
Python
|
unlicense
| 551
|
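# Counts how many complete copies of the word 'word' can be spelled by scanning
# s left to right (case-insensitively), matching its letters in order.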
def testit(s):
key = 'word'
letter_i = 0
for c in s:
if c.lower() == key[letter_i % len(key)]:
letter_i += 1
return letter_i // len(key)
|
SelvorWhim/competitive
|
Codewars/ThinkingTestingHowManyWord.py
|
Python
|
unlicense
| 173
|
#!/usr/bin/env python
from os import path
from IPython.html.nbextensions import install_nbextension
from IPython.html.services.config import ConfigManager
install_nbextension(
path.join(path.dirname(path.abspath(__file__)), 'notebook_input_mode'), user=True, verbose=2)
cm = ConfigManager().update('notebook', {"load_extensions": {"notebook_input_mode/main": True}})
|
dvbuntu/notebook_input_mode
|
install.py
|
Python
|
unlicense
| 373
|
'''
You're given strings J representing the types of stones that are
jewels, and S representing the stones you have. Each character
in S is a type of stone you have. You want to know how many of
the stones you have are also jewels.
The letters in J are guaranteed distinct, and all characters in
J and S are letters. Letters are case sensitive, so "a" is
considered a different type of stone from "A".
Example 1:
Input: J = "aA", S = "aAAbbbb"
Output: 3
Example 2:
Input: J = "z", S = "ZZ"
Output: 0
Note:
S and J will consist of letters and have length at most 50.
The characters in J are distinct.
'''
class Solution(object):
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
return sum(s in J for s in S)
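# A minimal usage sketch (hypothetical driver, mirroring the examples in the
# problem statement above):
#   assert Solution().numJewelsInStones("aA", "aAAbbbb") == 3
#   assert Solution().numJewelsInStones("z", "ZZ") == 0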
|
zzzzzzzlmy/MyLeetCode
|
771. Jewels and Stones.py
|
Python
|
unlicense
| 824
|
"""
coinfee.net example.
"""
import warnings
from uuid import uuid4 as random_uuid
import jinja2
import coinfee
# You should change this unless you want to make a donation.
ADDRESS = '16jCrzcXo2PxadrQiQwUgwrmEwDGQYBwZq'
# Price in Satoshis. 10,000 or more.
SATOSHIS = 10000
# See deprecation warnings in logs.
warnings.simplefilter('always')
def render(template, page={}):
template = jinja2.Environment(
loader=jinja2.FileSystemLoader('./')
).get_template(template)
return str(template.render(page=page))
def application(env, start_response):
"""
This is where uwsgi calls us.
"""
def reply(status, data, headers=[]):
"""
Need to use this as return reply().
"""
start_response(str(status), headers)
return data
path = env['REQUEST_URI']
if path == '/purchase':
# Generate a random ID for payment.
uuid = str(random_uuid())[:19]
url = '/purchase/{}'.format(uuid)
# Redirect to unique buy URL.
return reply(307, '', [('Location', url)])
if path.startswith('/purchase/'):
page = {}
page['unique'] = path[len('/purchase/'):]
coinfee_payment = coinfee.payment(ADDRESS,
SATOSHIS,
page['unique'])
page['paid'] = coinfee_payment.status
page['address'] = coinfee_payment.address
page['satoshis'] = coinfee_payment.satoshis
page['bitcoins'] = "{0:.8f}".format(page['satoshis'] *
0.00000001)
return reply(200, render('purchase.html', page))
if path == '/':
return reply(200, render('index.html'))
return reply(404, 'Not found.')
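# One possible way to serve this module (hypothetical invocation; --http and
# --wsgi-file are standard uwsgi options, the port is arbitrary):
#   uwsgi --http :9090 --wsgi-file wsgi.py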
|
coinfee/coinfee-python
|
example/wsgi.py
|
Python
|
unlicense
| 1,771
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from contractor.openstack.common import log as logging
from contractor import runner
from oslo.config import cfg
import sys
CONF = cfg.CONF
CONF.set_default('debug', True)
CONF.set_default('verbose', True)
def main():
environment = str(sys.argv[1])
config = str(sys.argv[2]) if len(sys.argv) > 2 else 'contractor.json'
logging.setup('contractor')
r = runner.Runner(config=config, environment=environment)
r.execute()
|
moniker-dns/contractor
|
contractor/cmd/contractor.py
|
Python
|
apache-2.0
| 1,127
|
# coding: utf-8
from __future__ import unicode_literals
from genty import genty_repeat
from test.test_case_base import TestCase
class GentyRepeatTest(TestCase):
"""Tests for :mod:`box.test.genty.genty_repeat`."""
def test_repeat_decorator_decorates_function_with_appropriate_repeat_count(self):
@genty_repeat(15)
def some_func():
pass
self.assertEqual(15, some_func.genty_repeat_count) # pylint:disable=no-member
def test_repeat_decorator_decorates_method_with_appropriate_repeat_count(self):
class SomeClass(object):
@genty_repeat(13)
def some_func(self):
pass
some_instance = SomeClass()
self.assertEqual(13, some_instance.some_func.genty_repeat_count) # pylint:disable=no-member
def test_repeat_rejects_negative_counts(self):
with self.assertRaises(ValueError) as context:
@genty_repeat(-1)
def _():
pass
self.assertIn('Please pick a value >= 0', str(context.exception))
def test_repeat_allows_zero_iterations(self):
@genty_repeat(0)
def some_func():
pass
self.assertEqual(0, some_func.genty_repeat_count) # pylint:disable=no-member
|
box/genty
|
test/test_genty_repeat.py
|
Python
|
apache-2.0
| 1,264
|
# Example brute-force solution in Python: find the largest palindrome that is a product of two 4-digit numbers
l=[]
m=0;
for i in range(9999,999,-1):
for j in range(9999,999,-1):
if i*j>m and str(i*j)==str(i*j)[::-1] :
m=i*j
l=[i,j]
print m, l
|
namnatulco/project-euler-cpp
|
004/solution.py
|
Python
|
apache-2.0
| 192
|
"""Auto-generated file, do not edit by hand. UZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_UZ = PhoneMetadata(id='UZ', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[04]\\d(?:\\d(?:\\d{2})?)?', possible_length=(2, 3, 5)),
toll_free=PhoneNumberDesc(national_number_pattern='0(?:0[1-3]|[1-3]|50)', example_number='01', possible_length=(2, 3)),
emergency=PhoneNumberDesc(national_number_pattern='0(?:0[1-3]|[1-3]|50)', example_number='01', possible_length=(2, 3)),
short_code=PhoneNumberDesc(national_number_pattern='0(?:0[1-3]|[1-3]|50)|45400', example_number='01', possible_length=(2, 3, 5)),
carrier_specific=PhoneNumberDesc(national_number_pattern='454\\d\\d', example_number='45400', possible_length=(5,)),
sms_services=PhoneNumberDesc(national_number_pattern='454\\d\\d', example_number='45400', possible_length=(5,)),
short_data=True)
|
daviddrysdale/python-phonenumbers
|
python/phonenumbers/shortdata/region_UZ.py
|
Python
|
apache-2.0
| 979
|
# toontown.toontowngui.FrameColorPicker
from direct.gui.DirectGui import *
from otp.otpgui.ColorPicker import ColorPicker
from toontown.toonbase import TTLocalizer, ToontownGlobals
class FrameColorPicker(ColorPicker):
def __init__(self, minSat, maxSat, minVal, maxVal, frameCallback, text = TTLocalizer.ChooseAColor):
self.frameCallback = frameCallback
self.pickedColor = None
gui = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
okImage = [ gui.find('**/ChtBx_OKBtn_' + name) for name in ('UP', 'DN', 'Rllvr') ]
self.frame = DirectFrame(relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=1.075, text=text, text_scale=0.09, text_pos=(0, 0.4))
self.okButton = DirectButton(self.frame, relief=None, image=okImage, pos=(0, 0, -0.375), text=TTLocalizer.lOK, text_scale=0.06, text_pos=(0, -0.1), command=self.__colorChosen)
ColorPicker.__init__(self, self.frame, minSat, maxSat, minVal, maxVal, self.__changeColor, (0.15, 0, 0.035))
gui.removeNode()
return
def destroy(self):
ColorPicker.destroy(self)
self.frame.destroy()
self.okButton.destroy()
del self.frame
del self.okButton
def __changeColor(self, color):
self.frame['geom_color'] = color
self.pickedColor = color
def __colorChosen(self):
self.frameCallback(self.pickedColor)
self.destroy()
|
DedMemez/ODS-August-2017
|
toontowngui/FrameColorPicker.py
|
Python
|
apache-2.0
| 1,561
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service ops where servers are started late or preempted."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import data_service_test_base
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
TMP_WORK_DIR = data_service_test_base.TMP_WORK_DIR
NO_WORK_DIR = data_service_test_base.NO_WORK_DIR
class DataServiceOpsTest(data_service_test_base.TestBase,
parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherStop(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
results.append(next(iterator).numpy())
cluster.stop_dispatcher()
# After the dispatcher dies, the worker should continue providing the rest
# of the dataset's elements.
for _ in range(num_elements - 1):
results.append(next(iterator).numpy())
self.assertEqual(results, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartBeforeReading(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
cluster.restart_dispatcher()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartDuringReading(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
cluster.restart_dispatcher()
for elem in iterator:
results.append(elem.numpy())
self.assertEqual(list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartBetweenIterations(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(100, cluster)
self.assertDatasetProduces(ds, list(range(num_elements)))
cluster.restart_dispatcher()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherManyRestarts(self):
cluster = self.create_cluster(num_workers=1)
num_elements_start = 10
num_elements_end = 15
datasets = []
for num_elements in range(num_elements_start, num_elements_end):
datasets.append(
self.make_distributed_range_dataset(num_elements, cluster))
cluster.restart_dispatcher()
for ds, num_elements in zip(datasets,
range(num_elements_start, num_elements_end)):
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherAndWorkerRestart(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
cluster.restart_dispatcher()
cluster.restart_worker()
self.assertDatasetProduces(ds, list(range(num_elements)))
cluster.restart_dispatcher()
cluster.restart_worker()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherAndMultiWorkerRestart(self):
num_workers = 2
cluster = self.create_cluster(num_workers=num_workers)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
cluster.restart_dispatcher()
for worker_index in range(num_workers):
cluster.restart_worker(worker_index=worker_index)
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(num_workers * list(range(num_elements)), results)
cluster.restart_dispatcher()
for worker_index in range(num_workers):
cluster.restart_worker(worker_index=worker_index)
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(num_workers * list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testStartServersLate(self):
# Test that the data service client performs retries instead of failing when
    # the dataset is created before the dispatcher and worker are started.
try:
import portpicker # pylint: disable=g-import-not-at-top
dispatcher_port = portpicker.pick_unused_port()
except:
raise self.skipTest("Flakes in portpicker library do not represent "
"TensorFlow errors.")
cluster = self.create_cluster(
num_workers=1, dispatcher_port=dispatcher_port, start=False)
def start_servers():
time.sleep(0.5)
cluster.start_dispatcher()
cluster.start_workers()
start_servers_thread = threading.Thread(target=start_servers, daemon=True)
start_servers_thread.start()
num_elements = 10
ds = self.make_distributed_range_dataset(num_elements, cluster)
results = [elem.numpy() for elem in ds]
self.assertEqual(list(range(num_elements)), results)
start_servers_thread.join()
@combinations.generate(test_base.eager_only_combinations())
def testAddWorkerMidJob(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
# Read halfway through the dataset.
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
cluster.add_worker()
# Wait for the new worker to register with the dispatcher.
while cluster.num_registered_workers() < 2:
time.sleep(10 / 1000) # 10ms
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(2 * list(range(num_elements)), results)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(use_same_port=[True, False]),
data_service_test_base.all_cluster_configurations()))
def testRestartWorker(self, use_same_port, work_dir, fault_tolerant_mode):
cluster = self.create_cluster(
num_workers=1,
work_dir=work_dir,
fault_tolerant_mode=fault_tolerant_mode)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
# Read halfway through the dataset.
midpoint = num_elements // 2
for i in range(midpoint):
self.assertEqual(i, next(iterator).numpy())
# Stop the original worker and start a new one.
cluster.restart_worker(use_same_port=use_same_port)
# There may have been some elements prefetched from the first worker
# before it was stopped.
while True:
val = next(iterator).numpy()
if val == 0:
break
# The dataset starts over now that we read from the new worker.
# TODO(b/157086991): Iterate until end of sequence when we support
# detecting lost workers.
for i in range(1, num_elements // 2):
val = next(iterator).numpy()
self.assertEqual(i, val)
@combinations.generate(test_base.eager_only_combinations())
def testChangeProcessingModeAfterRestart(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
range_dataset = dataset_ops.Dataset.range(num_elements)
ds = range_dataset.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service=cluster.target,
job_name="test"))
iterator = iter(ds)
for i in range(num_elements // 2):
self.assertEqual(i, next(iterator).numpy())
cluster.restart_dispatcher()
ds = range_dataset.apply(
data_service_ops.distribute(
processing_mode="distributed_epoch",
service=cluster.target,
job_name="test"))
with self.assertRaisesOpError("already an existing job with that name "
"using processing mode <parallel_epochs>"):
next(iter(ds)).numpy()
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(work_dir=[TMP_WORK_DIR, NO_WORK_DIR])))
def testDistributeLargeGraphThenRegisterWorker(self, work_dir):
cluster = self.create_cluster(
num_workers=0, work_dir=work_dir, fault_tolerant_mode=False)
# Larger than default OSS grpc message size limit of 4MB.
tensor = array_ops.ones((2, 1000, 1000), dtype=dtypes.float32)
ds = dataset_ops.Dataset.from_tensors(tensor)
ds = self.make_distributed_dataset(ds, cluster)
it = iter(ds)
cluster.add_worker()
self.assertAllEqual(next(it), tensor)
if __name__ == "__main__":
test.main()
|
karllessard/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/data_service_ops_ft_test.py
|
Python
|
apache-2.0
| 10,153
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clean up resources from gcp projects. """
import argparse
import collections
import datetime
import json
import os
import subprocess
import sys
# A resource type that needs to be cleaned up.
Resource = collections.namedtuple('Resource', 'group name subgroup condition managed tolerate')
DEMOLISH_ORDER = [
# [WARNING FROM KRZYZACY] : TOUCH THIS WITH CARE!
# ORDER REALLY MATTERS HERE!
# compute resources
Resource('compute', 'instances', None, 'zone', None, False),
Resource('compute', 'addresses', None, 'region', None, False),
Resource('compute', 'disks', None, 'zone', None, False),
Resource('compute', 'firewall-rules', None, None, None, False),
Resource('compute', 'routes', None, None, None, False),
Resource('compute', 'forwarding-rules', None, 'region', None, False),
Resource('compute', 'target-http-proxies', None, None, None, False),
Resource('compute', 'target-https-proxies', None, None, None, False),
Resource('compute', 'url-maps', None, None, None, False),
Resource('compute', 'backend-services', None, 'region', None, False),
Resource('compute', 'target-pools', None, 'region', None, False),
Resource('compute', 'health-checks', None, None, None, False),
Resource('compute', 'http-health-checks', None, None, None, False),
Resource('compute', 'instance-groups', None, 'zone', 'Yes', False),
Resource('compute', 'instance-groups', None, 'zone', 'No', False),
Resource('compute', 'instance-templates', None, None, None, False),
Resource('compute', 'networks', 'subnets', 'region', None, True),
Resource('compute', 'networks', None, '', None, False),
Resource('compute', 'routes', None, None, None, False),
# logging resources
# sinks does not have creationTimestamp yet
#Resource('logging', 'sinks', None, None, None, False),
]
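# Illustration only (hypothetical project/zone values): collect() and
# clear_resources() below expand a Resource row such as
#   Resource('compute', 'instances', None, 'zone', None, False)
# into gcloud invocations of the form
#   gcloud compute -q instances list --format=json(...) --filter=... --project=my-proj
#   gcloud compute -q instances delete inst-1 inst-2 --project=my-proj --zone=us-central1-b
# where the --zone/--region flag comes from the Resource.condition field.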
def collect(project, age, resource, filt):
""" Collect a list of resources for each condition (zone or region).
Args:
project: The name of a gcp project.
age: Time cutoff from the creation of a resource.
resource: Definition of a type of gcloud resource.
filt: Filter clause for gcloud list command.
Returns:
A dict of condition : list of gcloud resource object.
Raises:
ValueError if json result from gcloud is invalid.
"""
col = collections.defaultdict(list)
cmd = ['gcloud', resource.group, '-q', resource.name]
if resource.subgroup:
cmd.append(resource.subgroup)
cmd.extend([
'list',
'--format=json(name,creationTimestamp.date(tz=UTC),zone,region,isManaged)',
'--filter=%s' % filt,
'--project=%s' % project])
print '%r' % cmd
for item in json.loads(subprocess.check_output(cmd)):
print '%r' % item
if 'name' not in item or 'creationTimestamp' not in item:
raise ValueError('%r' % item)
if resource.condition and resource.condition in item:
colname = item[resource.condition]
else:
colname = ''
if resource.managed:
if 'isManaged' not in item:
raise ValueError(resource.name, resource.managed)
else:
if resource.managed != item['isManaged']:
continue
# Unify datetime to use utc timezone.
created = datetime.datetime.strptime(item['creationTimestamp'], '%Y-%m-%dT%H:%M:%S')
print ('Found %r(%r), %r in %r, created time = %r' %
(resource.name, resource.subgroup, item['name'], colname, item['creationTimestamp']))
if created < age:
print ('Added to janitor list: %r(%r), %r' %
(resource.name, resource.subgroup, item['name']))
col[colname].append(item['name'])
return col
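# For illustration, a possible return value (hypothetical names):
#   {'us-central1-b': ['gke-test-node-1', 'gke-test-node-2'], '': ['leaked-route-1']}
# where the empty key collects resources that have no zone/region condition.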
def clear_resources(project, cols, resource, rate_limit):
"""Clear a collection of resource, from collect func above.
Args:
project: The name of a gcp project.
cols: A dict of collection of resource.
resource: Definition of a type of gcloud resource.
rate_limit: how many resources to delete per gcloud delete call
Returns:
0 if no error
1 if deletion command fails
"""
err = 0
for col, items in cols.items():
if ARGS.dryrun:
print ('Resource type %r(%r) to be deleted: %r' %
(resource.name, resource.subgroup, list(items)))
continue
manage_key = {'Yes':'managed', 'No':'unmanaged'}
# construct the customized gcloud command
base = ['gcloud', resource.group, '-q', resource.name]
if resource.subgroup:
base.append(resource.subgroup)
if resource.managed:
base.append(manage_key[resource.managed])
base.append('delete')
base.append('--project=%s' % project)
if resource.condition:
if col:
base.append('--%s=%s' % (resource.condition, col))
else:
base.append('--global')
print 'going to delete %d %s' % (len(items), resource.name)
# try to delete at most $rate_limit items at a time
for idx in xrange(0, len(items), rate_limit):
clean = items[idx:idx+rate_limit]
print 'Call %r' % (base + list(clean))
try:
subprocess.check_call(base + list(clean))
except subprocess.CalledProcessError as exc:
if not resource.tolerate:
err = 1
print >>sys.stderr, 'Error try to delete resources: %r' % exc
return err
def clean_gke_cluster(project, age, filt):
"""Clean up potential leaking gke cluster"""
# a cluster can be created in one of those three endpoints
endpoints = [
'https://test-container.sandbox.googleapis.com/', # test
'https://staging-container.sandbox.googleapis.com/', # staging
'https://container.googleapis.com/', # prod
]
err = 0
for endpoint in endpoints:
os.environ['CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER'] = endpoint
print "checking endpoint %s" % endpoint
cmd = [
'gcloud', 'container', '-q', 'clusters', 'list',
'--project=%s' % project,
'--filter=%s' % filt,
'--format=json(name,createTime,zone)'
]
print 'running %s' % cmd
output = ''
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as exc:
print >>sys.stderr, 'Cannot reach endpoint %s with %r, continue' % (endpoint, exc)
continue
for item in json.loads(output):
print 'cluster info: %r' % item
if 'name' not in item or 'createTime' not in item or 'zone' not in item:
print >>sys.stderr, 'name, createTime and zone must present'
raise ValueError('%r' % item)
# The raw createTime string looks like 2017-08-30T18:33:14+00:00
# Which python 2.7 does not support timezones.
# Since age is already in UTC time we'll just strip the timezone part
item['createTime'] = item['createTime'].split('+')[0]
created = datetime.datetime.strptime(
item['createTime'], '%Y-%m-%dT%H:%M:%S')
if created < age:
print ('Found stale gke cluster %r in %r, created time = %r' %
(item['name'], endpoint, item['createTime']))
delete = [
'gcloud', 'container', '-q', 'clusters', 'delete',
item['name'],
'--project=%s' % project,
'--zone=%s' % item['zone'],
]
try:
print 'running %s' % delete
subprocess.check_call(delete)
except subprocess.CalledProcessError as exc:
err = 1
print >>sys.stderr, 'Error try to delete cluster %s: %r' % (item['name'], exc)
return err
def main(project, days, hours, filt, rate_limit):
""" Clean up resources from a gcp project based on it's creation time
Args:
project: The name of a gcp project.
days/hours: days/hours of maximum lifetime of a gcp resource.
filt: Resource instance filters when query.
Returns:
0 if no error
1 if list or delete command fails
"""
print '[=== Start Janitor on project %r ===]' % project
err = 0
age = datetime.datetime.utcnow() - datetime.timedelta(days=days, hours=hours)
for res in DEMOLISH_ORDER:
print 'Try to search for %r with condition %r' % (res.name, res.condition)
try:
col = collect(project, age, res, filt)
if col:
err |= clear_resources(project, col, res, rate_limit)
except (subprocess.CalledProcessError, ValueError):
err |= 1 # keep clean the other resource
print >>sys.stderr, 'Fail to list resource %r from project %r' % (res.name, project)
# try to clean leaking gke cluster
try:
err |= clean_gke_cluster(project, age, filt)
except ValueError:
err |= 1 # keep clean the other resource
print >>sys.stderr, 'Fail to clean up cluster from project %r' % project
print '[=== Finish Janitor on project %r with status %r ===]' % (project, err)
sys.exit(err)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Clean up resources from an expired project')
PARSER.add_argument('--project', help='Project to clean', required=True)
PARSER.add_argument(
'--days', type=int,
help='Clean items more than --days old (added to --hours)')
PARSER.add_argument(
'--hours', type=float,
help='Clean items more than --hours old (added to --days)')
PARSER.add_argument(
'--filter',
default='name !~ ^default',
help='Filter down to these instances')
PARSER.add_argument(
'--dryrun',
default=False,
action='store_true',
help='list but not delete resources')
PARSER.add_argument(
'--ratelimit', type=int, default=50,
help='Max number of resources to bulk clear in one gcloud delete call')
ARGS = PARSER.parse_args()
# We want to allow --days=0 and --hours=0, so check against None instead.
if ARGS.days is None and ARGS.hours is None:
print >>sys.stderr, 'must specify --days and/or --hours'
sys.exit(1)
main(ARGS.project, ARGS.days or 0, ARGS.hours or 0, ARGS.filter, ARGS.ratelimit)
|
foxish/test-infra
|
boskos/janitor/janitor.py
|
Python
|
apache-2.0
| 11,282
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@time: 2017/2/20 9:18
@author: yl
"""
import re
# Post-process HanLP segmentation results: (1) drop single-character words; (2) keep words whose POS tag is 'ns', 'nsf' or 'nz'.
class SegPro(object):
def __init__(self):
pass
def process(self, sourcefile, resultfile, tag, filterlength=1):
'''
        :param sourcefile: file containing the HanLP segmentation results
        :param resultfile: txt file to which the processed results are written
        :param tag: list of POS tags; words whose tag is not in this list are filtered out
        :param filterlength: words of exactly this length are filtered out
        :return:
'''
f = open(sourcefile,'rb')
wr = open(resultfile, 'a+', encoding='utf8')
t = tag
for lineno, line in enumerate(f, 1):
line = line.strip().decode('utf-8')
s = line.split(' ')[0]
r = re.sub("([^\u4E00-\u9FD5])", '', s)
if len(r) == filterlength:
continue
# if ('/nt' in s or '/nz' in s or '/ns' in s or '/nsf' in s):
if self.tagging_filter(s, t):
wr.write(s + '\n')
print('Processing line: ', lineno)
f.close()
wr.close()
print('Done!')
def tagging_filter(self, s, tag):
for x in tag:
if x in s:
return 1
return 0
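# Example (hypothetical HanLP output): an input line such as "北京/ns 1 2" keeps
# "北京/ns" because its tag '/ns' is in `tag` and it has more than `filterlength`
# Chinese characters, while a line starting with "的/u" is dropped by the length filter.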
if __name__ == '__main__':
tag = ['/nt', '/nz', '/ns', '/nsf']
segpro = SegPro()
segpro.process('./result/BJplacePro.txt', './BJ1.txt', tag, filterlength=1)
|
yuanlisky/work
|
suffixDict/useHanLP_segPro.py
|
Python
|
apache-2.0
| 1,558
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_persistent_volume_claim import V1PersistentVolumeClaim
class TestV1PersistentVolumeClaim(unittest.TestCase):
""" V1PersistentVolumeClaim unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1PersistentVolumeClaim(self):
"""
Test V1PersistentVolumeClaim
"""
model = kubernetes.client.models.v1_persistent_volume_claim.V1PersistentVolumeClaim()
if __name__ == '__main__':
unittest.main()
|
skuda/client-python
|
kubernetes/test/test_v1_persistent_volume_claim.py
|
Python
|
apache-2.0
| 925
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mctaco dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.question_answering import mctaco
class MctacoTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = mctaco.Mctaco
DL_EXTRACT_RESULT = {
"validation": "dev_3783.tsv",
"test": "test_9942.tsv",
}
SPLITS = {
"validation": 5,
"test": 3,
}
if __name__ == "__main__":
testing.test_main()
|
tensorflow/datasets
|
tensorflow_datasets/question_answering/mctaco_test.py
|
Python
|
apache-2.0
| 1,038
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Python datastore class User to be used as a datastore data type.
Classes defined here:
User: object representing a user.
Error: base exception type
UserNotFoundError: UserService exception
RedirectTooLongError: UserService exception
NotAllowedError: UserService exception
"""
import os
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import user_service_pb
from google.appengine.runtime import apiproxy_errors
class Error(Exception):
"""Base User error type."""
class UserNotFoundError(Error):
"""Raised by User.__init__() when there's no email argument and no user is
logged in."""
class RedirectTooLongError(Error):
"""Raised by UserService calls if the generated redirect URL was too long.
"""
class NotAllowedError(Error):
"""Raised by UserService calls if the requested redirect URL is not allowed.
"""
class User(object):
"""A user.
We provide the email address, nickname, auth domain, and id for a user.
A nickname is a human-readable string which uniquely identifies a Google
user, akin to a username. It will be an email address for some users, but
not all.
"""
__user_id = None
def __init__(self, email=None, _auth_domain=None, _user_id=None):
"""Constructor.
Args:
email: An optional string of the user's email address. It defaults to
the current user's email address.
Raises:
UserNotFoundError: Raised if the user is not logged in and the email
argument is empty.
"""
if _auth_domain is None:
_auth_domain = os.environ.get('AUTH_DOMAIN')
else:
assert email is not None
assert _auth_domain
if email is None:
assert 'USER_EMAIL' in os.environ
email = os.environ['USER_EMAIL']
if _user_id is None and 'USER_ID' in os.environ:
_user_id = os.environ['USER_ID']
if not email:
raise UserNotFoundError
self.__email = email
self.__auth_domain = _auth_domain
self.__user_id = _user_id or None
def nickname(self):
"""Return this user's nickname.
The nickname will be a unique, human readable identifier for this user
with respect to this application. It will be an email address for some
users, but not all.
"""
if (self.__email and self.__auth_domain and
self.__email.endswith('@' + self.__auth_domain)):
suffix_len = len(self.__auth_domain) + 1
return self.__email[:-suffix_len]
else:
return self.__email
def email(self):
"""Return this user's email address."""
return self.__email
def user_id(self):
"""Return either a permanent unique identifying string or None.
    If the email address was set explicitly, this will return None.
"""
return self.__user_id
def auth_domain(self):
"""Return this user's auth domain."""
return self.__auth_domain
def __unicode__(self):
return unicode(self.nickname())
def __str__(self):
return str(self.nickname())
def __repr__(self):
if self.__user_id:
return "users.User(email='%s',_user_id='%s')" % (self.email(),
self.user_id())
else:
return "users.User(email='%s')" % self.email()
def __hash__(self):
return hash((self.__email, self.__auth_domain))
def __cmp__(self, other):
if not isinstance(other, User):
return NotImplemented
return cmp((self.__email, self.__auth_domain),
(other.__email, other.__auth_domain))
def create_login_url(dest_url):
"""Computes the login URL for this request and specified destination URL.
Args:
dest_url: String that is the desired final destination URL for the user
once login is complete. If 'dest_url' does not have a host
specified, we will use the host from the current request.
Returns:
string
"""
req = user_service_pb.StringProto()
resp = user_service_pb.StringProto()
req.set_value(dest_url)
try:
apiproxy_stub_map.MakeSyncCall('user', 'CreateLoginURL', req, resp)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
raise RedirectTooLongError
elif (e.application_error ==
user_service_pb.UserServiceError.NOT_ALLOWED):
raise NotAllowedError
else:
raise e
return resp.value()
CreateLoginURL = create_login_url
def create_logout_url(dest_url):
"""Computes the logout URL for this request and specified destination URL.
Args:
dest_url: String that is the desired final destination URL for the user
once logout is complete. If 'dest_url' does not have a host
specified, we will use the host from the current request.
Returns:
string
"""
req = user_service_pb.StringProto()
resp = user_service_pb.StringProto()
req.set_value(dest_url)
try:
apiproxy_stub_map.MakeSyncCall('user', 'CreateLogoutURL', req, resp)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
raise RedirectTooLongError
else:
raise e
return resp.value()
CreateLogoutURL = create_logout_url
def get_current_user():
try:
return User()
except UserNotFoundError:
return None
GetCurrentUser = get_current_user
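# Minimal usage sketch (illustrative only): the helper below and its dest_url are
# hypothetical and not part of the users API; everything it calls is the module
# API defined above.
def _example_login_link(dest_url='/'):
  """Builds a sign-in or sign-out HTML link for the current request."""
  user = get_current_user()
  if user:
    return '<a href="%s">Sign out %s</a>' % (create_logout_url(dest_url),
                                             user.nickname())
  return '<a href="%s">Sign in</a>' % create_login_url(dest_url)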
def is_current_user_admin():
"""Return true if the user making this request is an admin for this
application, false otherwise.
We specifically make this a separate function, and not a member function of
the User class, because admin status is not persisted in the datastore. It
only exists for the user making this request right now.
"""
return (os.environ.get('USER_IS_ADMIN', '0')) == '1'
IsCurrentUserAdmin = is_current_user_admin
|
jamslevy/gsoc
|
thirdparty/google_appengine/google/appengine/api/users.py
|
Python
|
apache-2.0
| 6,427
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to evaluate Inception on a single GPU.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import os.path
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/imagenet_eval',
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/imagenet_train',
"""Directory where to read model checkpoints.""")
# Flags governing the frequency of the eval.
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
# Flags governing the data used for the eval.
tf.app.flags.DEFINE_integer('num_examples', 50000,
"""Number of examples to run. Note that the eval """
"""ImageNet dataset contains 50000 examples.""")
tf.app.flags.DEFINE_string('subset', 'validation',
"""Either 'validation' or 'train'.""")
def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
"""Runs Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_1_op: Top 1 op.
top_5_op: Top 5 op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
if os.path.isabs(ckpt.model_checkpoint_path):
# Restores from checkpoint with absolute path.
saver.restore(sess, ckpt.model_checkpoint_path)
else:
# Restores from checkpoint with relative path.
saver.restore(sess, os.path.join(FLAGS.checkpoint_dir,
ckpt.model_checkpoint_path))
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/imagenet_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      print('Successfully loaded model from %s at step=%s.' %
(ckpt.model_checkpoint_path, global_step))
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
# Counts the number of correct predictions.
count_top_1 = 0.0
count_top_5 = 0.0
total_sample_count = num_iter * FLAGS.batch_size
step = 0
print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset))
start_time = time.time()
while step < num_iter and not coord.should_stop():
top_1, top_5 = sess.run([top_1_op, top_5_op])
#print('TOP 1: %s \nTOP 5: %s' % (top_1,top_5))
count_top_1 += np.sum(top_1)
count_top_5 += np.sum(top_5)
step += 1
if step % 20 == 0:
duration = time.time() - start_time
sec_per_batch = duration / 20.0
examples_per_sec = FLAGS.batch_size / sec_per_batch
          print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f '
'sec/batch)' % (datetime.now(), step, num_iter,
examples_per_sec, sec_per_batch))
start_time = time.time()
# Compute precision @ 1.
precision_at_1 = count_top_1 / total_sample_count
recall_at_5 = count_top_5 / total_sample_count
print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' %
(datetime.now(), precision_at_1, recall_at_5, total_sample_count))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Precision @ 1', simple_value=precision_at_1)
summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)
summary_writer.add_summary(summary, global_step)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate(dataset):
"""Evaluate model on Dataset for a number of steps."""
with tf.Graph().as_default():
# Get images and labels from the dataset.
images, labels, _ = image_processing.inputs(dataset)
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
# Build a Graph that computes the logits predictions from the
# inference model.
logits, _ = inception.inference(images, num_classes)
# Calculate predictions.
top_1_op = tf.nn.in_top_k(logits, labels, 1)
top_5_op = tf.nn.in_top_k(logits, labels, 5)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
graph_def=graph_def)
while True:
_eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
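# Typical call site (sketch only): the evaluation binary elsewhere in this
# package is assumed to build an ImageNet dataset wrapper and pass it in, e.g.
#   dataset = ImagenetData(subset=FLAGS.subset)
#   evaluate(dataset)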
|
Cyber-Neuron/inception_v3
|
inception/inception/inception_eval.py
|
Python
|
apache-2.0
| 6,623
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph Convolution Networks implementation adapted from https://github.com/tkipf/gcn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .models_base import glorot
from .models_base import Model
import tensorflow as tf
# Global unique layer ID dictionary for layer name assignment.
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return tf.SparseTensor(
indices=pre_out.indices,
values=pre_out.values / keep_prob,
dense_shape=pre_out.dense_shape)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class GCN(Model):
"""Graph Convolution Networks.
Attributes:
input_dim: Integer representing the number of input features.
output_dim: Integer representing the number of classes.
hidden: Integer representing the number of hidden units in the first layer
of the network.
dropout: Float representing the dropout probability during training.
aggregation: String representing an aggregation operation, that is applied
on the two inputs of the agreement model, after they are encoded through
the convolution layers. See superclass attributes for details.
activation: An activation function to be applied to the outputs of each
fully connected layer of the aggregation network.
is_binary_classification: Boolean specifying if this is model for binary
classification. If so, it uses a different loss function and returns
predictions with a single dimension, batch size.
name: String representing the model name.
"""
def __init__(self,
input_dim,
output_dim,
hidden,
dropout=0.5,
aggregation=None,
hidden_aggregation=(),
activation=tf.nn.leaky_relu,
is_binary_classification=False,
name='GCN'):
super(GCN, self).__init__(
aggregation=aggregation,
hidden_aggregation=hidden_aggregation,
activation=activation)
dropout = 0.5 if dropout is None else dropout
self.input_dim = input_dim
self.output_dim = output_dim
self.num_supports = 1
self.hidden = hidden
self.dropout = dropout
self.name = name
self.is_binary_classification = is_binary_classification
def get_encoding_and_params(self, inputs, is_train, support,
num_features_nonzero, **unused_kwargs):
"""Creates the model hidden representations and prediction ops.
For this model, the hidden representation is the last layer of the MLP,
before the logit computation. The predictions are unnormalized logits.
Args:
inputs: A tensor containing the model inputs. The first dimension is the
batch size.
is_train: A boolean placeholder specifying if this is a training or
testing setting.
support: TODO(dattias, kvis-google): add.
num_features_nonzero: Number of non-zero features.
**unused_kwargs: Other unused keyword arguments.
Returns:
encoding: A tensor containing an encoded batch of samples. The first
dimension corresponds to the batch size.
all_vars: A dictionary mapping from variable name to TensorFlow op
containing all variables used in this model.
reg_params: A dictionary mapping from a variable name to a Tensor of
parameters which will be used for regularization.
"""
# Build layers.
with tf.variable_scope(self.name + '/encoding'):
hidden, reg_params = self._construct_encoding(inputs, is_train, support,
num_features_nonzero)
# Store model variables for easy access.
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_default_graph().get_name_scope())
all_vars = {var.name: var for var in variables}
return hidden, all_vars, reg_params
def _construct_encoding(self, inputs, is_train, support,
num_features_nonzero):
"""Create weight variables."""
dropout = (
tf.constant(self.dropout, tf.float32) * tf.cast(is_train, tf.float32))
layer_1 = GraphConvolution(
input_dim=self.input_dim,
output_dim=self.hidden,
activation=tf.nn.relu,
dropout=dropout,
sparse_inputs=True,
num_features_nonzero=num_features_nonzero,
support=support,
name='GraphConvolution1')
encoding = layer_1(inputs)
reg_params = layer_1.vars
return encoding, reg_params
def get_predictions_and_params(self, encoding, is_train, **kwargs):
"""Creates the model prediction op.
For this model, the hidden representation is the last layer of the MLP,
before the logit computation. The predictions are unnormalized logits.
Args:
      encoding: A tensor containing the encoded inputs. The first dimension is the
batch size.
is_train: A placeholder representing a boolean value that specifies if
this model will be used for training or for test.
**kwargs: Other keyword arguments.
Returns:
predictions: A tensor of logits. For multiclass classification its
shape is (num_samples, num_classes), where the second dimension contains
a logit per class. For binary classification, its shape is
(num_samples,), where each element is the probability of class 1 for
that sample.
all_vars: A dictionary mapping from variable name to TensorFlow op
containing all variables used in this model.
reg_params: A dictionary mapping from a variable name to a Tensor of
parameters which will be used for regularization.
"""
reg_params = {}
support = kwargs['support']
num_features_nonzero = kwargs['num_features_nonzero']
# Build layers.
with tf.variable_scope(self.name + '/prediction'):
dropout = (
tf.constant(self.dropout, tf.float32) * tf.cast(is_train, tf.float32))
layer_2 = GraphConvolution(
input_dim=self.hidden,
output_dim=self.output_dim,
activation=lambda x: x,
dropout=dropout,
num_features_nonzero=num_features_nonzero,
support=support,
name='GraphConvolution2')
predictions = layer_2(encoding)
if self.is_binary_classification:
predictions = predictions[:, 0]
# Store model variables for easy access.
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_default_graph().get_name_scope())
all_vars = {var.name: var for var in variables}
return predictions, all_vars, reg_params
def get_loss(self,
predictions,
targets,
name_scope='loss',
reg_params=None,
**kwargs):
"""Returns a loss between the provided targets and predictions.
For binary classification, this loss is sigmoid cross entropy. For
multi-class classification, it is softmax cross entropy.
A weight decay loss is also added to the parameters passed in reg_params.
Args:
predictions: A tensor of predictions. For multiclass classification its
shape is (num_samples, num_classes), where the second dimension contains
a logit per class. For binary classification, its shape is
(num_samples,), where each element is the probability of class 1 for
that sample.
targets: A tensor of targets of shape (num_samples,), where each row
contains the label index of the corresponding sample.
name_scope: A string containing the name scope used in TensorFlow.
      reg_params: A dictionary of parameters, mapping from name to parameter, for
the variables to be included in the weight decay loss. If None, no
weight decay is applied.
**kwargs: Keyword arguments, potentially containing the weight of the
regularization term, passed under the name `weight_decay`. If this is
not provided, it defaults to 0.0.
Returns:
      loss: The accumulated loss value.
"""
reg_params = reg_params if reg_params is not None else {}
weight_decay = kwargs['weight_decay'] if 'weight_decay' in kwargs else None
with tf.name_scope(name_scope):
# Cross entropy error.
if self.is_binary_classification:
loss = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=targets, logits=predictions))
else:
loss = tf.losses.softmax_cross_entropy(targets, predictions)
# Weight decay loss.
if weight_decay is not None:
for var in reg_params.values():
loss = loss + weight_decay * tf.nn.l2_loss(var)
return loss
def normalize_predictions(self, predictions):
"""Converts predictions to probabilities.
Args:
predictions: A tensor of logits. For multiclass classification its shape
is (num_samples, num_classes), where the second dimension contains a
logit per class. For binary classification, its shape is (num_samples,),
where each element is the probability of class 1 for that sample.
Returns:
A tensor of the same shape as predictions, with values between [0, 1]
representing probabilities.
"""
if self.is_binary_classification:
return tf.nn.sigmoid(predictions)
return tf.nn.softmax(predictions, axis=-1)
class GraphConvolution(object):
"""Graph convolution layer."""
def __init__(self,
input_dim,
output_dim,
support,
num_features_nonzero,
dropout=0.,
sparse_inputs=False,
activation=tf.nn.relu,
bias=False,
featureless=False,
name=None):
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
self.dropout = dropout
self.act = activation
self.support = support
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# Helper variable for sparse dropout.
self.num_features_nonzero = num_features_nonzero
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = tf.get_variable(
name='weights', initializer=glorot([input_dim, output_dim]))
if self.bias:
self.vars['bias'] = tf.get_variable(
name='bias', initializer=tf.zeros(shape=[output_dim]))
def __call__(self, inputs):
with tf.name_scope(self.name):
outputs = self._call(inputs)
return outputs
def _call(self, inputs):
"""Run over inputs."""
x = inputs
# Dropout.
if self.sparse_inputs:
x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1 - self.dropout)
# Convolve.
if not self.featureless:
pre_sup = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights']
support = dot(self.support, pre_sup, sparse=True)
output = support
# Bias.
if self.bias:
output += self.vars['bias']
return self.act(output)
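# Minimal wiring sketch (illustrative only): `features` is assumed to be a dense
# float tensor with a static last dimension, and `support` a sparse, normalized
# adjacency matrix; neither is provided by this module.
def _example_graph_convolution(features, support, num_features_nonzero):
  """Applies a single dense GraphConvolution producing 16 hidden units."""
  layer = GraphConvolution(
      input_dim=int(features.shape[-1]),
      output_dim=16,
      support=support,
      num_features_nonzero=num_features_nonzero,
      dropout=0.0,
      sparse_inputs=False,
      activation=tf.nn.relu,
      name='ExampleGraphConvolution')
  return layer(features)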
|
tensorflow/neural-structured-learning
|
research/gam/gam/models/gcn.py
|
Python
|
apache-2.0
| 12,450
|
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import mock
from oslo.config import cfg
import testtools
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import jsonutils
from neutron.openstack.common import uuidutils
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
from neutron.tests import tools
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
class TestBaseOVS(base.BaseTestCase):
def setUp(self):
super(TestBaseOVS, self).setUp()
self.root_helper = 'sudo'
self.ovs = ovs_lib.BaseOVS(self.root_helper)
self.br_name = 'bridge1'
def test_add_bridge(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.ovs.add_bridge(self.br_name)
mock_vsctl.assert_called_with(["--", "--may-exist",
"add-br", self.br_name])
def test_delete_bridge(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.ovs.delete_bridge(self.br_name)
mock_vsctl.assert_called_with(["--", "--if-exists", "del-br",
self.br_name])
def test_bridge_exists_returns_true(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.assertTrue(self.ovs.bridge_exists(self.br_name))
mock_vsctl.assert_called_with(['br-exists', self.br_name],
check_error=True)
def test_bridge_exists_returns_false_for_exit_code_2(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError('Exit code: 2\n')):
self.assertFalse(self.ovs.bridge_exists('bridge1'))
def test_bridge_exists_raises_unknown_exception(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError()):
with testtools.ExpectedException(RuntimeError):
self.ovs.bridge_exists('bridge1')
def test_get_bridge_name_for_port_name_returns_bridge_for_valid_port(self):
port_name = 'bar'
with mock.patch.object(self.ovs, 'run_vsctl',
return_value=self.br_name) as mock_vsctl:
bridge = self.ovs.get_bridge_name_for_port_name(port_name)
self.assertEqual(bridge, self.br_name)
mock_vsctl.assert_called_with(['port-to-br', port_name],
check_error=True)
def test_get_bridge_name_for_port_name_returns_none_for_exit_code_1(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError('Exit code: 1\n')):
self.assertFalse(self.ovs.get_bridge_name_for_port_name('bridge1'))
def test_get_bridge_name_for_port_name_raises_unknown_exception(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError()):
with testtools.ExpectedException(RuntimeError):
self.ovs.get_bridge_name_for_port_name('bridge1')
def _test_port_exists(self, br_name, result):
with mock.patch.object(self.ovs,
'get_bridge_name_for_port_name',
return_value=br_name):
self.assertEqual(self.ovs.port_exists('bar'), result)
def test_port_exists_returns_true_for_bridge_name(self):
self._test_port_exists(self.br_name, True)
def test_port_exists_returns_false_for_none(self):
self._test_port_exists(None, False)
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.TO = "--timeout=10"
self.root_helper = 'sudo'
self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def test_set_controller(self):
controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']
self.br.set_controller(controller_names)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set-controller', self.BR_NAME,
'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'],
root_helper=self.root_helper)
def test_del_controller(self):
self.br.del_controller()
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'del-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_get_controller(self):
self.execute.return_value = 'tcp:127.0.0.1:6633\ntcp:172.17.16.10:5555'
names = self.br.get_controller()
self.assertEqual(names,
['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'])
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'get-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_set_protocols(self):
protocols = 'OpenFlow13'
self.br.set_protocols(protocols)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set', 'bridge', self.BR_NAME,
"protocols=%s" % protocols],
root_helper=self.root_helper)
def test_create(self):
self.br.add_bridge(self.BR_NAME)
self.br.create()
def test_destroy(self):
self.br.delete_bridge(self.BR_NAME)
self.br.destroy()
def test_reset_bridge(self):
self.br.destroy()
self.br.create()
self.br.reset_bridge()
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def _test_delete_port(self, exp_timeout=None):
exp_timeout_str = self._build_timeout_opt(exp_timeout)
pname = "tap5"
self.br.delete_port(pname)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "--", "--if-exists",
"del-port", self.BR_NAME, pname],
root_helper=self.root_helper)
def test_delete_port(self):
self._test_delete_port()
    def test_call_command_non_default_timeout(self):
# This test is only for verifying a non-default timeout
# is correctly applied. Does not need to be repeated for
# every ovs_lib method
new_timeout = 5
self.br.vsctl_timeout = new_timeout
self._test_delete_port(new_timeout)
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = OrderedDict([('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = OrderedDict([('priority', 1),
('actions', 'normal')])
flow_dict_3 = OrderedDict([('priority', 2),
('actions', 'drop')])
flow_dict_4 = OrderedDict([('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = OrderedDict([
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = OrderedDict([
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = OrderedDict([
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef"
",actions=strip_vlan,output:0"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,in_port=%s,actions=drop" % ofport],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal"
% (vid, ofport, lsw_id)],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=3,tun_id=%s,actions="
"mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport)],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,nw_src=%s,arp,actions=drop" % cidr],
process_input=None, root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_add_flow_timeout_set(self):
flow_dict = OrderedDict([('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=1000,idle_timeout=2000,priority=1,actions=normal"],
process_input=None,
root_helper=self.root_helper)
def test_add_flow_default_priority(self):
flow_dict = OrderedDict([('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,priority=1,actions=normal"],
process_input=None,
root_helper=self.root_helper)
def test_get_port_ofport(self):
pname = "tap99"
ofport = "6"
self.execute.return_value = ofport
self.assertEqual(self.br.get_port_ofport(pname), ofport)
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"],
root_helper=self.root_helper)
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
self.execute.return_value = datapath_id
self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get",
"Bridge", self.BR_NAME, "datapath_id"],
root_helper=self.root_helper)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self.execute.assert_called_once_with(
["ovs-ofctl", "dump-flows", self.BR_NAME],
root_helper=self.root_helper,
process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"in_port=" + ofport],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"tun_id=%s" % lsw_id],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"dl_vlan=%s" % vid],
process_input=None, root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_defer_apply_flows(self):
flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start()
flow_expr.side_effect = ['added_flow_1', 'added_flow_2',
'deleted_flow_1']
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
self.br.defer_apply_on()
self.br.add_flow(flow='add_flow_1')
self.br.defer_apply_on()
self.br.add_flow(flow='add_flow_2')
self.br.delete_flows(flow='delete_flow_1')
self.br.defer_apply_off()
flow_expr.assert_has_calls([
mock.call({'flow': 'add_flow_1'}, 'add'),
mock.call({'flow': 'add_flow_2'}, 'add'),
mock.call({'flow': 'delete_flow_1'}, 'del')
])
run_ofctl.assert_has_calls([
mock.call('add-flows', ['-'], 'added_flow_1\nadded_flow_2\n'),
mock.call('del-flows', ['-'], 'deleted_flow_1\n')
])
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = "6"
command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper), None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = "6"
# Each element is a tuple of (expected mock call, return_value)
command = ["ovs-vsctl", self.TO, "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=patch", "options:peer=" + peer])
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper),
None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport)
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = "6"
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
if is_xen:
external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
% (vif_id, mac))
else:
external_ids = ('{iface-id="%s", attached-mac="%s"}'
% (vif_id, mac))
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
"%s\n" % pname),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "external_ids"],
root_helper=self.root_helper),
external_ids),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
if is_xen:
expected_calls_and_values.append(
(mock.call(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
root_helper=self.root_helper),
vif_id)
)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _encode_ovs_json(self, headings, data):
# See man ovs-vsctl(8) for the encoding details.
r = {"data": [],
"headings": headings}
for row in data:
ovs_row = []
r["data"].append(ovs_row)
for cell in row:
if isinstance(cell, (str, int, list)):
ovs_row.append(cell)
elif isinstance(cell, dict):
ovs_row.append(["map", cell.items()])
elif isinstance(cell, set):
ovs_row.append(["set", cell])
else:
raise TypeError('%r not int, str, list, set or dict' %
type(cell))
return jsonutils.dumps(r)
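    # For example (illustrative values only):
    #   self._encode_ovs_json(['name', 'tag'], [['p1', 1]])
    # returns '{"data": [["p1", 1]], "headings": ["name", "tag"]}' (key order may vary).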
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# A vif port on another bridge:
['tap88', {id_key: 'tap88id', 'attached-mac': 'tap88id'}, 1],
# Non-vif port on this bridge:
['tun22', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\ntun22'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\n'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'\n'.join((iface for iface, tag in data))),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,tag",
"list", "Port"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "clear", "Port", pname, "tag"],
root_helper=self.root_helper)
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
self.execute.return_value = 'br-int'
exp_timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "iface-to-br", iface],
root_helper=root_helper)
def test_iface_to_br(self):
self._test_iface_to_br()
def test_iface_to_br_non_default_timeout(self):
new_timeout = 5
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
self._test_iface_to_br(new_timeout)
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
root_helper = 'sudo'
self.execute.side_effect = Exception
self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper)
def test_delete_all_ports(self):
with mock.patch.object(self.br, 'get_port_name_list',
return_value=['port1']) as get_port:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=True)
get_port.assert_called_once_with()
delete_port.assert_called_once_with('port1')
def test_delete_neutron_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
with mock.patch.object(self.br, 'get_vif_ports',
return_value=[port1, port2]) as get_ports:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=False)
get_ports.assert_called_once_with()
delete_port.assert_has_calls([
mock.call('tap1234'),
mock.call('tap5678')
])
def test_delete_neutron_ports_list_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_bridges(self, exp_timeout=None):
bridges = ['br-int', 'br-ex']
root_helper = 'sudo'
self.execute.return_value = 'br-int\nbr-ex\n'
timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
self.execute.assert_called_once_with(
["ovs-vsctl", timeout_str, "list-br"],
root_helper=root_helper)
def test_get_bridges(self):
self._test_get_bridges()
def test_get_bridges_not_default_timeout(self):
new_timeout = 5
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
self._test_get_bridges(new_timeout)
def test_get_local_port_mac_succeeds(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address='foo')):
self.assertEqual('foo', self.br.get_local_port_mac())
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address=None)):
with testtools.ExpectedException(Exception):
self.br.get_local_port_mac()
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None):
headings = ['external_ids', 'name', 'ofport']
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=external_ids,name,ofport",
"find", "Interface",
'external_ids:iface-id="%s"' % iface_id],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data))]
if data:
if not br_name:
br_name = self.BR_NAME
expected_calls_and_values.append(
(mock.call(["ovs-vsctl", self.TO,
"iface-to-br", data[0][headings.index('name')]],
root_helper=self.root_helper),
br_name))
tools.setup_mock_calls(self.execute, expected_calls_and_values)
vif_port = self.br.get_vif_port_by_id(iface_id)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
return vif_port
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
if mac:
external_ids.append(["attached-mac", mac])
data = [[["map", external_ids], "tap99",
ofport if ofport else '["set",[]]']]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port)
return
self.assertEqual(vif_port.vif_id, 'tap99id')
self.assertEqual(vif_port.vif_mac, 'aa:bb:cc:dd:ee:ff')
self.assertEqual(vif_port.port_name, 'tap99')
self.assertEqual(vif_port.ofport, ofport)
def test_get_vif_by_port_id_with_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_ofport(self):
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_invalid_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_mac(self):
self._test_get_vif_port_by_id_with_data(ofport=1)
def test_get_vif_by_port_id_with_no_data(self):
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
def test_get_vif_by_port_id_different_bridge(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
data = [[["map", external_ids], "tap99", 1]]
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
"br-ext"))
def _check_ovs_vxlan_version(self, installed_usr_version,
installed_klm_version,
installed_kernel_version,
expecting_ok):
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_ovs_klm_version'
) as klm_cmd:
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_ovs_usr_version'
) as usr_cmd:
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_kernel_version'
) as kernel_cmd:
try:
klm_cmd.return_value = installed_klm_version
usr_cmd.return_value = installed_usr_version
kernel_cmd.return_value = installed_kernel_version
ovs_lib.check_ovs_vxlan_version(root_helper='sudo')
version_ok = True
except SystemError:
version_ok = False
self.assertEqual(version_ok, expecting_ok)
def test_check_minimum_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(min_vxlan_ver, min_vxlan_ver,
min_kernel_ver, expecting_ok=True)
def test_check_future_version(self):
install_ver = str(float(constants.MINIMUM_OVS_VXLAN_VERSION) + 0.01)
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(install_ver, install_ver,
min_kernel_ver, expecting_ok=True)
def test_check_fail_version(self):
install_ver = str(float(constants.MINIMUM_OVS_VXLAN_VERSION) - 0.01)
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(install_ver, install_ver,
min_kernel_ver, expecting_ok=False)
def test_check_fail_no_version(self):
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(None, None,
min_kernel_ver,
expecting_ok=False)
def test_check_fail_klm_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = OVS_LINUX_KERN_VERS_WITHOUT_VXLAN
install_ver = str(float(min_vxlan_ver) - 0.01)
self._check_ovs_vxlan_version(min_vxlan_ver,
install_ver,
min_kernel_ver,
expecting_ok=False)
def test_check_pass_kernel_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(min_vxlan_ver, min_vxlan_ver,
min_kernel_ver, expecting_ok=True)
|
vijayendrabvs/hap
|
neutron/tests/unit/openvswitch/test_ovs_lib.py
|
Python
|
apache-2.0
| 36,358
|
# -*- coding: utf-8 -*-
# Copyright 2021 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from google.appengine.ext.ndb.model import GeoPt
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.communities.models import CommunityGeoFence, CommunityLocation, GeoFenceGeometry
from rogerthat.bizz.communities.to import CommunityGeoFenceTO
def get_geo_fence(community_id):
# type: (int) -> CommunityGeoFence
key = CommunityGeoFence.create_key(community_id)
fence = key.get() # type: CommunityGeoFence
if not fence:
fence = CommunityGeoFence(key=key)
fence.country = get_community(community_id).country
return fence
def update_geo_fence(community_id, data):
# type: (int, CommunityGeoFenceTO) -> CommunityGeoFence
fence = get_geo_fence(community_id)
fence.defaults = None
if data.defaults:
fence.defaults = CommunityLocation(locality=data.defaults.locality,
postal_code=data.defaults.postal_code)
fence.geometry = None
if data.geometry:
fence.geometry = GeoFenceGeometry(center=GeoPt(data.geometry.center.lat, data.geometry.center.lon),
max_distance=data.geometry.max_distance)
fence.put()
return fence
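# Illustrative usage sketch (not part of the original module); the community id
# and the TO payload are assumptions and would normally come from the request:
#
#   fence = get_geo_fence(123)              # returns a default fence if none stored
#   fence = update_geo_fence(123, data_to)  # data_to: CommunityGeoFenceTO from the API layer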
|
our-city-app/oca-backend
|
src/rogerthat/bizz/communities/geo_fence.py
|
Python
|
apache-2.0
| 1,845
|
# author: Asmaa Mirkhan ~ 2019
import os
import argparse
import cv2 as cv
from DetectorAPI import DetectorAPI
def blurBoxes(image, boxes):
"""
    Arguments:
    image -- the image that will be edited as a matrix
    boxes -- list of boxes that will be blurred; each box must be in the format (x_top_left, y_top_left, x_bottom_right, y_bottom_right)
Returns:
image -- the blurred image as a matrix
"""
for box in boxes:
# unpack each box
x1, y1, x2, y2 = [d for d in box]
# crop the image due to the current box
sub = image[y1:y2, x1:x2]
# apply GaussianBlur on cropped area
blur = cv.blur(sub, (10, 10))
# paste blurred image on the original image
image[y1:y2, x1:x2] = blur
return image
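# Illustrative usage sketch (not part of the original script); the file names
# are assumptions. Boxes use the (x_top_left, y_top_left, x_bottom_right,
# y_bottom_right) format described in the docstring above:
#
#   img = cv.imread('people.jpg')
#   img = blurBoxes(img, [(30, 40, 130, 160), (200, 50, 280, 140)])
#   cv.imwrite('people_blurred.jpg', img)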
def main(args):
# assign model path and threshold
model_path = args.model_path
threshold = args.threshold
# create detection object
odapi = DetectorAPI(path_to_ckpt=model_path)
# open image
image = cv.imread(args.input_image)
    # run face detection
boxes, scores, classes, num = odapi.processFrame(image)
    # filter boxes by the confidence threshold
# boxes are in (x_top_left, y_top_left, x_bottom_right, y_bottom_right) format
boxes = [boxes[i] for i in range(0, num) if scores[i] > threshold]
# apply blurring
image = blurBoxes(image, boxes)
# # show image
# cv.imshow('blurred', image)
# if image will be saved then save it
if args.output_image:
cv.imwrite(args.output_image, image)
        print('Image has been saved successfully at', args.output_image)
else:
cv.imshow('blurred', image)
        # close the window and stop the program when any key is pressed
cv.waitKey(0)
cv.destroyAllWindows()
if __name__ == "__main__":
# creating argument parser
parser = argparse.ArgumentParser(description='Image blurring parameters')
# adding arguments
parser.add_argument('-i',
'--input_image',
help='Path to your image',
type=str,
required=True)
parser.add_argument('-m',
'--model_path',
default='/opt/blurry-faces/face_model/face.pb',
help='Path to .pb model',
type=str)
parser.add_argument('-o',
'--output_image',
help='Output file path',
type=str)
parser.add_argument('-t',
'--threshold',
help='Face detection confidence',
default=0.7,
type=float)
args = parser.parse_args()
print(args)
# if input image path is invalid then stop
assert os.path.isfile(args.input_image), 'Invalid input file'
# if output directory is invalid then stop
if args.output_image:
assert os.path.isdir(os.path.dirname(
args.output_image)), 'No such directory'
main(args)
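# Example invocation (illustrative only; the input/output paths are assumptions):
#   python auto_blur_image.py -i input.jpg -o out/blurred.jpg \
#       -m /opt/blurry-faces/face_model/face.pb -t 0.7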
|
grycap/scar
|
examples/mask-detector-workflow/blurry-faces/src/auto_blur_image.py
|
Python
|
apache-2.0
| 3,111
|
"""Class to hold all light accessories."""
import logging
from pyhap.const import CATEGORY_FAN
from homeassistant.components.fan import (
ATTR_DIRECTION, ATTR_OSCILLATING, DIRECTION_FORWARD, DIRECTION_REVERSE,
DOMAIN, SERVICE_OSCILLATE, SERVICE_SET_DIRECTION, SUPPORT_DIRECTION,
SUPPORT_OSCILLATE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES, SERVICE_TURN_OFF,
SERVICE_TURN_ON, STATE_OFF, STATE_ON)
from . import TYPES
from .accessories import HomeAccessory
from .const import (
CHAR_ACTIVE, CHAR_ROTATION_DIRECTION, CHAR_SWING_MODE, SERV_FANV2)
_LOGGER = logging.getLogger(__name__)
@TYPES.register('Fan')
class Fan(HomeAccessory):
"""Generate a Fan accessory for a fan entity.
Currently supports: state, speed, oscillate, direction.
"""
def __init__(self, *args):
"""Initialize a new Light accessory object."""
super().__init__(*args, category=CATEGORY_FAN)
self._flag = {CHAR_ACTIVE: False,
CHAR_ROTATION_DIRECTION: False,
CHAR_SWING_MODE: False}
self._state = 0
self.chars = []
features = self.hass.states.get(self.entity_id) \
.attributes.get(ATTR_SUPPORTED_FEATURES)
if features & SUPPORT_DIRECTION:
self.chars.append(CHAR_ROTATION_DIRECTION)
if features & SUPPORT_OSCILLATE:
self.chars.append(CHAR_SWING_MODE)
serv_fan = self.add_preload_service(SERV_FANV2, self.chars)
self.char_active = serv_fan.configure_char(
CHAR_ACTIVE, value=0, setter_callback=self.set_state)
if CHAR_ROTATION_DIRECTION in self.chars:
self.char_direction = serv_fan.configure_char(
CHAR_ROTATION_DIRECTION, value=0,
setter_callback=self.set_direction)
if CHAR_SWING_MODE in self.chars:
self.char_swing = serv_fan.configure_char(
CHAR_SWING_MODE, value=0, setter_callback=self.set_oscillating)
def set_state(self, value):
"""Set state if call came from HomeKit."""
_LOGGER.debug('%s: Set state to %d', self.entity_id, value)
self._flag[CHAR_ACTIVE] = True
service = SERVICE_TURN_ON if value == 1 else SERVICE_TURN_OFF
params = {ATTR_ENTITY_ID: self.entity_id}
self.hass.services.call(DOMAIN, service, params)
def set_direction(self, value):
"""Set state if call came from HomeKit."""
_LOGGER.debug('%s: Set direction to %d', self.entity_id, value)
self._flag[CHAR_ROTATION_DIRECTION] = True
direction = DIRECTION_REVERSE if value == 1 else DIRECTION_FORWARD
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_DIRECTION: direction}
self.hass.services.call(DOMAIN, SERVICE_SET_DIRECTION, params)
def set_oscillating(self, value):
"""Set state if call came from HomeKit."""
_LOGGER.debug('%s: Set oscillating to %d', self.entity_id, value)
self._flag[CHAR_SWING_MODE] = True
oscillating = True if value == 1 else False
params = {ATTR_ENTITY_ID: self.entity_id,
ATTR_OSCILLATING: oscillating}
self.hass.services.call(DOMAIN, SERVICE_OSCILLATE, params)
def update_state(self, new_state):
"""Update fan after state change."""
# Handle State
state = new_state.state
if state in (STATE_ON, STATE_OFF):
self._state = 1 if state == STATE_ON else 0
if not self._flag[CHAR_ACTIVE] and \
self.char_active.value != self._state:
self.char_active.set_value(self._state)
self._flag[CHAR_ACTIVE] = False
# Handle Direction
if CHAR_ROTATION_DIRECTION in self.chars:
direction = new_state.attributes.get(ATTR_DIRECTION)
if not self._flag[CHAR_ROTATION_DIRECTION] and \
direction in (DIRECTION_FORWARD, DIRECTION_REVERSE):
hk_direction = 1 if direction == DIRECTION_REVERSE else 0
if self.char_direction.value != hk_direction:
self.char_direction.set_value(hk_direction)
self._flag[CHAR_ROTATION_DIRECTION] = False
# Handle Oscillating
if CHAR_SWING_MODE in self.chars:
oscillating = new_state.attributes.get(ATTR_OSCILLATING)
if not self._flag[CHAR_SWING_MODE] and \
oscillating in (True, False):
hk_oscillating = 1 if oscillating else 0
if self.char_swing.value != hk_oscillating:
self.char_swing.set_value(hk_oscillating)
self._flag[CHAR_SWING_MODE] = False
|
persandstrom/home-assistant
|
homeassistant/components/homekit/type_fans.py
|
Python
|
apache-2.0
| 4,695
|
# -*- coding: utf-8 -*-
import re
import datetime
import random
import requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup
pages = set()
all_exernal_links = set()
all_internal_links = set()
def get_internal_link(bs_obj, include_url):
"""
    Collect all internal links on the page.
    :param bs_obj: BeautifulSoup object of the fetched page
    :param include_url: base URL used to resolve and filter internal links
    :return: list of internal link URLs (relative links are made absolute)
"""
include_url = urlparse(include_url).scheme +"://" +urlparse(include_url).netloc
internal_links = []
for link in bs_obj.findAll("a",href = re.compile("^(/|.*"+include_url+")")):
if link.attrs["href"] is not None:
if link.attrs["href"] not in internal_links:
if link.attrs["href"].startswith("/"):
internal_links.append(include_url+link.attrs["href"])
else:
internal_links.append(link.attrs["href"])
return internal_links
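# Illustrative sketch (not part of the original script); the URL is an
# assumption. Relative hrefs are resolved against the page's scheme and netloc:
#
#   html = requests.get("http://www.hao123.com")
#   links = get_internal_link(BeautifulSoup(html.text), "http://www.hao123.com")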
def get_external_link(bs_obj, exclude_url):
external_links = []
for link in bs_obj.findAll("a",href=re.compile("^(http|www)((?!"+exclude_url+").)*$")):
if link.attrs['href'] is not None:
if link.attrs['href'] not in external_links:
external_links.append(link.attrs['href'])
return external_links
def get_random_external_link(starting_page):
html = requests.get(starting_page)
bs_obj = BeautifulSoup(html.text)
external_links = get_external_link(bs_obj,urlparse(starting_page).netloc)
if not external_links:
print("No external links, looking around the site for one")
domain = urlparse(starting_page).scheme+"://"+urlparse(starting_page).netloc
internal_links = get_internal_link(bs_obj,domain)
        if internal_links:
            return get_random_external_link(internal_links[random.randint(0,len(internal_links)-1)])
        else:
            print("----------stop--------------")
else:
        return external_links[random.randint(0,len(external_links)-1)]
def get_all_external_links(site_url):
html = requests.get(site_url)
bs_obj = BeautifulSoup(html.text)
internal_links = get_internal_link(bs_obj,site_url)
external_links = get_external_link(bs_obj,site_url)
for link in external_links:
if link not in all_exernal_links:
all_exernal_links.add(link)
print(link)
for link in internal_links:
if link not in all_internal_links:
all_internal_links.add(link)
print(link)
get_all_external_links(link)
def follow_external_only (start_site):
external_link = get_random_external_link(start_site)
print("Random external link is :",external_link)
follow_external_only(external_link)
if __name__ == '__main__':
start_site = "http://www.hao123.com"
# follow_external_only(start_site)
get_all_external_links(start_site)
|
javaor/PythonSpider
|
scrapy_outlink.py
|
Python
|
apache-2.0
| 2,835
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.natural_ventilation_and_duct_leakage import AirflowNetworkDistributionComponentHeatExchanger
log = logging.getLogger(__name__)
class TestAirflowNetworkDistributionComponentHeatExchanger(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_airflownetworkdistributioncomponentheatexchanger(self):
pyidf.validation_level = ValidationLevel.error
obj = AirflowNetworkDistributionComponentHeatExchanger()
# object-list
var_heatexchanger_name = "object-list|HeatExchanger Name"
obj.heatexchanger_name = var_heatexchanger_name
# alpha
var_heatexchanger_object_type = "HeatExchanger:AirToAir:FlatPlate"
obj.heatexchanger_object_type = var_heatexchanger_object_type
# real
var_air_path_length = 0.0001
obj.air_path_length = var_air_path_length
# real
var_air_path_hydraulic_diameter = 0.0001
obj.air_path_hydraulic_diameter = var_air_path_hydraulic_diameter
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].heatexchanger_name, var_heatexchanger_name)
self.assertEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].heatexchanger_object_type, var_heatexchanger_object_type)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].air_path_length, var_air_path_length)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].air_path_hydraulic_diameter, var_air_path_hydraulic_diameter)
|
rbuffat/pyidf
|
tests/test_airflownetworkdistributioncomponentheatexchanger.py
|
Python
|
apache-2.0
| 1,989
|
from flask_wtf import Form
from wtforms.fields import DecimalField, SubmitField, SelectField
from wtforms.validators import Required, Length, NumberRange
class LatLongForm(Form):
months_pairs = [('4', "April"), ('5', "May"), ('6', "June"), ('7', "July"), ('8', "August"), ('9', "September")]
days_pairs = [('0', "Monday"), ('1', "Tuesday"), ('2', "Wednesday"), ('3', "Thursday"), ('4', "Friday"), ('5', "Saturday"), ('6', "Sunday")]
hours_pairs = [('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10'), \
('11', '11'), ('12', '12')]
minutes_pairs = [('0', '00'), ('10', '10'), ('20', '20'), ('30', '30'), ('40', '40'), ('50', '50')]
am_pm_pairs = [('0', 'AM'), ('1', 'PM')]
month1 = SelectField('Month 1: ', choices = months_pairs)
day1 = SelectField('Day 1: ', choices = days_pairs)
hour1 = SelectField('Hour 1: ', choices = hours_pairs)
minute1 = SelectField('Minute 1:', choices = minutes_pairs)
am_pm1 = SelectField('AM / PM 1:', choices = am_pm_pairs)
latitude1 = DecimalField('Latitude 1: ', validators=[NumberRange(min=40.6, max=40.87, message ='value greater than 40.6 and smaller than 40.87'), Required()])
longitude1 = DecimalField('Longitude 1: ', validators=[NumberRange(min=-74.05, max=-73.9, message ='value greater than -74.05 and smaller than -73.9'), Required()])
month2 = SelectField('Month 2: ', choices = months_pairs)
day2 = SelectField('Day 2: ', choices = days_pairs)
hour2 = SelectField('Hour 2: ', choices = hours_pairs)
minute2 = SelectField('Minute 2:', choices = minutes_pairs)
am_pm2 = SelectField('AM / PM 2:', choices = am_pm_pairs)
latitude2 = DecimalField('Latitude 2: ', validators=[NumberRange(min=40.6, max=40.87, message ='value greater than 40.6 and smaller than 40.87'), Required()])
longitude2 = DecimalField('Longitude 2: ', validators=[NumberRange(min=-74.05, max=-73.9, message ='value greater than -74.05 and smaller than -73.9'), Required()])
submit = SubmitField('Enter!')
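# Illustrative usage sketch (not part of the original module); inside a Flask
# view the form would typically be bound to the request and validated:
#
#   form = LatLongForm()
#   if form.validate_on_submit():
#       lat1, lon1 = float(form.latitude1.data), float(form.longitude1.data)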
|
seunghwanl/APMAE4990
|
webapp/forms.py
|
Python
|
apache-2.0
| 2,120
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from six import moves
from rally.cli import envutils
from rally import exceptions
from tests.unit import test
class EnvUtilsTestCase(test.TestCase):
def test_default_from_global(self):
@envutils.default_from_global("test_arg_name",
"test_env_name",
"test_missing_arg")
def test_function(test_arg_name=None):
pass
with mock.patch("sys.stdout",
new_callable=moves.StringIO) as mock_stdout:
test_function()
self.assertEqual(mock_stdout.getvalue(),
"Missing argument: --test_missing_arg\n")
@mock.patch.dict(os.environ,
values={envutils.ENV_DEPLOYMENT: "my_deployment_id"},
clear=True)
def test_get_deployment_id_in_env(self):
deployment_id = envutils.get_global(envutils.ENV_DEPLOYMENT)
self.assertEqual("my_deployment_id", deployment_id)
@mock.patch.dict(os.environ, values={}, clear=True)
@mock.patch("rally.cli.envutils.fileutils.load_env_file")
def test_get_deployment_id_with_exception(self, mock_file):
self.assertRaises(exceptions.InvalidArgumentsException,
envutils.get_global, envutils.ENV_DEPLOYMENT, True)
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"))
@mock.patch.dict(os.environ, values={}, clear=True)
@mock.patch("rally.cli.envutils.fileutils.load_env_file")
def test_get_deployment_id_with_none(self, mock_file):
self.assertIsNone(envutils.get_global(envutils.ENV_DEPLOYMENT))
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"))
@mock.patch.dict(os.environ, values={envutils.ENV_TASK: "my_task_id"},
clear=True)
def test_get_task_id_in_env(self):
self.assertEqual("my_task_id", envutils.get_global(envutils.ENV_TASK))
@mock.patch.dict(os.environ, values={}, clear=True)
@mock.patch("rally.cli.envutils.fileutils.load_env_file")
def test_get_task_id_with_exception(self, mock_file):
self.assertRaises(exceptions.InvalidArgumentsException,
envutils.get_global, envutils.ENV_TASK, True)
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"))
@mock.patch.dict(os.environ, values={}, clear=True)
@mock.patch("rally.cli.envutils.fileutils.load_env_file")
def test_get_task_id_with_none(self, mock_file):
self.assertIsNone(envutils.get_global("RALLY_TASK"))
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"))
@mock.patch.dict(os.environ,
values={envutils.ENV_DEPLOYMENT: "test_deployment_id"},
clear=True)
@mock.patch("os.path.exists")
@mock.patch("rally.cli.envutils.fileutils.update_env_file",
return_value=True)
def test_clear_global(self, mock_file, mock_file_status):
envutils.clear_global(envutils.ENV_DEPLOYMENT)
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"), envutils.ENV_DEPLOYMENT, "\n")
self.assertEqual(os.environ, {})
@mock.patch.dict(os.environ,
values={envutils.ENV_DEPLOYMENT: "test_deployment_id",
envutils.ENV_TASK: "test_task_id"},
clear=True)
@mock.patch("os.path.exists")
@mock.patch("rally.cli.envutils.fileutils.update_env_file",
return_value=True)
def test_clear_env(self, mock_file, mock_file_status):
envutils.clear_env()
self.assertEqual(os.environ, {})
|
vponomaryov/rally
|
tests/unit/cli/test_envutils.py
|
Python
|
apache-2.0
| 4,414
|
'''
New integration test for VM migration between hosts while an ISO is attached.
@author: Chenyuan.xu
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.zstack_test.zstack_test_image as test_image
import zstackwoodpecker.test_state as test_state
import apibinding.inventory as inventory
import zstacklib.utils.ssh as ssh
import time
import os
vm = None
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def exec_cmd_in_vm(vm, cmd, fail_msg):
ret, output, stderr = ssh.execute(cmd, vm.get_vm().vmNics[0].ip, "root", "password", False, 22)
if ret != 0:
test_util.test_fail(fail_msg)
def test():
global vm
vm = test_stub.create_vr_vm('migrate_vm', 'imageName_net', 'l3VlanNetwork2')
vm.check()
ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
if ps.type == inventory.LOCAL_STORAGE_TYPE:
test_util.test_skip('Skip test on localstorage PS')
vm_inv = vm.get_vm()
vm_uuid = vm_inv.uuid
test_util.test_dsc('Add ISO Image')
#cond = res_ops.gen_query_conditions('name', '=', 'sftp')
bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0].uuid
img_option = test_util.ImageOption()
img_option.set_name('iso')
img_option.set_backup_storage_uuid_list([bs_uuid])
testIsoUrl = os.environ.get('testIsoUrl')
img_option.set_url(testIsoUrl)
image_inv = img_ops.add_iso_template(img_option)
image = test_image.ZstackTestImage()
image.set_image(image_inv)
image.set_creation_option(img_option)
test_obj_dict.add_image(image)
test_util.test_dsc('Attach ISO to VM')
cond = res_ops.gen_query_conditions('name', '=', 'iso')
iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
img_ops.attach_iso(iso_uuid, vm_uuid)
time.sleep(10)
cmd = "mount /dev/sr0 /mnt"
exec_cmd_in_vm(vm, cmd, "Failed to mount /dev/sr0 /mnt.")
test_util.test_dsc('Migrate VM')
test_stub.migrate_vm_to_random_host(vm)
vm.check()
cmd = "umount /mnt"
exec_cmd_in_vm(vm, cmd, "Failed to umount /mnt.")
img_ops.detach_iso(vm_uuid)
img_ops.attach_iso(iso_uuid, vm_uuid)
time.sleep(10)
cmd = "mount /dev/sr0 /mnt"
exec_cmd_in_vm(vm, cmd, "Failed to mount /dev/sr0 /mnt.")
cmd = "cat /mnt/Licenses.txt"
exec_cmd_in_vm(vm, cmd, "Licenses.txt doesn't exist.")
img_ops.detach_iso(vm_uuid)
image.delete()
image.expunge()
test_obj_dict.rm_image(image)
vm.destroy()
test_util.test_pass('Migrate VM Test Success When Attach ISO')
# Will be called only if an exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/multihosts/migrate/test_migrate_vm_with_iso2.py
|
Python
|
apache-2.0
| 2,976
|
#!/usr/bin/env python
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pkg_resources import resource_stream
from optparse import OptionParser
from os import path, chdir
from os.path import abspath
from shutil import rmtree, copyfileobj
from subprocess import check_call, CalledProcessError
from tempfile import mkdtemp
opts = OptionParser()
opts.add_option('-t', help='test report to convert')
opts.add_option('-o', help='output directory')
args, _ = opts.parse_args()
temp_dir = mkdtemp()
try:
try:
check_call(
['curl', '--proxy-anyauth', '-sfo', path.join(temp_dir, 'saxon.jar'),
'http://central.maven.org/maven2/net/sf/saxon/Saxon-HE/9.6.0-6/Saxon-HE-9.6.0-6.jar'])
except OSError as err:
print('could not invoke curl: %s\nis curl installed?' % err)
exit(1)
except CalledProcessError as err:
print('error using curl: %s' % err)
exit(1)
buck_report = abspath(args.t)
buck_to_junit_xsl = abspath(
path.join(temp_dir, "buckToJUnit.xsl"))
with open(buck_to_junit_xsl, 'w') as fp:
with resource_stream('buck_to_junit', 'buckToJUnit.xsl') as rfp:
copyfileobj(rfp, fp)
chdir(args.o)
try:
check_call(
['java', '-jar', path.join(temp_dir, 'saxon.jar'), '-s:' + buck_report,
'-xsl:' + buck_to_junit_xsl])
except CalledProcessError as err:
print('error converting to junit: %s' % err)
exit(1)
finally:
rmtree(temp_dir, ignore_errors=True)
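# Example invocation (illustrative only; the report path is an assumption):
#   python buck_to_junit.py -t buck-out/gen/tests/test-report.xml -o junit-out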
|
ladderlife/loonie
|
tools/tests/buck_to_junit.py
|
Python
|
apache-2.0
| 1,975
|
'''
Author: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.STR2CTC import get_charmap_lp, get_charmap_lp_inv
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes down to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_shifted_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'
cm, nClasses = get_charmap_lp()
# Additional NaC Channel
nClasses += 1
nEpochs = 15
batchSize = 16
# learningRate = 0.001
# momentum = 0.9
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
numT = 32998
stepsPerEpocheTrain = numT / batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) / batchSize
def inference(images, seqLen, keep_prob):
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
norm1 = tf.nn.local_response_normalization(conv1, name='norm1')
# _activation_summary(conv1)
# norm1 = tf.nn.local_response_normalization(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
seqFloat = tf.to_float(seqLen)
seqL2 = tf.ceil(seqFloat * 0.33)
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
norm2 = tf.nn.local_response_normalization(conv2, name='norm2')
# _activation_summary(conv2)
# norm2
# norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(norm2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
seqL3 = tf.ceil(seqL2 * 0.5)
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
norm3 = tf.nn.local_response_normalization(conv3, name='norm3')
pool3 = tf.nn.max_pool(norm3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool2')
# NO POOLING HERE -> CTC needs an appropriate length.
seqLenAfterConv = tf.to_int32(seqL3)
with tf.variable_scope('RNN_Prep') as scope:
# (#batch Y X Z) --> (X #batch Y Z)
rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
# (X #batch Y Z) --> (X #batch Y*Z)
shape = rnnIn.get_shape()
steps = shape[0]
rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
# (X #batch Y*Z) --> (X*#batch Y*Z)
shape = rnnIn.get_shape()
rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
# (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
rnnIn = tf.split(0, steps, rnnIn)
with tf.variable_scope('BLSTM1') as scope:
forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
droppedFW = rnn_cell.DropoutWrapper(forwardH1, output_keep_prob=keep_prob)
backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
droppedBW = rnn_cell.DropoutWrapper(backwardH1, output_keep_prob=keep_prob)
outputs, _, _ = bidirectional_rnn(droppedFW, droppedBW, rnnIn, dtype=tf.float32)
fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
# outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
with tf.variable_scope('LOGIT') as scope:
weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
stddev=np.sqrt(2.0 / nHiddenLSTM1)))
biasesClasses = tf.Variable(tf.zeros([nClasses]))
logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
logits3d = tf.pack(logitsFin)
return logits3d, seqLenAfterConv
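# Shape walk-through (added note, assuming the defaults above: imgH=48, imgW=256,
# channels=1 and SAME padding, so each output dim is ceil(input / stride)):
#   input                 (batch, 48, 256,   1)
#   conv1, stride (4, 3)  (batch, 12,  86,  32)
#   conv2 + pool2 (4, 2)  (batch,  3,  43,  64)
#   conv3 + pool3 (3, 1)  (batch,  1,  43, 128)
#   RNN input: 43 time steps of (batch, 128) features
#   logits3d: (43, batch, nClasses); seqLenAfterConv = ceil(ceil(seqLen*0.33)*0.5)
#   (for a full-width image, seqLen=256 gives 43, matching the step count)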
def loss(logits3d, tgt, seqLenAfterConv):
loss = tf.reduce_sum(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
return loss
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
keep_prob = tf.placeholder(tf.float32)
logits3d, seqAfterConv = inference(inputX, seqLengths, keep_prob)
loss = loss(logits3d, targetY, seqAfterConv)
# optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
optimizer = tf.train.AdamOptimizer().minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
err = tf.reduce_sum(edist) / tgtLens
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
tf.global_variables_initializer().run()
# ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(session, ckpt.model_checkpoint_path)
# print(ckpt)
# workList = valList[:]
# errV = 0
# lossV = 0
# timeVS = time.time()
# cmInv = get_charmap_lp_inv()
# for bStep in range(stepsPerEpocheVal):
# bList, workList = workList[:batchSize], workList[batchSize:]
# batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
# imgW,
# mvn=True)
# feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
# targetShape: batchTargetShape, seqLengths: batchSeqLengths}
# lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
# print(aErr)
# res = []
# for idx in p.values:
# res.append(cmInv[idx])
# print(res)
# # print(p)
# plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
# plt.show()
#
# lossV += lossB
# errV += aErr
# print('Val: CTC-loss ', lossV)
# errVal = errV / stepsPerEpocheVal
# print('Val: CER ', errVal)
# print('Val time ', time.time() - timeVS)
for epoch in range(nEpochs):
workList = trainList[:]
shuffle(workList)
workList = workList[0:32998]
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
for bStep in range(stepsPerEpocheTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths, keep_prob: 0.5}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
# _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
lossT += lossB
# writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
# writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
errT += aErr
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpocheTrain
print('Train: CER ', cerT)
print('Train time ', time.time() - timeTS)
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
for bStep in range(stepsPerEpocheVal):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths, keep_prob: 1.0}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpocheVal
print('Val: CER ', errVal)
print('Val time ', time.time() - timeVS)
# Write a checkpoint.
checkpoint_file = os.path.join('./private/models/lp18/', 'checkpoint')
saver.save(session, checkpoint_file, global_step=epoch)
# Defining graph
# Initializing
# Epoch 1 ...
# Train: CTC-loss 573618.233516
# Train: CER 0.732053663761
# Train time 2566.37360501
# Val: CTC-loss 7505.82809639
# Val: CER 0.0942081060599
# Val time 70.4839401245
# Epoch 2 ...
# Train: CTC-loss 73084.4478534
# Train: CER 0.0846337911042
# Train time 2332.564466
# Val: CTC-loss 4603.30670595
# Val: CER 0.0561269361496
# Val time 70.5889220238
# Epoch 3 ...
# Train: CTC-loss 56508.2676985
# Train: CER 0.0645991757256
# Train time 2307.94393301
# Val: CTC-loss 4183.01323938
# Val: CER 0.0514986638259
# Val time 69.8001348972
# Epoch 4 ...
# Train: CTC-loss 50729.6482956
# Train: CER 0.0577029808028
# Train time 2291.99180603
# Val: CTC-loss 4037.41225258
# Val: CER 0.0482444823993
# Val time 71.2115728855
# Epoch 5 ...
# Train: CTC-loss 46517.1993931
# Train: CER 0.0531190908993
# Train time 2281.25194097
# Val: CTC-loss 3822.13585426
# Val: CER 0.0456547654697
# Val time 71.494040966
# Epoch 6 ...
# Train: CTC-loss 44121.3987505
# Train: CER 0.0502368603453
# Train time 2285.71324015
# Val: CTC-loss 3754.89540133
# Val: CER 0.0438374517296
# Val time 70.6552250385
# Epoch 7 ...
# Train: CTC-loss 41823.5579544
# Train: CER 0.0477986275146
# Train time 2265.03064203
# Val: CTC-loss 3644.30589531
# Val: CER 0.0427835063939
# Val time 72.7770349979
# Epoch 8 ...
# Train: CTC-loss 40823.7625133
# Train: CER 0.0468467820267
# Train time 2255.28358293
# Val: CTC-loss 3711.35232484
# Val: CER 0.0433838154652
# Val time 72.92958498
# Epoch 9 ...
# Train: CTC-loss 41962.8032772
# Train: CER 0.0473091210596
# Train time 2240.59626412
# Val: CTC-loss 3429.378994
# Val: CER 0.0395136085105
# Val time 68.2135629654
# Epoch 10 ...
# Train: CTC-loss 39743.3587489
# Train: CER 0.0449621349412
# Train time 2247.17607689
# Val: CTC-loss 3538.12361477
# Val: CER 0.0405062843353
# Val time 73.0749549866
# Epoch 11 ...
# Train: CTC-loss 37841.1128339
# Train: CER 0.0436842029487
# Train time 1877.61327505
# Val: CTC-loss 3562.50696394
# Val: CER 0.0415591884922
# Val time 59.2560589314
# Epoch 12 ...
# Train: CTC-loss 38564.065591
# Train: CER 0.0441661707426
# Train time 1815.8160131
# Val: CTC-loss 3493.62069036
# Val: CER 0.0383308982865
# Val time 59.280351162
# Epoch 13 ...
# Train: CTC-loss 36559.2618546
# Train: CER 0.0421647087487
# Train time 1828.42957115
# Val: CTC-loss 3482.46136662
# Val: CER 0.0394279501017
# Val time 58.629338026
# Epoch 14 ...
# Train: CTC-loss 36965.8608795
# Train: CER 0.0431305091687
# Train time 1601.83509898
# Val: CTC-loss 3487.08890994
# Val: CER 0.0393206818617
# Val time 45.286646843
# Epoch 15 ...
# Train: CTC-loss 35303.027672
# Train: CER 0.0411365469195
# Train time 1386.08462787
# Val: CTC-loss 3466.22052066
# Val: CER 0.0385100504752
# Val time 44.8697309494
|
gundramleifert/exp_tf
|
models/lp/bdlstm_lp_v18.py
|
Python
|
apache-2.0
| 14,333
|
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import workflows \
as network_workflows
LOG = logging.getLogger(__name__)
class CreateSubnetInfoAction(network_workflows.CreateSubnetInfoAction):
with_subnet = forms.BooleanField(initial=True, required=False,
widget=forms.HiddenInput())
class Meta:
name = _("Subnet")
help_text = _('You can create a subnet associated with the '
                      'network. Advanced configuration is available '
                      'in the "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(network_workflows.CreateSubnetInfo):
action_class = CreateSubnetInfoAction
depends_on = ("network_id",)
class CreateSubnet(network_workflows.CreateNetwork):
slug = "create_subnet"
name = _("Create Subnet")
finalize_button_name = _("Create")
success_message = _('Created subnet "%s".')
failure_message = _('Unable to create subnet "%s".')
default_steps = (CreateSubnetInfo,
network_workflows.CreateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def get_failure_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def handle(self, request, data):
subnet = self._create_subnet(request, data)
return True if subnet else False
class UpdateSubnetInfoAction(CreateSubnetInfoAction):
cidr = forms.IPField(label=_("Network Address"),
required=False,
initial="",
widget=forms.TextInput(
attrs={'readonly': 'readonly'}),
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
# NOTE(amotoki): When 'disabled' attribute is set for the ChoiceField
# and ValidationError is raised for POST request, the initial value of
    # the ip_version ChoiceField is not set in the re-displayed form.
    # As a result, 'IPv4' is displayed even when IPv6 is used if
    # ValidationError is detected. In addition, the 'required=True' check
    # complains on re-POST since the value of the ChoiceField is not set.
    # Thus HiddenInput is now used for the ip_version ChoiceField as a
    # workaround.
ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
#widget=forms.Select(
# attrs={'disabled': 'disabled'}),
widget=forms.HiddenInput(),
label=_("IP Version"))
gateway_ip = forms.IPField(
label=_("Gateway IP (optional)"),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254). "
"You need to specify an explicit address "
"to set the gateway. "
"If you want to use no gateway, "
"check 'Disable Gateway' below."),
version=forms.IPv4 | forms.IPv6,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
initial=False, required=False)
class Meta:
name = _("Subnet")
help_text = _('You can update a subnet associated with the '
                      'network. Advanced configuration is available '
                      'in the "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data, is_create=False)
return cleaned_data
class UpdateSubnetInfo(CreateSubnetInfo):
action_class = UpdateSubnetInfoAction
depends_on = ("network_id", "subnet_id")
class UpdateSubnetDetailAction(network_workflows.CreateSubnetDetailAction):
allocation_pools = forms.CharField(widget=forms.HiddenInput(),
required=False)
class Meta:
name = _("Subnet Detail")
help_text = _('You can specify additional attributes for the subnet.')
class UpdateSubnetDetail(network_workflows.CreateSubnetDetail):
action_class = UpdateSubnetDetailAction
class UpdateSubnet(network_workflows.CreateNetwork):
slug = "update_subnet"
name = _("Edit Subnet")
finalize_button_name = _("Save")
success_message = _('Updated subnet "%s".')
failure_message = _('Unable to update subnet "%s".')
success_url = "horizon:project:networks:detail"
failure_url = "horizon:project:networks:detail"
default_steps = (UpdateSubnetInfo,
UpdateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse(self.success_url,
args=(self.context.get('network_id'),))
def _update_subnet(self, request, data):
network_id = self.context.get('network_id')
try:
subnet_id = self.context.get('subnet_id')
params = {}
params['name'] = data['subnet_name']
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
            # We should send gateway_ip only when it is changed,
            # because updating gateway_ip is prohibited
            # when the ip is used.
            # See bug 1227268.
subnet = api.neutron.subnet_get(request, subnet_id)
if params['gateway_ip'] == subnet.gateway_ip:
del params['gateway_ip']
self._setup_subnet_parameters(params, data, is_create=False)
subnet = api.neutron.subnet_update(request, subnet_id, **params)
msg = _('Subnet "%s" was successfully updated.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = (_('Failed to update subnet "%(sub)s": '
' %(reason)s') %
{"sub": data['cidr'], "reason": e})
redirect = reverse(self.failure_url, args=(network_id,))
exceptions.handle(request, msg, redirect=redirect)
return False
def handle(self, request, data):
subnet = self._update_subnet(request, data)
return True if subnet else False
|
spandanb/horizon
|
openstack_dashboard/dashboards/project/networks/subnets/workflows.py
|
Python
|
apache-2.0
| 7,828
|
#-*- encoding:utf-8 -*-
"""
@author: Weihao Zhou
@github: https://github.com/zwhinmedia/
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import math
import networkx as nx
import numpy as np
import sys
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
#sentence_delimiters = ['?', '!', ';', '?', '!', '。', ';', '……', '…','】','【','\n']
sentence_delimiters = ['?', '!', ';', '?', '!', '。', ';', '……', '…','】','【','\n']
allow_speech_tags = ['an', 'i', 'j', 'l', 'n', 'nr', 'nrfg', 'ns', 'nt', 'nz', 't', 'v', 'vd', 'vn', 'eng']
PY2 = sys.version_info[0] == 2
if not PY2:
# Python 3.x and up
text_type = str
string_types = (str,)
xrange = range
    def as_text(v): ## return a unicode string
if v is None:
return None
elif isinstance(v, bytes):
return v.decode('utf-8', errors='ignore')
elif isinstance(v, str):
return v
else:
raise ValueError('Unknown type %r' % type(v))
def is_text(v):
return isinstance(v, text_type)
else:
# Python 2.x
text_type = unicode
string_types = (str, unicode)
xrange = xrange
def as_text(v):
if v is None:
return None
elif isinstance(v, unicode):
return v
elif isinstance(v, str):
return v.decode('utf-8', errors='ignore')
else:
raise ValueError('Invalid type %r' % type(v))
def is_text(v):
return isinstance(v, text_type)
__DEBUG = None
def debug(*args):
global __DEBUG
if __DEBUG is None:
try:
if os.environ['DEBUG'] == '1':
__DEBUG = True
else:
__DEBUG = False
except:
__DEBUG = False
if __DEBUG:
print( ' '.join([str(arg) for arg in args]) )
class AttrDict(dict):
"""Dict that can get attribute by dot"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def combine(word_list, window = 2):
"""构造在window下的单词组合,用来构造单词之间的边。
Keyword arguments:
word_list -- list of str, 由单词组成的列表。
windows -- int, 窗口大小。
"""
if window < 2: window = 2
for x in xrange(1, window):
if x >= len(word_list):
break
word_list2 = word_list[x:]
res = zip(word_list, word_list2)
for r in res:
yield r
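# Illustrative example (added note): with window=2 only adjacent pairs are
# produced, e.g. combine(['w1', 'w2', 'w3'], 2) yields ('w1', 'w2') and
# ('w2', 'w3'); window=3 additionally yields ('w1', 'w3').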
def get_similarity(word_list1, word_list2):
"""默认的用于计算两个句子相似度的函数。
Keyword arguments:
word_list1, word_list2 -- 分别代表两个句子,都是由单词组成的列表
"""
words = list(set(word_list1 + word_list2))
vector1 = [float(word_list1.count(word)) for word in words]
vector2 = [float(word_list2.count(word)) for word in words]
vector3 = [vector1[x]*vector2[x] for x in xrange(len(vector1))]
vector4 = [1 for num in vector3 if num > 0.]
co_occur_num = sum(vector4)
if abs(co_occur_num) <= 1e-12:
return 0.
    denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2))) # denominator
if abs(denominator) < 1e-12:
return 0.
return co_occur_num / denominator
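# Worked example (added note): for ['a', 'b', 'c'] and ['b', 'c', 'd'] the two
# sentences share two words, so the score is 2 / (log(3) + log(3)) ~= 0.91;
# sentences with no word in common score 0.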
def sort_words(vertex_source, edge_source, window = 2, pagerank_config = {'alpha': 0.85,}):
"""将单词按关键程度从大到小排序
Keyword arguments:
vertex_source -- 二维列表,子列表代表句子,子列表的元素是单词,这些单词用来构造pagerank中的节点
edge_source -- 二维列表,子列表代表句子,子列表的元素是单词,根据单词位置关系构造pagerank中的边
window -- 一个句子中相邻的window个单词,两两之间认为有边
pagerank_config -- pagerank的设置
"""
sorted_words = []
word_index = {}
index_word = {}
_vertex_source = vertex_source
_edge_source = edge_source
words_number = 0
for word_list in _vertex_source:
for word in word_list:
if not word in word_index:
word_index[word] = words_number
index_word[words_number] = word
words_number += 1
graph = np.zeros((words_number, words_number))
for word_list in _edge_source:
for w1, w2 in combine(word_list, window):
if w1 in word_index and w2 in word_index:
index1 = word_index[w1]
index2 = word_index[w2]
graph[index1][index2] = 1.0
graph[index2][index1] = 1.0
debug('graph:\n', graph)
nx_graph = nx.from_numpy_matrix(graph)
scores = nx.pagerank(nx_graph, **pagerank_config) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
for index, score in sorted_scores:
item = AttrDict(word=index_word[index], weight=score)
sorted_words.append(item)
return sorted_words
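# Illustrative usage sketch (not part of the original module); both sources are
# lists of tokenized sentences, typically produced by a word segmenter:
#
#   doc = [['text', 'rank', 'keyword'], ['keyword', 'extraction']]
#   top = sort_words(doc, doc, window=2)
#   # top[0].word and top[0].weight give the highest-ranked word and its score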
def sort_sentences(sentences, words, sim_func = get_similarity, pagerank_config = {'alpha': 0.85,}):
"""将句子按照关键程度从大到小排序
Keyword arguments:
sentences -- 列表,元素是句子
words -- 二维列表,子列表和sentences中的句子对应,子列表由单词组成
sim_func -- 计算两个句子的相似性,参数是两个由单词组成的列表
pagerank_config -- pagerank的设置
"""
sorted_sentences = []
_source = words
sentences_num = len(_source)
graph = np.zeros((sentences_num, sentences_num))
for x in xrange(sentences_num):
for y in xrange(x, sentences_num):
similarity = sim_func( _source[x], _source[y] )
graph[x, y] = similarity
graph[y, x] = similarity
nx_graph = nx.from_numpy_matrix(graph)
scores = nx.pagerank(nx_graph, **pagerank_config) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
for index, score in sorted_scores:
item = AttrDict(index=index, sentence=sentences[index], weight=score)
sorted_sentences.append(item)
return sorted_sentences
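# Hedged usage sketch for sort_sentences() (illustrative inputs, not from the original source):
#   sentences = ['sentence one', 'sentence two']
#   words = [['sentence', 'one'], ['sentence', 'two']]
#   ranked = sort_sentences(sentences, words)
#   ranked[0].sentence, ranked[0].index, ranked[0].weight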
if __name__ == '__main__':
pass
|
zwhinmedia/TextRank
|
textrank4zh/util.py
|
Python
|
apache-2.0
| 6,512
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a wrapper to PyReach env that allows pausing and resuming it.
This is used as a building block for EnvTimeSlicer().
"""
import enum
import threading
from typing import Any, Callable, List, Optional, Tuple
import gym # type: ignore
from pyreach.gyms import reach_env
class SessionEndReason(enum.Enum):
"""Enumerates different reasons for detecting that a session has ended.
Used by PausableEnv to notify why a session was detected to have ended.
"""
AGENT_CALLED_RESET = 1
STEP_RETURNED_DONE = 2
AGENT_FINISHED = 3
class PausableEnv(gym.Wrapper):
"""Makes a gym environment pausable.
This environment wrapper allows the environment to be paused and resumed at
will. When paused, any calls that might modify the environment (such as move
the robot), will be blocked (i.e. the code path will be put on hold).
Example usage -
pausable_env = PausableEnv(env)
# Start an agent in the background.
threading.Thread(target=agent.run, args=[pausable_env]).start()
# The environment may be paused or resumed at any time.
    # Pausing takes effect at the agent's next command; work already in progress continues.
# The next method called by the agent will not return until env resumes.
pausable_env.pause()
# On resume(), the env will be marked active. If any agent was running and
# blocked, it will immediately get control back.
# Context such as agent id will be monitored and restored on resume.
pausable_env.resume()
"""
def __init__(self,
env: reach_env.ReachEnv,
start_paused: bool = False) -> None:
super().__init__(env)
self._env = env
# Event to know if the environment is active or paused.
self._is_active = threading.Event()
# Since the agent id can be changed while this env is paused, we
# remember the agent id and set it again on resume.
self._last_agent_id: Optional[str] = None
# Contains a tuple with a custom context, and a callback.
# The callback will be called as -
# callback(session_end_reason, custom_context).
# The custom_context can be defined in add_session_callback().
self._session_end_callbacks: List[Tuple[Any,
Callable[[SessionEndReason, Any],
None]]] = []
if not start_paused:
self.resume()
def is_active(self) -> bool:
return self._is_active.is_set()
def wait_till_active(self) -> None:
self._is_active.wait()
def pause(self) -> None:
"""Pauses this environment.
All calls that may require the environment to do something will be paused,
until resume() is called.
"""
self._is_active.clear()
def resume(self) -> None:
"""Resumes this particular environment."""
if self._last_agent_id is not None:
self._env.set_agent_id(self._last_agent_id)
self._is_active.set()
def _delegate(self, method: Callable[..., Any], *args: Any,
**kwargs: Any) -> Any:
self._is_active.wait()
return method(*args, **kwargs)
def _notify_new_session(self, session_end_reason: SessionEndReason) -> None:
"""Notifies any handlers that a session has ended."""
for custom_context, on_session_end in self._session_end_callbacks:
on_session_end(session_end_reason, custom_context)
def add_session_end_callback(self,
fn: Callable[[SessionEndReason, Any], None],
context: Any = None) -> None:
self._session_end_callbacks.append((context, fn))
def agent_ended(self) -> None:
"""Can be called by an agent to communicate that a session has ended."""
self._notify_new_session(SessionEndReason.AGENT_FINISHED)
# The environment may get paused as a result of the call above.
# If so, we block the agent which called this method until resumed.
self._is_active.wait()
# Override all methods that need to be paused when this env is not activated.
def step(self, *args: Any, **kwargs: Any) -> Any:
result = self._delegate(self._env.step, *args, **kwargs)
    done = result[2] if len(result) >= 3 else False
if done:
self._notify_new_session(SessionEndReason.STEP_RETURNED_DONE)
return result
def reset(self, *args: Any, **kwargs: Any) -> Any:
result = self._delegate(self._env.reset, *args, **kwargs)
self._notify_new_session(SessionEndReason.AGENT_CALLED_RESET)
return result
def render(self, *args: Any, **kwargs: Any) -> Any:
return self._delegate(self._env.render, *args, **kwargs)
def close(self, *args: Any, **kwargs: Any) -> Any:
return self._delegate(self._env.close, *args, **kwargs)
def set_agent_id(self, agent_id: str) -> None:
self._last_agent_id = agent_id
return self._delegate(self._env.set_agent_id, agent_id)
|
google-research/pyreach
|
pyreach/gyms/pausable_env.py
|
Python
|
apache-2.0
| 5,412
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.db import models, IntegrityError
from django.db.models import F
from django.conf import settings
from django.utils.timezone import utc, localtime
from sis_provisioner.models import Import, ImportResource
from sis_provisioner.models.course import Course
from sis_provisioner.models.user import User
from sis_provisioner.dao.term import is_active_term
from sis_provisioner.dao.canvas import (
get_instructor_sis_import_role, ENROLLMENT_ACTIVE)
from sis_provisioner.exceptions import EmptyQueueException
from restclients_core.exceptions import DataFailureException
from datetime import datetime, timedelta
from logging import getLogger
logger = getLogger(__name__)
enrollment_log_prefix = 'ENROLLMENT:'
class EnrollmentManager(models.Manager):
def queue_by_priority(self, priority=ImportResource.PRIORITY_DEFAULT):
filter_limit = settings.SIS_IMPORT_LIMIT['enrollment']['default']
pks = super(EnrollmentManager, self).get_queryset().filter(
priority=priority, queue_id__isnull=True
).order_by(
'last_modified'
).values_list('pk', flat=True)[:filter_limit]
if not len(pks):
raise EmptyQueueException()
imp = Import(priority=priority, csv_type='enrollment')
imp.save()
super(EnrollmentManager, self).get_queryset().filter(
pk__in=list(pks)).update(queue_id=imp.pk)
return imp
def queued(self, queue_id):
return super(EnrollmentManager, self).get_queryset().filter(
queue_id=queue_id)
def dequeue(self, sis_import):
Course.objects.dequeue(sis_import)
if sis_import.is_imported():
# Decrement the priority
super(EnrollmentManager, self).get_queryset().filter(
queue_id=sis_import.pk, priority__gt=Enrollment.PRIORITY_NONE
).update(
queue_id=None, priority=F('priority') - 1)
else:
self.queued(sis_import.pk).update(queue_id=None)
self.purge_expired()
def purge_expired(self):
retention_dt = datetime.utcnow().replace(tzinfo=utc) - timedelta(
days=getattr(settings, 'ENROLLMENT_EVENT_RETENTION_DAYS', 180))
return super(EnrollmentManager, self).get_queryset().filter(
priority=Enrollment.PRIORITY_NONE,
last_modified__lt=retention_dt).delete()
def add_enrollment(self, enrollment_data):
section = enrollment_data.get('Section')
reg_id = enrollment_data.get('UWRegID')
role = enrollment_data.get('Role')
status = enrollment_data.get('Status').lower()
last_modified = enrollment_data.get('LastModified').replace(tzinfo=utc)
request_date = enrollment_data.get('RequestDate')
instructor_reg_id = enrollment_data.get('InstructorUWRegID', None)
course_id = '-'.join([section.term.canvas_sis_id(),
section.curriculum_abbr.upper(),
section.course_number,
section.section_id.upper()])
        if section.is_primary_section:
            primary_course_id = None
        else:
            primary_course_id = section.canvas_course_sis_id()
full_course_id = '-'.join([course_id, instructor_reg_id]) if (
instructor_reg_id is not None) else course_id
try:
course = Course.objects.get(course_id=full_course_id)
if course.provisioned_date:
enrollment = Enrollment.objects.get(course_id=course_id,
reg_id=reg_id,
role=role)
if (last_modified > enrollment.last_modified or (
last_modified == enrollment.last_modified and
status == ENROLLMENT_ACTIVE)):
enrollment.status = status
enrollment.last_modified = last_modified
enrollment.request_date = request_date
enrollment.primary_course_id = primary_course_id
enrollment.instructor_reg_id = instructor_reg_id
if enrollment.queue_id is None:
enrollment.priority = enrollment.PRIORITY_DEFAULT
else:
enrollment.priority = enrollment.PRIORITY_HIGH
logger.info('{} IN QUEUE {}, {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id,
role, enrollment.queue_id))
enrollment.save()
logger.info('{} UPDATE {}, {}, {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id, role,
status, last_modified))
else:
logger.info('{} IGNORE {}, {}, {} before {}'.format(
enrollment_log_prefix, full_course_id, reg_id,
last_modified, enrollment.last_modified))
else:
logger.info('{} IGNORE Unprovisioned course {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id, role))
course.priority = course.PRIORITY_HIGH
course.save()
except Enrollment.DoesNotExist:
enrollment = Enrollment(course_id=course_id, reg_id=reg_id,
role=role, status=status,
last_modified=last_modified,
primary_course_id=primary_course_id,
instructor_reg_id=instructor_reg_id)
try:
enrollment.save()
logger.info('{} ADD {}, {}, {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id, role,
status, last_modified))
except IntegrityError:
self.add_enrollment(enrollment_data) # Try again
except Course.DoesNotExist:
if is_active_term(section.term):
# Initial course provisioning effectively picks up event
course = Course(course_id=full_course_id,
course_type=Course.SDB_TYPE,
term_id=section.term.canvas_sis_id(),
primary_id=primary_course_id,
priority=Course.PRIORITY_HIGH)
try:
course.save()
logger.info(
'{} IGNORE Unprovisioned course {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id,
role))
except IntegrityError:
self.add_enrollment(enrollment_data) # Try again
else:
logger.info('{} IGNORE Inactive section {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id, role))
class Enrollment(ImportResource):
""" Represents the provisioned state of an enrollment.
"""
reg_id = models.CharField(max_length=32)
status = models.CharField(max_length=16)
role = models.CharField(max_length=32)
course_id = models.CharField(max_length=80)
last_modified = models.DateTimeField()
request_date = models.DateTimeField(null=True)
primary_course_id = models.CharField(max_length=80, null=True)
instructor_reg_id = models.CharField(max_length=32, null=True)
priority = models.SmallIntegerField(
default=ImportResource.PRIORITY_DEFAULT,
choices=ImportResource.PRIORITY_CHOICES)
queue_id = models.CharField(max_length=30, null=True)
objects = EnrollmentManager()
def is_active(self):
return self.status.lower() == ENROLLMENT_ACTIVE.lower()
def is_instructor(self):
return self.role.lower() == get_instructor_sis_import_role()
def json_data(self):
return {
"reg_id": self.reg_id,
"status": self.status,
"course_id": self.course_id,
"last_modified": localtime(self.last_modified).isoformat() if (
self.last_modified is not None) else None,
"request_date": localtime(self.request_date).isoformat() if (
self.request_date is not None) else None,
"primary_course_id": self.primary_course_id,
"instructor_reg_id": self.instructor_reg_id,
"role": self.role,
"priority": self.PRIORITY_CHOICES[self.priority][1],
"queue_id": self.queue_id,
}
class Meta:
unique_together = ("course_id", "reg_id", "role")
class InvalidEnrollmentManager(models.Manager):
def queue_by_priority(self, priority=ImportResource.PRIORITY_DEFAULT):
filter_limit = settings.SIS_IMPORT_LIMIT['enrollment']['default']
pks = super(InvalidEnrollmentManager, self).get_queryset().filter(
priority=priority, queue_id__isnull=True
).order_by('pk').values_list('pk', flat=True)[:filter_limit]
if not len(pks):
raise EmptyQueueException()
imp = Import(priority=priority, csv_type='invalid_enrollment')
imp.save()
super(InvalidEnrollmentManager, self).get_queryset().filter(
pk__in=list(pks)).update(queue_id=imp.pk)
return imp
def queued(self, queue_id):
return super(InvalidEnrollmentManager, self).get_queryset().filter(
queue_id=queue_id)
def dequeue(self, sis_import):
if sis_import.is_imported():
self.queued(sis_import.pk).update(
queue_id=None, priority=InvalidEnrollment.PRIORITY_NONE)
def add_enrollments(self):
check_roles = getattr(settings, 'ENROLLMENT_TYPES_INVALID_CHECK')
for user in User.objects.get_invalid_enrollment_check_users():
# Verify that the check conditions still exist
if user.is_affiliate_user() or user.is_sponsored_user():
# User is OK to have any of the check_roles, restore if needed
for inv in InvalidEnrollment.objects.filter(
user=user, restored_date__isnull=True):
inv.priority = InvalidEnrollment.PRIORITY_DEFAULT
inv.save()
elif user.is_student_user():
# User is not OK to have any of the check_roles
try:
for enr in user.get_active_sis_enrollments(
roles=check_roles):
inv, _ = InvalidEnrollment.objects.get_or_create(
user=user, role=enr.role,
section_id=enr.sis_section_id)
if inv.priority == InvalidEnrollment.PRIORITY_NONE:
inv.priority = InvalidEnrollment.PRIORITY_DEFAULT
inv.save()
except DataFailureException as ex:
if ex.status != 404:
raise
# Clear check flag
user.invalid_enrollment_check_required = False
user.save()
class InvalidEnrollment(ImportResource):
user = models.ForeignKey(User, on_delete=models.CASCADE)
role = models.CharField(max_length=32)
section_id = models.CharField(max_length=80)
found_date = models.DateTimeField(auto_now_add=True)
deleted_date = models.DateTimeField(null=True)
restored_date = models.DateTimeField(null=True)
priority = models.SmallIntegerField(
default=ImportResource.PRIORITY_DEFAULT,
choices=ImportResource.PRIORITY_CHOICES)
queue_id = models.CharField(max_length=30, null=True)
class Meta:
constraints = [
models.UniqueConstraint(fields=['user', 'role', 'section_id'],
name='unique_enrollment')
]
objects = InvalidEnrollmentManager()
|
uw-it-aca/canvas-sis-provisioner
|
sis_provisioner/models/enrollment.py
|
Python
|
apache-2.0
| 12,161
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-03 15:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('messaging', '0003_blacklistedemail_stripped_email'),
]
operations = [
migrations.AddField(
model_name='message',
name='recipient_email_stripped',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='message',
name='sender_email_stripped',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
|
erikr/happinesspackets
|
happinesspackets/messaging/migrations/0004_auto_20160403_1742.py
|
Python
|
apache-2.0
| 747
|
import json
pkgs = [
# your stuff here...
]
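# Each pkgs entry is expected to be a (path, metadata) pair where metadata is a JSON string,
# based on the loop below; a hypothetical example entry would look like:
#   ("/usr/lib/python3.8/site-packages/example", '{"name": "example", "version": "1.0.0"}')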
result = {}
for path, metadata in pkgs:
result[path] = json.loads(metadata)
# Run this script with "> result.py", then auto-format the output with black in your IDE (pprint does not concatenate adjacent strings into a single string; black will).
print(result)
|
anchore/anchore-engine
|
tests/functional/clients/scripts/convert-pkg-list.py
|
Python
|
apache-2.0
| 290
|
#!/usr/bin/env python
from __future__ import print_function
from docopt import docopt
from cloudmesh.cm_mongo import cm_mongo
from cloudmesh.config.cm_config import cm_config
from cloudmesh_base.logger import LOGGER
from tabulate import tabulate
log = LOGGER(__file__)
def shell_command_image(arguments):
"""
::
Usage:
image
image <cm_cloud>... [--refresh]
image -h | --help
image --version
Options:
-h help message
--refresh refresh images of IaaS
Arguments:
cm_cloud Name of the IaaS cloud e.g. india_openstack_grizzly.
Description:
        The image command lists the available images. An image describes a
        pre-configured virtual machine image.
Result:
Examples:
$ image india_openstack_grizzly
"""
# log.info(arguments)
cloud_names = arguments['<cm_cloud>']
# None value means ALL clouds in c.images() function
if not cloud_names:
cloud_names = None
config = cm_config()
username = config.username()
c = cm_mongo()
c.activate(cm_user_id=username)
if arguments['--refresh']:
c.refresh(cm_user_id=username, names=cloud_names, types=['images'])
images_dict = c.images(cm_user_id=username, clouds=cloud_names)
your_keys = {"openstack":
[
# ["Metadata", "metadata"],
["status", "status"],
["name", "name"],
["id", "id"],
["type_id", "metadata", "instance_type_id"],
["iname", "metadata", "instance_type_name"],
["location", "metadata", "image_location"],
["state", "metadata", "image_state"],
["updated", "updated"],
["minDisk", "minDisk"],
["memory_mb", "metadata", 'instance_type_memory_mb'],
["fid", "metadata", "instance_type_flavorid"],
["vcpus", "metadata", "instance_type_vcpus"],
["user_id", "metadata", "user_id"],
["owner_id", "metadata", "owner_id"],
["gb", "metadata", "instance_type_root_gb"],
["arch", ""]
],
"ec2":
[
# ["Metadata", "metadata"],
["state", "extra", "state"],
["name", "name"],
["id", "id"],
["public", "extra", "is_public"],
["ownerid", "extra", "owner_id"],
["imagetype", "extra", "image_type"]
],
"azure":
[
["name", "label"],
["category", "category"],
["id", "id"],
["size", "logical_size_in_gb"],
["os", "os"]
],
"aws":
[
["state", "extra", "state"],
["name", "name"],
["id", "id"],
["public", "extra", "ispublic"],
["ownerid", "extra", "ownerid"],
["imagetype", "extra", "imagetype"]
]
}
images = _select_images(images_dict, your_keys)
_display(images)
def _select_images(data, selected_keys, env=[]):
"""
status ACTIVE
updated 2013-05-26T19:29:09Z
name menghan/custom-utuntu-01
links [{u'href': u'http://198.202.120.83:8774/v1.1/1ae6813a3a6d4cebbeb1912f6d139ad0/images/502a5967-18ff-448b-830f-d6150b650d6b', u'rel': u'self'}, {u'href': u'http://198.202.120.83:8774/1ae6813a3a6d4cebbeb1912f6d139ad0/images/502a5967-18ff-448b-830f-d6150b650d6b', u'rel': u'bookmark'}, {u'href': u'http://198.202.120.83:9292/1ae6813a3a6d4cebbeb1912f6d139ad0/images/502a5967-18ff-448b-830f-d6150b650d6b', u'type': u'application/vnd.openstack.image', u'rel': u'alternate'}]
created 2013-05-26T19:28:09Z
minDisk 0
metadata {u'instance_uuid': u'16a5f5ac-7f39-4b01-a2c3-b2003beffb9d',
u'image_location': u'snapshot',
u'image_state': u'available',
u'instance_type_memory_mb': u'2048',
u'instance_type_swap': u'0',
u'instance_type_vcpu_weight': u'None',
u'image_type': u'snapshot',
u'instance_type_id': u'5',
u'ramdisk_id': None,
u'instance_type_name': u'm1.small',
u'instance_type_ephemeral_gb': u'0',
u'instance_type_rxtx_factor': u'1',
u'kernel_id': None,
u'instance_type_flavorid': u'2',
u'instance_type_vcpus': u'1',
u'user_id': u'f603818711324203970ed1e3bb4b90ed',
u'instance_type_root_gb': u'20',
attributes = {"openstack":
[
['name','name'],
['status','status'],
['addresses','addresses'],
['flavor', 'flavor','id'],
['id','id'],
['image','image','id'],
['user_id', 'user_id'],
['metadata','metadata'],
['key_name','key_name'],
['created','created'],
],
"ec2":
[
["name", "id"],
["status", "extra", "status"],
["addresses", "public_ips"],
["flavor", "extra", "instance_type"],
['id','id'],
['image','extra', 'imageId'],
["user_id", 'user_id'],
["metadata", "metadata"],
["key_name", "extra", "key_name"],
["created", "extra", "launch_time"]
],
"aws":
[
["name", "name"],
["status", "extra", "status"],
["addresses", "public_ips"],
["flavor", "extra", "instance_type"],
['id','id'],
['image','extra', 'image_id'],
["user_id","user_id"],
["metadata", "metadata"],
["key_name", "extra", "key_name"],
["created", "extra", "launch_time"]
],
"azure":
[
['name','name'],
['status','status'],
['addresses','vip'],
['flavor', 'flavor','id'],
['id','id'],
['image','image','id'],
['user_id', 'user_id'],
['metadata','metadata'],
['key_name','key_name'],
u'base_image_ref': u'1a5fd55e-79b9-4dd5-ae9b-ea10ef3156e9',
u'owner_id': u'1ae6813a3a6d4cebbeb1912f6d139ad0'}
server {u'id': u'16a5f5ac-7f39-4b01-a2c3-b2003beffb9d', u'links': [{u'href': u'http://198.202.120.83:8774/v1.1/1ae6813a3a6d4cebbeb1912f6d139ad0/servers/16a5f5ac-7f39-4b01-a2c3-b2003beffb9d', u'rel': u'self'}, {u'href': u'http://198.202.120.83:8774/1ae6813a3a6d4cebbeb1912f6d139ad0/servers/16a5f5ac-7f39-4b01-a2c3-b2003beffb9d', u'rel': u'bookmark'}]}
cm_id sierra_openstack_grizzly-images-menghan/custom-utuntu-01
cm_refresh 2013-08-06T21-44-13Z
cm_cloud sierra_openstack_grizzly
minRam 0
progress 100
cm_kind images
_id 5201a66d7df38caf0fe160b5
cm_type openstack
id 502a5967-18ff-448b-830f-d6150b650d6b
OS-EXT-IMG-SIZE:size 876216320
b99fa4c8-6b92-49e6-b53f-37e56f9383b6
"""
images = []
keys = []
def _getFromDict(dataDict, mapList):
'''Get values of dataDict by mapList
mapList is a list of keys to find values in dict.
dataDict is a nested dict and will be searched by the list.
e.g. Access to the value 5 in dataDict
dataDict = { "abc": {
"def": 5
}
}
mapList = ["abc", "def"]
_getFromDict(dataDict, mapList) returns 5
ref: http://stackoverflow.com/questions/14692690/access-python-nested-dictionary-items-via-a-list-of-keys
'''
return reduce(lambda d, k: d[k], mapList, dataDict)
for cm_cloud, _id in data.iteritems():
for image_name, v in _id.iteritems():
values = [cm_cloud]
# cm_type is required to use a selected_keys for the cm_type
cm_type = v['cm_type']
keys = []
for k in selected_keys[cm_type]:
keys.append(k[0])
try:
values.append(_getFromDict(v, k[1:]))
except:
# print sys.exc_info()
values.append(0)
images.append(values)
headers = [keys]
return headers + images
def _display(json_data, headers="firstrow", tablefmt="orgtbl"):
table = tabulate(json_data, headers, tablefmt)
try:
separator = table.split("\n")[1].replace("|", "+")
except:
separator = "-" * 50
print(separator)
print(table)
print(separator)
def main():
arguments = docopt(shell_command_image.__doc__)
shell_command_image(arguments)
if __name__ == "__main__":
# print sys.argv
main()
|
rajpushkar83/cloudmesh
|
cloudmesh/shell/cm_image.py
|
Python
|
apache-2.0
| 9,773
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Security (SSL) Settings
Usage:
import libcloud.security
libcloud.security.VERIFY_SSL_CERT = True
# optional
libcloud.security.CA_CERTS_PATH.append("/path/to/cacert.txt")
"""
VERIFY_SSL_CERT = True
VERIFY_SSL_CERT_STRICT = True
# File containing one or more PEM-encoded CA certificates
# concatenated together
CA_CERTS_PATH = [
# centos/fedora: openssl
'/etc/pki/tls/certs/ca-bundle.crt',
# debian/ubuntu/arch/gentoo: ca-certificates
'/etc/ssl/certs/ca-certificates.crt',
# freebsd: ca_root_nss
'/usr/local/share/certs/ca-root-nss.crt',
# macports: curl-ca-bundle
'/opt/local/share/curl/curl-ca-bundle.crt',
]
CA_CERTS_UNAVAILABLE_WARNING_MSG = (
'Warning: No CA Certificates were found in CA_CERTS_PATH. '
'Toggling VERIFY_SSL_CERT to False.'
)
CA_CERTS_UNAVAILABLE_ERROR_MSG = (
'No CA Certificates were found in CA_CERTS_PATH. '
)
VERIFY_SSL_DISABLED_MSG = (
'SSL certificate verification is disabled, this can pose a '
'security risk. For more information how to enable the SSL '
'certificate verification, please visit the libcloud '
'documentation.'
)
|
Keisuke69/libcloud
|
libcloud/security.py
|
Python
|
apache-2.0
| 1,928
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from collections import namedtuple, defaultdict, Counter
import subprocess
import re
from ansicolor import red, yellow, green, black
def readFileLines(filename):
with open(filename, encoding="iso-8859-1") as infile:
return infile.read().split("\n")
def extractToolStatistics(lines):
"""
From a list of excellon drill lines extract the number of holes for all drill sizes.
Returns a dict: {drill size: number of holes}
"""
#Get a tool --> diameter mapping
tools = extractExcellonTools(lines)
#Iterate over lines and count holes for each tool
currentTool = None
drillCountByDiameter = defaultdict(int)
toolRegex = re.compile(r"^(T\d+)$")
drillRegex = re.compile(r"^X[\+-]\d+Y[\+-]\d+$")
for line in lines:
if toolRegex.match(line):
#This line defines a new tool to use
currentTool = toolRegex.match(line).group(0)
if drillRegex.match(line):
drillCountByDiameter[tools[currentTool]] += 1
return drillCountByDiameter
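# Illustrative example for extractToolStatistics() (added, not part of the original script):
#   lines = ["T01C1.0", "T02C2.2", "T01", "X+010000Y+020000", "X+015000Y+020000",
#            "T02", "X+020000Y+030000"]
#   extractToolStatistics(lines) -> {1.0: 2, 2.2: 1}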
def extractExcellonTools(lines):
"""
From a list of excellon lines, extract a dict of tools
Ignores non-tool-definition lines
Example: ["foobar", "T01C1.0", "T02C2.2"] -> {"T01": 1.0, "T02": 2.2}
"""
#Extract those lines that match a regex
toolDefRegex = re.compile(r"^(T\d+)C([0-9\.]+)$")
toolDefMatches = [toolDefRegex.match(l) for l in lines if toolDefRegex.match(l)]
return dict([(t.group(1), float(t.group(2))) for t in toolDefMatches])
def checkExcellonMetric(self, filepath):
"Check if a given file is a metric excellon file"
filename = os.path.basename(filepath)
lines = readFileLines(filepath)
#Check for excellon header
if lines[0] != "M48":
print(red("Can't find Excellon drill header (M48) in %s" % filename, bold="True"))
#Check for metric dimension: Line like METRIC,0000.00
if lines[1].partition(",")[0] != "METRIC":
print(red("Excellon drill program %s does not seem to be metric" % filename, bold="True"))
#
# Drill statistics
#
toolStats = extractToolStatistics(lines)
print(black(self.name + ":", bold=True))
for diameter, numDrills in toolStats.items():
print("\t%d through holes of diameter %.2fmm" % (numDrills, diameter))
#Print "None" if there are no holes in this file
if not toolStats:
print("\tNone")
#Multimap of allowed layer notes (ExpectedFile.name --> [%LN])
#Built for diptrace. Might need to be adjusted for other EDA tools.
allowedLayerNotes = defaultdict(list)
allowedLayerNotes.update({
"Top copper layer": ["Top", ['Copper', 'L1', 'Top']],
"Bottom copper layer": ["Bottom", ['Copper', 'L2', 'Bot']],
"Solder mask top": ["TopMask", ['Soldermask', 'Top']],
"Solder mask bottom": ["BotMask", ['Soldermask', 'Bot']],
"Board outline": ["BoardOutline", ['Profile']],
"Silk screen top": ["TopSilk", ['Legend', 'Top']],
})
#Gerber aperture
# id: The aperture identifier, e.g. D11
# type: "C"/"R"
# diameter: float, with implicit units
Aperture = namedtuple("Aperture", ["id", "type", "diameter"])
def parseGerberApertures(lines):
"From a list of gerber lines, parse all embedded apertures"
apertureRegex = re.compile(r"%AD(D\d+)([CR]),(\d+\.\d+)\*%")
apertures = []
#Find lines defining apertures
for line in lines:
if apertureRegex.match(line):
match = apertureRegex.match(line)
apertures.append(Aperture(match.group(1), match.group(2), float(match.group(3))))
return apertures
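# Illustrative example for parseGerberApertures() (added, not in the original script):
# an extended-Gerber aperture definition such as "%ADD11C,0.152*%" parses to
# Aperture(id='D11', type='C', diameter=0.152).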
def findAperture(apertures, identifier):
"Find an aperture in a list of apertures (returns None if not found)"
for aperture in apertures:
if aperture.id == identifier: return aperture
return None
def parseGerberUnit(lines):
"""Returns the extended gerber unit ("mm"/"in") or None if not found"""
if "%MOIN*%" in lines:
return "in"
elif "%MOMM*%" in lines:
return "mm"
else: return None
def findCoordinateFormat(lines):
"""
Try to find a FSLAX line and return the decimal-point factor for coordinates.
"""
rgx = re.compile(r"\%FSLAX(\d{2})Y(\d{2})\*\%")
for line in lines:
m = rgx.match(line)
if m is not None:
return 10.**int(m.group(1)[-1]),10.**int(m.group(2)[-1])
print(red("Could not find coordinate format info %FSLAX. Using default %FSLAX33"))
return 100000.,100000.
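# Illustrative example for findCoordinateFormat() (added for clarity): a format line such as
# "%FSLAX34Y34*%" declares 4 decimal digits on each axis, so the function returns
# (10000.0, 10000.0) and raw coordinates are divided by those factors below.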
def checkBoardOutline(self, filepath):
filename = os.path.basename(filepath)
#Basic gerber checks
checkGerberFile(self, filepath)
#Compute board outline
millLines = readFileLines(filepath)
# Find factors to get absolute coordinates:
x_factor, y_factor = findCoordinateFormat(millLines)
# Initialize X & Y
x, y = 0, 0
#We can only interpret the file if coordinates are absolute
if not "G90*" in millLines:
print(yellow("Mill coordinates in %s don't seem to be absolute (G90 missing!)" % filename))
return
#Determine coordinate units
unit = parseGerberUnit(millLines)
if unit is None: #Neither inch nor mm found
print(yellow("Could not find coordinate units (mm/in) in %s" % filename))
return
#Parse the aperture list
apertures = parseGerberApertures(millLines)
selectApertureRegex = re.compile(r"(D\d+)\*")
move2DRegex = re.compile(r"X(-?\d+)Y(-?\d+)D(\d+)\*") #Move (D2) or draw (D1)
move1DRegex = re.compile(r"([XY])(-?\d+)D(\d+)\*") #With only one coordinate
#Try to interpret gerber file
minCoords = (sys.maxsize, sys.maxsize)
maxCoords = (0, 0)
lastCoords = (0, 0)
currentAperture = None
apertureUseCount = Counter()
for line in millLines:
if selectApertureRegex.match(line):
apertureCode = selectApertureRegex.match(line).group(1)
currentAperture = findAperture(apertures, apertureCode)
elif move2DRegex.match(line):
match = move2DRegex.match(line)
x = int(match.group(1)) / x_factor
y = int(match.group(2)) / y_factor
apertureUseCount[currentAperture] += 1
elif move1DRegex.match(line):
match = move1DRegex.match(line)
apertureUseCount[currentAperture] += 1
if match.group(1) == "X":
x = int(match.group(2)) / x_factor
y = lastCoords[1]
elif match.group(1) == "Y":
x = lastCoords[0]
y = int(match.group(2)) / y_factor
else: raise Exception("Internal error: Invalid coordinate type in 1D move: %s" % match.group(1))
else: continue
#Compute min/max coordinates
lastCoords = (x, y)
minCoords = (min(minCoords[0], lastCoords[0]), min(minCoords[1], lastCoords[1]))
maxCoords = (max(maxCoords[0], lastCoords[0]), max(maxCoords[1], lastCoords[1]))
#Compute board size (minimum enclosing rectangle)
boardSize = (maxCoords[0] - minCoords[0], maxCoords[1] - minCoords[1])
# Compute size of most common aperture
mostCommonAperture = apertureUseCount.most_common(1)[0][0]
# info
print(black("\tGerber offset: ({1:.2f} {0}, {2:.2f} {0})".format(unit, minCoords[0], minCoords[1])))
print(black("\tBoard size (minimum rectangle): %.1f %s x %.1f %s" % \
(boardSize[0], unit, boardSize[1], unit)))
#print(black("\tBoard outline aperture size: {0:.2f} µm".format(1e3 * mostCommonAperture.diameter), bold=True))
def checkCopperLayer(self, filepath):
#Basic gerber checks
checkGerberFile(self, filepath)
#Check if smallest aperture is < 6mil = 150um
#NOTE: We currently don't compute the clearance (way too complicated)
lines = readFileLines(filepath)
apertures = parseGerberApertures(lines)
unit = parseGerberUnit(lines)
limit = 0.125 #TODO use inches if unit == "in"
if unit == "in": limit = 0.006
for aperture in apertures:
if aperture.diameter < limit:
print(red("Aperture %s (size %.3f %s) is smaller than %.3f %s minimum width" % \
(aperture.id, aperture.diameter, unit, limit, unit)))
def checkGerberFile(self, filepath):
"""
Check if the given file is a RS-274X gerber file
- Checks for a G04 command at the beginning of the file
- Checks for a %LN command and verifies it against the filename
- Checks for a G04 #@! TF.FileFunction command
"""
filename = os.path.basename(filepath)
lines = readFileLines(filepath)
#Find G04 line (i.e. what software created the file)
if not any(map(lambda l: l.startswith("G04 "), lines)):
print(red("Couldn't find G04 command (software description) in %s. Probably not a Gerber file." % filename, bold=True))
#Find %LN line, i.e. what the creating
# software thinks the current layer is (e.g. "BottomMask")
layerNoteRegex = re.compile(r"^\%LN([^\*]+)\*%$")
fileFunctionRegex = re.compile(r"G04 #@! TF\.FileFunction,([^\*]+)\*")
layerDescription = None
for line in lines:
if layerNoteRegex.match(line):
layerDescription = layerNoteRegex.match(line).group(1)
break #Expecting only one layer note
elif fileFunctionRegex.match(line):
layerDescription = fileFunctionRegex.match(line).group(1)
layerDescription = layerDescription.split(",")
#Check if the layer note we found makes sense
    if layerDescription is None: #No %LN line found
print(yellow("Couldn't find %%LN command or file function command in %s" % filename))
else: #We found a layer description. Check for sanity
if isinstance(layerDescription, list): # FileFunction command
if layerDescription not in allowedLayerNotes[self.name]:
print(red("Layer description '%s' in %s does not match any of the expected descriptions: %s" % (layerDescription, filename, allowedLayerNotes[self.name]), bold=True))
else: # %LN command
if layerDescription not in allowedLayerNotes[self.name]:
print(red("Layer description '%s' in %s does not match any of the expected descriptions: %s" % (layerDescription, filename, allowedLayerNotes[self.name]), bold=True))
def extractProjectPrefix(files):
"""
Extract a common project prefix from all files in a directory
Fails & exits if no such prefix is found
Example: [ABC.top, ABC.bot] => "ABC"
"""
commonprefix = os.path.commonprefix(files)
if not commonprefix or not commonprefix.endswith("."):
print(red("Can't extract project name from files: %s" % ", ".join(files), bold=True))
print(red("Please ensure that all files have a common filename and only differ in their extension!", bold=True))
print(red("Example: MyBoard.top, MyBoard.bot, ...", bold=True))
sys.exit(1)
    return commonprefix[:-1] #Strip off the trailing dot
def checkFile(directory, expectedFile, projectName):
"Check if a given expected file exists inside a directory"
filename = projectName + expectedFile.extension
filepath = os.path.join(directory, filename)
if os.path.isfile(filepath):
print(green("Found %s data %s" % (expectedFile.format, filename)))
if expectedFile.checkFN is not None:
expectedFile.checkFN(expectedFile, filepath)
else:
print(red("File %s (%s) missing" % (filename, expectedFile.name), bold=True))
return None
return filename
ExpectedFile = namedtuple('ExpectedFile', ['extension', 'name', 'format', 'checkFN'])
expectedFiles = [
#http://www.multi-circuit-boards.eu/support/leiterplatten-daten/gerber-daten.html
ExpectedFile(".top", "Top copper layer", "RS-274X", checkCopperLayer),
ExpectedFile(".bot", "Bottom copper layer", "RS-274X", checkCopperLayer),
ExpectedFile(".smt", "Solder mask top", "RS-274X", checkGerberFile),
ExpectedFile(".smb", "Solder mask bottom", "RS-274X", checkGerberFile),
ExpectedFile(".plt", "Silk screen top", "RS-274X", checkGerberFile),
ExpectedFile(".mil", "Board outline", "RS-274X", checkBoardOutline),
#Drilling
ExpectedFile(".pth", "Plated through holes", "Excellon", checkExcellonMetric),
ExpectedFile(".npth", "Non-plated through holes", "Excellon", checkExcellonMetric),
]
if __name__ == "__main__":
#Parse commandline arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="The directory to scan for project Gerber file")
parser.add_argument("--gerbv", action="store_true", help="Run gerbv on the files")
args = parser.parse_args()
#Perform check
files = os.listdir(args.directory)
projectName = extractProjectPrefix(files)
print(black("Project name: %s" % projectName))
checkedFiles = [checkFile(args.directory, f, projectName) for f in expectedFiles]
unknownFiles = set(files) - set(checkedFiles)
if unknownFiles:
print(red("Found unknown files: %s" % ",".join(unknownFiles)))
#Open viewer if enabled
if args.gerbv:
filePaths = [os.path.join(args.directory, f) for f in files]
subprocess.call(["gerbv"] + filePaths)
|
ulikoehler/PCBCheck
|
pcbcheck.py
|
Python
|
apache-2.0
| 13,258
|
#!/usr/bin/env python
#
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the operations to perform database consistency checking
on two databases.
"""
from mysql.utilities.common.tools import check_python_version
# Check Python version compatibility
check_python_version()
import os
import re
import sys
from mysql.utilities.exception import UtilError, FormatError
from mysql.utilities.command.dbcompare import database_compare
from mysql.utilities.common.ip_parser import parse_connection
from mysql.utilities.common.dbcompare import DEFAULT_SPAN_KEY_SIZE
from mysql.utilities.common.pattern_matching import REGEXP_OBJ_NAME
from mysql.utilities.common.tools import check_connector_python
from mysql.utilities.common.messages import (PARSE_ERR_DB_PAIR,
PARSE_ERR_DB_PAIR_EXT,
PARSE_ERR_DB_MISSING_CMP,
PARSE_ERR_SPAN_KEY_SIZE_TOO_LOW)
from mysql.utilities.common.options import (add_difftype, add_verbosity,
check_verbosity,
add_changes_for, add_reverse,
add_format_option,
add_character_set_option,
add_ssl_options, get_ssl_dict,
setup_common_options,
check_password_security)
from mysql.utilities.common.sql_transform import (is_quoted_with_backticks,
remove_backtick_quoting,
quote_with_backticks)
# Constants
NAME = "MySQL Utilities - mysqldbcompare "
DESCRIPTION = "mysqldbcompare - compare databases for consistency"
USAGE = "%prog --server1=user:pass@host:port:socket " + \
"--server2=user:pass@host:port:socket db1:db2"
PRINT_WIDTH = 75
# Check for connector/python
if not check_connector_python():
sys.exit(1)
if __name__ == '__main__':
# Setup the command parser
parser = setup_common_options(os.path.basename(sys.argv[0]),
DESCRIPTION, USAGE, server=False)
# Connection information for the source server
parser.add_option("--server1", action="store", dest="server1",
type="string", default="root@localhost:3306",
help="connection information for first server in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>][:<socket>].")
# Connection information for the destination server
parser.add_option("--server2", action="store", dest="server2",
type="string", default=None,
help="connection information for second server in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>][:<socket>].")
# Add character set option
add_character_set_option(parser)
# Output format
add_format_option(parser, "display the output in either grid (default), "
"tab, csv, or vertical format", "grid")
# Add skips
parser.add_option("--skip-checksum-table", action="store_true",
dest="no_checksum_table",
help="skip CHECKSUM TABLE step in data consistency "
"check.")
parser.add_option("--skip-object-compare", action="store_true",
dest="no_object_check",
help="skip object comparison step.")
parser.add_option("--skip-row-count", action="store_true",
dest="no_row_count",
help="skip row count step.")
parser.add_option("--skip-diff", action="store_true",
dest="no_diff",
help="skip the object diff step.")
parser.add_option("--skip-data-check", action="store_true",
dest="no_data",
help="skip data consistency check.")
# Skip check of table options.
parser.add_option("--skip-table-options", action="store_true",
dest="skip_tbl_opts",
help="skip check of all table options (e.g., "
"AUTO_INCREMENT, ENGINE, CHARSET, etc.).")
# Add display width option
parser.add_option("--width", action="store", dest="width",
type="int", help="display width",
default=PRINT_WIDTH)
# run-all-tests mode
parser.add_option("-a", "--run-all-tests", action="store_true",
dest="run_all_tests",
help="do not abort when a diff test fails")
# Add compact option for resulting diff
parser.add_option("-c", "--compact", action="store_true",
dest="compact", help="compact output from a diff.")
# turn off binlog mode
parser.add_option("--disable-binary-logging", action="store_true",
default=False, dest="toggle_binlog",
help="turn binary logging off during operation if "
"enabled (SQL_LOG_BIN=1). Note: may require SUPER "
"privilege. Prevents compare operations from being "
"written to the binary log.")
# add the span key option
parser.add_option(
"--span-key-size", action="store", default=DEFAULT_SPAN_KEY_SIZE,
type="int", dest="span_key_size",
help="changes the size of the key used for compare table contents. A "
"higher value can help to get more accurate results comparing "
"large databases, but may slow the algorithm. Default value is "
"{0}.".format(DEFAULT_SPAN_KEY_SIZE)
)
# add the use indexes option
parser.add_option(
"--use-indexes", action="store", type="string", default='',
dest="use_indexes",
help="for each table, indicate which index to use as if were a "
"primary key (each of his columns must not allow null values)."
)
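    # Judging from the regex parsing further below, --use-indexes appears to take
    # semicolon-separated table.index pairs, e.g. --use-indexes=t1.idx1;t2.idx2
    # (illustrative names; backtick-quoted identifiers appear to be accepted as well).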
# Add verbosity and quiet (silent) mode
add_verbosity(parser, True)
# Add difftype option
add_difftype(parser, True)
# Add the direction (changes-for)
add_changes_for(parser)
# Add show reverse option
add_reverse(parser)
# Add ssl options
add_ssl_options(parser)
# Now we process the rest of the arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args, "# ")
# Warn if quiet and verbosity are both specified
check_verbosity(opt)
# check unique keys
ukey_regexp = re.compile(r'(?:(?:;){{0,1}}{0}\.{0})'
''.format(REGEXP_OBJ_NAME))
db_idxes_l = None
# Split the table names considering backtick quotes
if opt.use_indexes:
grp = ukey_regexp.findall(opt.use_indexes)
if not grp:
parser.error("Can't parse the specified --use-indexes argument {0}"
"".format(opt.use_indexes))
db_idxes_l = []
for table, index in grp:
table_uc = (table if is_quoted_with_backticks(table)
else quote_with_backticks(table))
index_uc = (index if is_quoted_with_backticks(index)
else quote_with_backticks(index))
db_idxes_l.append((table_uc, index_uc))
# Set options for database operations.
options = {
"quiet": opt.quiet,
"verbosity": opt.verbosity,
"difftype": opt.difftype,
"run_all_tests": opt.run_all_tests,
"width": opt.width,
"no_checksum_table": opt.no_checksum_table,
"no_object_check": opt.no_object_check,
"no_diff": opt.no_diff,
"no_row_count": opt.no_row_count,
"no_data": opt.no_data,
"format": opt.format,
"toggle_binlog": opt.toggle_binlog,
"changes-for": opt.changes_for,
"reverse": opt.reverse,
"span_key_size": opt.span_key_size,
"skip_table_opts": opt.skip_tbl_opts,
"charset": opt.charset,
"use_indexes": db_idxes_l,
"compact": opt.compact
}
# Add ssl options to options instead of connection.
options.update(get_ssl_dict(opt))
# Parse server connection values
server2_values = None
try:
server1_values = parse_connection(opt.server1, None, options)
except FormatError:
_, err, _ = sys.exc_info()
parser.error("Server1 connection values invalid: %s." % err)
except UtilError:
_, err, _ = sys.exc_info()
parser.error("Server1 connection values invalid: %s." % err.errmsg)
if opt.server2:
try:
server2_values = parse_connection(opt.server2, None, options)
except FormatError:
_, err, _ = sys.exc_info()
parser.error("Server2 connection values invalid: %s." % err)
except UtilError:
_, err, _ = sys.exc_info()
parser.error("Server2 connection values invalid: %s." % err.errmsg)
# Check for arguments
if len(args) == 0:
parser.error(PARSE_ERR_DB_MISSING_CMP)
if opt.span_key_size and opt.span_key_size < DEFAULT_SPAN_KEY_SIZE:
parser.error(
PARSE_ERR_SPAN_KEY_SIZE_TOO_LOW.format(
s_value=opt.span_key_size, default=DEFAULT_SPAN_KEY_SIZE))
# Operations to perform:
# 1) databases exist
# 2) check object counts
# 3) check object differences
# 4) check row counts among the tables
# 5) check table data consistency
res = True
check_failed = False
arg_regexp = re.compile(r'{0}(?:(?:\:){0})?'.format(REGEXP_OBJ_NAME))
for db in args:
# Split the database names considering backtick quotes
grp = arg_regexp.match(db)
if not grp:
parser.error(PARSE_ERR_DB_PAIR.format(db_pair=db,
db1_label='db1',
db2_label='db2'))
parts = grp.groups()
matched_size = len(parts[0])
if not parts[1]:
parts = (parts[0], parts[0])
else:
# add 1 for the separator ':'
matched_size = matched_size + 1
matched_size = matched_size + len(parts[1])
# Verify if the size of the databases matched by the REGEX is equal
# to the initial specified string. In general, this identifies the
# missing use of backticks.
if matched_size != len(db):
parser.error(PARSE_ERR_DB_PAIR_EXT.format(db_pair=db,
db1_label='db1',
db2_label='db2',
db1_value=parts[0],
db2_value=parts[1]))
# Remove backtick quotes (handled later)
db1 = remove_backtick_quoting(parts[0]) \
if is_quoted_with_backticks(parts[0]) else parts[0]
db2 = remove_backtick_quoting(parts[1]) \
if is_quoted_with_backticks(parts[1]) else parts[1]
try:
res = database_compare(server1_values, server2_values,
db1, db2, options)
print
except UtilError:
_, e, _ = sys.exc_info()
print("ERROR: %s" % e.errmsg)
sys.exit(1)
if not res:
check_failed = True
if check_failed and not opt.run_all_tests:
break
if not opt.quiet:
print
if check_failed:
print("# Database consistency check failed.")
else:
sys.stdout.write("# Databases are consistent")
if (opt.no_object_check or opt.no_diff or
opt.no_row_count or opt.no_data or opt.skip_tbl_opts):
sys.stdout.write(" given skip options specified")
print(".")
print("#\n# ...done")
if check_failed:
sys.exit(1)
sys.exit()
|
scavarda/mysql-dbcompare
|
mysql-utilities-1.6.0/scripts/mysqldbcompare.py
|
Python
|
apache-2.0
| 13,042
|
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate fixtures."""
import os
import json
from numpy.random import rand
from scipy.stats import geom
# Get the file path:
FILE = os.path.realpath(__file__)
# Extract the directory in which this file resides:
DIR = os.path.dirname(FILE)
def gen(p, name):
"""Generate fixture data and write to file.
# Arguments
* `p`: success probability
* `name::str`: output filename
# Examples
``` python
python> p = rand(1000)
python> gen(p, './data.json')
```
"""
y = list()
for i in p:
y.append(geom.std(i))
# Store data to be written to file as a dictionary:
data = {
"p": p.tolist(),
"expected": y
}
# Based on the script directory, create an output filepath:
filepath = os.path.join(DIR, name)
# Write the data to the output filepath as JSON:
with open(filepath, "w") as outfile:
json.dump(data, outfile)
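# Reference note (standard result, not derived from this file): scipy.stats.geom models the
# number of Bernoulli trials needed for one success, so geom.std(p) equals sqrt(1 - p) / p;
# the generated "expected" fixture values are these standard deviations.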
def main():
"""Generate fixture data."""
p = rand(1000)
gen(p, "data.json")
if __name__ == "__main__":
main()
|
stdlib-js/stdlib
|
lib/node_modules/@stdlib/stats/base/dists/geometric/stdev/test/fixtures/python/runner.py
|
Python
|
apache-2.0
| 1,687
|
#!/usr/bin/env python3
# Send DHT22 sensor data periodically to AWS IoT.
import time
import datetime
import ssl
import json
import paho.mqtt.client as mqtt
import dht22
import pigpio
# TODO: Change this to the name of our Raspberry Pi, also known as our "Thing Name"
deviceName = "g88pi"
# Public certificate of our Raspberry Pi, as provided by AWS IoT.
deviceCertificate = "tp-iot-certificate.pem.crt"
# Private key of our Raspberry Pi, as provided by AWS IoT.
devicePrivateKey = "tp-iot-private.pem.key"
# Root certificate to authenticate AWS IoT when we connect to their server.
awsCert = "aws-iot-rootCA.crt"
isConnected = False
# Assume we connected the DHT22 Sensor, YwRobot Light Sensor, L-934ID-5V LED as follows:
# DHT22/AM2302 --> Raspberry Pi:
# + --> GPIO 8
# Out --> GPIO 22
# - --> Ground (Pin 14)
power = 8
temp_sensor = 22
# YwRobot Light Sensor --> Raspberry Pi:
# Ground --> Ground (Pin 9)
# VCC --> 3.3V Power (Pin 1)
# DOUT --> GPIO 4
light_sensor = 4
# L-934ID-5V LED --> Raspberry Pi
# + --> GPIO 25
# Ground --> Ground (Pin 20)
led = 25
# This is the main logic of the program. We connect to AWS IoT via MQTT, send sensor data periodically to AWS IoT,
# and handle any actuation commands received from AWS IoT.
def main():
global isConnected
# Create an MQTT client for connecting to AWS IoT via MQTT.
client = mqtt.Client(deviceName + "_sr") # Client ID must be unique because AWS will disconnect any duplicates.
client.on_connect = on_connect # When connected, call on_connect.
client.on_message = on_message # When message received, call on_message.
client.on_log = on_log # When logging debug messages, call on_log.
# Set the certificates and private key for connecting to AWS IoT. TLS 1.2 is mandatory for AWS IoT and is supported
# only in Python 3.4 and later, compiled with OpenSSL 1.0.1 and later.
client.tls_set(awsCert, deviceCertificate, devicePrivateKey, ssl.CERT_REQUIRED, ssl.PROTOCOL_TLSv1_2)
# Connect to AWS IoT server. Use AWS command line "aws iot describe-endpoint" to get the address.
print("Connecting to AWS IoT...")
client.connect("A1P01IYM2DOZA0.iot.us-west-2.amazonaws.com", 8883, 60)
# Start a background thread to process the MQTT network commands concurrently, including auto-reconnection.
client.loop_start()
# Prepare the DHT22 sensor. Ensure we don't read from the DHT22 within 2 seconds, else it will eventually hang.
dht22_sensor = dht22.Sensor(pigpio.pi(), temp_sensor, power=power)
# Loop forever.
while True:
try:
# If we are not connected yet to AWS IoT, wait 1 second and try again.
if not isConnected:
time.sleep(1)
continue
# Read DHT22 sensor values. Skip if we detect an error.
dht22_sensor.trigger()
if dht22_sensor.bad_checksum() + dht22_sensor.short_message() + dht22_sensor.missing_message() + \
dht22_sensor.sensor_resets() != 0 or dht22_sensor.temperature() < 0 or dht22_sensor.humidity() < 0:
print(("DHT22 may be connected incorrectly: temperature={:3.1f}, humidity={:3.1f}, bad_checksum={}, " +
"short_message={}, missing_message={}, sensor_resets={}")
.format(dht22_sensor.temperature(), dht22_sensor.humidity(), dht22_sensor.bad_checksum(),
dht22_sensor.short_message(), dht22_sensor.missing_message(),
dht22_sensor.sensor_resets()))
continue
# Prepare our sensor data in JSON format.
payload = {
"state": {
"reported": {
"temperature": round(dht22_sensor.temperature(), 1),
"humidity": round(dht22_sensor.humidity(), 1),
"timestamp": datetime.datetime.now().isoformat()
}
}
}
print("Sending sensor data to AWS IoT...\n" +
json.dumps(payload, indent=4, separators=(',', ': ')))
# Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our "Thing Shadow".
client.publish("$aws/things/" + deviceName + "/shadow/update", json.dumps(payload))
print("Sent to AWS IoT")
# Wait 30 seconds before sending the next set of sensor data.
time.sleep(30)
except KeyboardInterrupt:
# Stop the program when we press Ctrl-C.
break
except Exception as e:
# For all other errors, we wait a while and resume.
print("Exception: " + str(e))
time.sleep(10)
continue
# This is called when we are connected to AWS IoT via MQTT.
# We subscribe for notifications of desired state updates.
def on_connect(client, userdata, flags, rc):
global isConnected
isConnected = True
print("Connected to AWS IoT")
# Subscribe to our MQTT topic so that we will receive notifications of updates.
topic = "$aws/things/" + deviceName + "/shadow/update/accepted"
print("Subscribing to MQTT topic " + topic)
client.subscribe(topic)
# This is called when we receive a subscription notification from AWS IoT.
# If this is an actuation command, we execute it.
def on_message(client, userdata, msg):
# Convert the JSON payload to a Python dictionary.
# The payload is in binary format so we need to decode as UTF-8.
payload2 = json.loads(msg.payload.decode("utf-8"))
print("Received message, topic: " + msg.topic + ", payload:\n" +
json.dumps(payload2, indent=4, separators=(',', ': ')))
# Print out log messages for tracing.
def on_log(client, userdata, level, buf):
print("Log: " + buf)
# Start the main program.
main()
|
lupyuen/RaspberryPiImage
|
home/pi/TP-IoT/send_simple_sensor_data.py
|
Python
|
apache-2.0
| 5,884
|
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
import itertools
import os
import shutil
import unittest
from lxml import etree
import mock
from mox3 import mox as mox_lib
import six
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fake_api as netapp_api)
from cinder import utils as cinder_utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.dataontap import (nfs_7mode
as netapp_nfs_7mode)
from cinder.volume.drivers.netapp.dataontap import (nfs_cmode
as netapp_nfs_cmode)
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import utils
from oslo_config import cfg
CONF = cfg.CONF
CONNECTION_INFO = {
'hostname': 'fake_host',
'transport_type': 'https',
'port': 443,
'username': 'admin',
'password': 'passw0rd',
}
FAKE_CONNECTION_INFO_HTTP = {
'hostname': '127.0.0.1',
'transport_type': 'http',
'port': None,
'username': 'admin',
'password': 'pass',
'vserver': 'openstack',
}
FAKE_CONNECTION_INFO_HTTPS = dict(FAKE_CONNECTION_INFO_HTTP,
transport_type='https')
FAKE_7MODE_CONNECTION_INFO_HTTP = dict(FAKE_CONNECTION_INFO_HTTP)
FAKE_7MODE_CONNECTION_INFO_HTTP.pop('vserver')
FAKE_7MODE_CONNECTION_INFO_HTTP['vfiler'] = 'test_vfiler'
FAKE_7MODE_CONNECTION_INFO_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTP,
transport_type='https')
SEVEN_MODE_CONNECTION_INFO = dict(
itertools.chain(CONNECTION_INFO.items(),
{'vfiler': 'test_vfiler'}.items()))
FAKE_VSERVER = 'fake_vserver'
def create_configuration():
configuration = mox_lib.MockObject(conf.Configuration)
configuration.append_config_values(mox_lib.IgnoreArg())
configuration.max_over_subscription_ratio = 20.0
configuration.reserved_percentage = 0
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
configuration.nas_mount_options = None
configuration.nfs_used_ratio = .95
configuration.nfs_oversub_ratio = 1.0
configuration.netapp_server_hostname = CONNECTION_INFO['hostname']
configuration.netapp_transport_type = CONNECTION_INFO['transport_type']
configuration.netapp_server_port = CONNECTION_INFO['port']
configuration.netapp_login = CONNECTION_INFO['username']
configuration.netapp_password = CONNECTION_INFO['password']
configuration.netapp_vfiler = SEVEN_MODE_CONNECTION_INFO['vfiler']
return configuration
class FakeVolume(object):
def __init__(self, host='', size=0):
self.size = size
self.id = hash(self)
self.name = None
self.host = host
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetAppCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
TEST_NFS_HOST = 'nfs-host1'
TEST_NFS_SHARE_PATH = '/export'
TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
TEST_MNT_POINT = '/mnt/nfs'
def setUp(self):
super(NetAppCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([client_cmode, client_base])
self.mock_object(common.na_utils, 'check_netapp_lib')
self.mock_object(nfs_base, 'LOG')
self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.zapi_client = mock.Mock()
config = self._driver.configuration
config.netapp_vserver = FAKE_VSERVER
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
location = '127.0.0.1:/nfs'
host = 'hostname@backend#' + location
volume = FakeVolume(host, 1)
snapshot = FakeSnapshot(1)
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._get_volume_location(mox_lib.IgnoreArg()).AndReturn(location)
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
self.mock_object(drv, '_do_qos_for_volume')
self.mock_object(utils, 'get_volume_extra_specs')
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(expected_result, loc)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(mox_lib.IgnoreArg())
drv._get_provider_location(mox_lib.IgnoreArg())
drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup')
@mock.patch.object(client_cmode.Client, '__init__', return_value=None)
def test_do_setup(self, mock_client_init, mock_super_do_setup):
context = mock.Mock()
self._driver.do_setup(context)
mock_client_init.assert_called_once_with(vserver=FAKE_VSERVER,
**CONNECTION_INFO)
mock_super_do_setup.assert_called_once_with(context)
@mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
@mock.patch.object(ssc_cmode, 'check_ssc_api_permissions')
def test_check_for_setup_error(self, mock_ssc_api_permission_check,
mock_super_check_for_setup_error):
self._driver.zapi_client = mock.Mock()
self._driver.check_for_setup_error()
mock_ssc_api_permission_check.assert_called_once_with(
self._driver.zapi_client)
mock_super_check_for_setup_error.assert_called_once_with()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
drv.zapi_client = mox.CreateMockAnything()
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv.zapi_client.get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv.zapi_client.get_vol_by_junc_vserver('openstack', '/nfs').AndReturn(
'nfsvol')
drv.zapi_client.clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._get_host_ip(mox_lib.IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(mox_lib.IgnoreArg()).AndReturn('/nfs')
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return netapp_api.NaElement(response_el).get_children()
def test_clone_backing_file_for_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
share = 'ip:/share'
drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id,
share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
if not result:
pass
else:
self.fail('Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if (share == 'testshare' and file_name == 'img-cache-id'):
pass
else:
self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == 0:
pass
else:
self.fail('No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
mox_lib.IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == len(r_files):
for f in res:
r_files.remove(f)
else:
self.fail('Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file_at_path')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._delete_file_at_path('/mnt/img-cache-2').AndReturn(True)
drv._delete_file_at_path('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image('',
volume,
('image_location', None),
{'id': 'image_id'}, '')
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
def test_clone_image_cloneableshare_nospace(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(False)
mox.ReplayAll()
(prop, cloned) = drv.clone_image(
'',
volume,
('nfs://127.0.0.1:/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
if not cloned and not prop['provider_location']:
pass
else:
self.fail('Expected not cloned, got cloned.')
def test_clone_image_cloneableshare_raw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._clone_backing_file_for_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
'',
volume,
('nfs://127.0.0.1:/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
def test_clone_image_cloneableshare_notraw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
def test_clone_image_file_not_discovered(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
mox.StubOutWithMock(drv, '_do_qos_for_volume')
mox.StubOutWithMock(drv, 'local_path')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(False)
mox.ReplayAll()
vol_dict, result = drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_clone_image_resizefails(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_do_qos_for_volume')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
mox.StubOutWithMock(drv, 'local_path')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(), 'raw',
run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file(
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndRaise(exception.InvalidResults())
mox.ReplayAll()
vol_dict, result = drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
                 'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(mox_lib.IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(mox_lib.IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
                   # Valid metadata
[{'metadata':
{'share_location': 'nfs://host/path',
'mountpoint': '/opt/stack/data/glance',
'id': 'abc-123',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id-0'},
# missing metadata
{'metadata': {},
'url': 'file:///opt/stack/data/glance/image-id-1'},
# missing location_type
{'metadata': {'location_type': None},
'url': 'file:///opt/stack/data/glance/image-id-2'},
# non-nfs location_type
{'metadata': {'location_type': 'not-NFS'},
'url': 'file:///opt/stack/data/glance/image-id-3'},
# missing share_location
{'metadata': {'location_type': 'nfs',
'share_location': None},
'url': 'file:///opt/stack/data/glance/image-id-4'},
# missing mountpoint
{'metadata': {'location_type': 'nfs',
'share_location': 'nfs://host/path',
# Pre-kilo we documented "mount_point"
'mount_point': '/opt/stack/data/glance'},
'url': 'file:///opt/stack/data/glance/image-id-5'},
# Valid metadata
{'metadata':
{'share_location': 'nfs://host/path',
'mountpoint': '/opt/stack/data/glance',
'id': 'abc-123',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id-6'}])
locations = drv._construct_image_nfs_url(img_loc)
self.assertIn("nfs://host/path/image-id-0", locations)
self.assertIn("nfs://host/path/image-id-6", locations)
self.assertEqual(2, len(locations))
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
locations = drv._construct_image_nfs_url(img_loc)
self.assertIn("nfs://host/path/image-id", locations)
def test_get_pool(self):
pool = self._driver.get_pool({'provider_location': 'fake-share'})
self.assertEqual('fake-share', pool)
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_cluster'
configuration.netapp_storage_protocol = 'nfs'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = None
configuration.netapp_vserver = 'openstack'
configuration.nfs_shares_config = '/nfs'
return configuration
@mock.patch.object(utils, 'get_volume_extra_specs')
def test_check_volume_type_mismatch(self, get_specs):
        if not hasattr(self._driver, 'vserver'):
            raise unittest.SkipTest("Test only applies to cmode driver")
get_specs.return_value = {'thin_volume': 'true'}
self._driver._is_share_vol_type_match = mock.Mock(return_value=False)
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self._driver._check_volume_type, 'vol',
'share', 'file')
get_specs.assert_called_once_with('vol')
self._driver._is_share_vol_type_match.assert_called_once_with(
'vol', 'share', 'file')
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_all_default(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTPS)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_CONNECTION_INFO_HTTP, port=81)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_CONNECTION_INFO_HTTPS, port=446)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_convert_vol_ref_share_name_to_share_ip(self, mock_hostname):
drv = self._driver
share = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name')
modified_share = '10.12.142.11:/export/test_file_name'
modified_vol_ref = drv._convert_vol_ref_share_name_to_share_ip(share)
self.assertEqual(modified_share, modified_vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
@mock.patch.object(os.path, 'isfile', return_value=True)
def test_get_share_mount_and_vol_from_vol_ref(self, mock_isfile,
mock_hostname):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name')
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
(share, mount, file_path) = \
drv._get_share_mount_and_vol_from_vol_ref(vol_ref)
self.assertEqual(self.TEST_NFS_EXPORT1, share)
self.assertEqual(self.TEST_MNT_POINT, mount)
self.assertEqual('test_file_name', file_path)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_with_bad_ref(self,
mock_hostname):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_ref = {'source-id': '1234546'}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_where_not_found(self,
mock_host):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT2, 'test_file_name')
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_where_is_dir(self,
mock_host):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_ref = {'source-name': self.TEST_NFS_EXPORT2}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1073741824)
def test_manage_existing_get_size(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
vol_size = drv.manage_existing_get_size(volume, vol_ref)
self.assertEqual(1, vol_size)
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing_get_size_round_up(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
vol_size = drv.manage_existing_get_size(volume, vol_ref)
self.assertEqual(2, vol_size)
@mock.patch.object(cinder_utils, 'get_file_size', return_value='badfloat')
def test_manage_existing_get_size_error(self, get_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
self.assertRaises(exception.VolumeBackendAPIException,
drv.manage_existing_get_size, volume, vol_ref)
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._check_volume_type = mock.Mock()
self.stubs.Set(drv, '_execute', mock.Mock())
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
shutil.move = mock.Mock()
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = {}
self.mock_object(drv, '_do_qos_for_volume')
location = drv.manage_existing(volume, vol_ref)
self.assertEqual(self.TEST_NFS_EXPORT1, location['provider_location'])
drv._check_volume_type.assert_called_once_with(
volume, self.TEST_NFS_EXPORT1, test_file, {})
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing_move_fails(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'volume-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
mock_check_volume_type = drv._check_volume_type = mock.Mock()
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
drv._execute = mock.Mock(side_effect=OSError)
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = {}
self.mock_object(drv, '_do_qos_for_volume')
self.assertRaises(exception.VolumeBackendAPIException,
drv.manage_existing, volume, vol_ref)
mock_check_volume_type.assert_called_once_with(
volume, self.TEST_NFS_EXPORT1, test_file, {})
@mock.patch.object(nfs_base, 'LOG')
def test_unmanage(self, mock_log):
drv = self._driver
self.mock_object(utils, 'get_valid_qos_policy_group_info')
volume = FakeVolume()
volume['id'] = '123'
volume['provider_location'] = '/share'
drv.unmanage(volume)
self.assertEqual(1, mock_log.info.call_count)
class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetAppCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
self._driver.zapi_client = mock.Mock()
self.mock_object(netapp_nfs_cmode, 'LOG')
self._fake_empty_qos_policy_group_info = {
'legacy': None,
'spec': None,
}
self._fake_legacy_qos_policy_group_info = {
'legacy': {
'policy_name': 'qos_policy_1'
},
'spec': None,
}
@mock.patch.object(utils, 'LOG', mock.Mock())
def test_create_volume(self):
drv = self._driver
drv.ssc_enabled = False
fake_extra_specs = {}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = fake_extra_specs
self.mock_object(drv, '_ensure_shares_mounted')
self.mock_object(drv, '_do_create_volume')
mock_get_qos_info =\
self.mock_object(utils, 'get_valid_qos_policy_group_info')
mock_get_qos_info.return_value = self._fake_empty_qos_policy_group_info
volume_info = self._driver.create_volume(FakeVolume(host, 1))
self.assertEqual(fake_share, volume_info.get('provider_location'))
self.assertEqual(0, utils.LOG.warning.call_count)
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv.ssc_enabled = False
host = 'hostname@backend' # missing pool
with mock.patch.object(drv, '_ensure_shares_mounted'):
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
def test_create_volume_with_legacy_qos_policy(self):
drv = self._driver
drv.ssc_enabled = False
fake_extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
fake_volume = FakeVolume(host, 1)
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = fake_extra_specs
mock_get_qos_info =\
self.mock_object(utils, 'get_valid_qos_policy_group_info')
mock_get_qos_info.return_value =\
self._fake_legacy_qos_policy_group_info
self.mock_object(drv, '_ensure_shares_mounted')
self.mock_object(drv, '_do_create_volume')
mock_set_qos = self.mock_object(drv, '_set_qos_policy_group_on_volume')
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual('localhost:myshare',
volume_info.get('provider_location'))
mock_set_qos.assert_called_once_with(
fake_volume, self._fake_legacy_qos_policy_group_info)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
nfs_base.NetAppNfsDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"])
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs =\
mock.Mock(return_value=('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file_at_path = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs =\
mock.Mock(return_value=('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file_at_path = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file_at_path.call_count == 2
        assert drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
"""Test direct NetApp 7 Mode driver."""
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([client_cmode, client_base])
self.mock_object(common.na_utils, 'check_netapp_lib')
self.mock_object(common.na_utils, 'LOG')
self.mock_object(nfs_base, 'LOG')
self._driver = netapp_nfs_7mode.NetApp7modeNfsDriver(
configuration=create_configuration())
self._driver.zapi_client = mock.Mock()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(mox_lib.IgnoreArg())
drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv.ssc_enabled = False
host = 'hostname@backend' # missing pool
with mock.patch.object(drv, '_ensure_shares_mounted'):
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup')
@mock.patch.object(client_7mode.Client, '__init__', return_value=None)
def test_do_setup(self, mock_client_init, mock_super_do_setup):
context = mock.Mock()
self._driver.do_setup(context)
mock_client_init.assert_called_once_with(**SEVEN_MODE_CONNECTION_INFO)
mock_super_do_setup.assert_called_once_with(context)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_all_default(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTPS)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_7MODE_CONNECTION_INFO_HTTP,
port=81)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTPS,
port=446)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS)
@mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
def test_check_for_setup_error(self, mock_super_check_for_setup_error):
self._driver.zapi_client.get_ontapi_version.return_value = (1, 20)
self.assertIsNone(self._driver.check_for_setup_error())
mock_super_check_for_setup_error.assert_called_once_with()
def test_check_for_setup_error_old_version(self):
self._driver.zapi_client.get_ontapi_version.return_value = (1, 8)
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_no_version(self):
self._driver.zapi_client.get_ontapi_version.return_value = None
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.check_for_setup_error)
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
drv._get_export_ip_path(
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
return mox
def test_clone_backing_file_for_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
try:
drv._clone_backing_file_for_volume(volume_name, clone_name,
volume_id)
except Exception as e:
if isinstance(e, netapp_api.NaApiError):
pass
else:
raise
mox.VerifyAll()
def test_get_pool(self):
pool = self._driver.get_pool({'provider_location': 'fake-share'})
self.assertEqual('fake-share', pool)
def _set_config(self, configuration):
super(NetApp7modeNfsDriverTestCase, self)._set_config(
configuration)
configuration.netapp_storage_family = 'ontap_7mode'
return configuration
def test_clone_backing_file_for_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
share = 'ip:/share'
drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id,
share)
mox.VerifyAll()
|
scottdangelo/RemoveVolumeMangerLocks
|
cinder/tests/unit/test_netapp_nfs.py
|
Python
|
apache-2.0
| 72,772
|
import os
from behave import *
@when(u'I run the command')
def step_impl(context):
context.output = context.env.run(
"bash -c '{}'".format(os.path.expandvars(context.text)),
        expect_error=True,
        expect_stderr=True)
def download_file(link, out):
import wget
return wget.download(link, out)
def get_stream(context, stream):
assert stream in ['stderr', 'stdout'], "Unknown output stream {}".format(stream)
return getattr(context.output, stream)
def assert_file_exists(file_):
assert os.path.isfile(file_), "The file \"{}\" does not exist.".format(file_)
def get_env_path(context, file_):
return os.path.join(context.env.cwd, file_)
def get_data_file_path(file_):
dir_ = os.path.dirname(os.path.abspath(__file__))
return os.path.join(dir_, '..', '..', 'test', file_)
@then(u'the file "{}" should exist')
def step_impl(context, file_):
assert_file_exists(get_env_path(context, file_))
@then(u'the exit code should be {code}')
def step_impl(context, code):
returned = context.output.returncode
assert returned == int(code), \
"Process should return exit code {} but was {}".format(code, returned)
@given(u'I download the file "{link}" to "{dest}"')
def step_impl(context, link, dest):
import sys
normalized_dest = get_env_path(context, dest)
sys.stdout = sys.__stdout__
download_file(link, normalized_dest)
@given(u'I copy the example data files')
def step_impl(context):
import shutil
for row in context.table.rows:
shutil.copy(get_data_file_path(row['source']),
get_env_path(context, row['dest']))
@then(u'the {stream} should contain')
def step_impl(context, stream):
output = get_stream(context, stream)
assert context.text in output
@given(u'I downloaded the scripts')
def create_tmp_dir(context):
dir_ = os.path.dirname(os.path.abspath(__file__))
tmp_dir = os.path.join(dir_, '..', '..', "tmp")
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
@given(u'I create the directory "{directory}"')
def step_impl(context, directory):
os.makedirs(get_env_path(context, directory))
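# Illustrative sketch (not part of the original steps file): a feature file
# driving the steps above could look roughly like this. The command and the
# expected output are assumed examples, not taken from the project itself.
#
#     Scenario: show the help text
#       When I run the command
#         """
#         amber.py --help
#         """
#       Then the exit code should be 0
#       And the stdout should contain
#         """
#         usage
#         """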
|
CAMI-challenge/AMBER
|
features/steps/cli.py
|
Python
|
apache-2.0
| 2,147
|
#! -*- coding: utf-8; mode: python -*-
"""
ago.py: interact with an ArcGIS Portal instance
"""
import arcpy
import json
import time
import datetime
import mimetypes
import gzip
import random
import string
import getpass
import sys
import os
from io import BytesIO
import codecs
import uuid
import shutil
try:
import http.client as client
import urllib.parse as parse
from urllib.request import urlopen as urlopen
from urllib.request import Request as request
from urllib.request import HTTPError, URLError
from urllib.parse import urlencode as encode
# py2
except ImportError:
import httplib as client
from urllib2 import urlparse as parse
from urllib2 import urlopen as urlopen
from urllib2 import Request as request
from urllib2 import HTTPError, URLError
from urllib import urlencode as encode
unicode = str
# Valid package types on portal
ITEM_TYPES = {
".LPK": "Layer Package",
".LPKX": "Layer Package",
".MPK": "Map Package",
".MPKX": "Map Package",
".GPK": "Geoprocessing Package",
".GPKX": "Geoprocessing Package",
".RPK": "Rule Package",
".GCPK": "Locator Package",
".PPKX": "Project Package",
".APTX": "Project Template",
".TPK": "Tile Package",
".MMPK": "Mobile Map Package",
".VTPK": "Vector Tile Package"
}
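# The message constants below are referenced later in this module
# (NO_PORTAL_URL_MSG, EMPTY_ITEM_MSG, HTTP_ERROR_MSG, URL_ERROR_MSG) but are not
# defined above; these are assumed placeholder values so the error paths can
# run, and the original wording may differ.
NO_PORTAL_URL_MSG = 'No portal URL could be determined for this connection.'
EMPTY_ITEM_MSG = 'One or more item ids are missing; cannot move items.'
HTTP_ERROR_MSG = 'HTTP error while requesting'
URL_ERROR_MSG = 'URL error while requesting'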
class MultipartFormdataEncoder(object):
"""
Usage: request_headers, request_data =
MultipartFormdataEncoder().encodeForm(params, files)
Inputs:
params = {"f": "json", "token": token, "type": item_type,
"title": title, "tags": tags, "description": description}
files = {"file": {"filename": "some_file.sd", "content": content}}
Note: content = open(file_path, "rb").read()
"""
def __init__(self):
self.boundary = uuid.uuid4().hex
self.content_type = {
"Content-Type": "multipart/form-data; boundary={}".format(self.boundary)
}
@classmethod
def u(cls, s):
if sys.hexversion < 0x03000000 and isinstance(s, str):
s = s.decode('utf-8')
if sys.hexversion >= 0x03000000 and isinstance(s, bytes):
s = s.decode('utf-8')
return s
def iter(self, fields, files):
"""
Yield bytes for body. See class description for usage.
"""
encoder = codecs.getencoder('utf-8')
for key, value in fields.items():
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(
self.u('Content-Disposition: form-data; name="{}"\r\n').format(key))
yield encoder('\r\n')
if isinstance(value, int) or isinstance(value, float):
value = str(value)
yield encoder(self.u(value))
yield encoder('\r\n')
for key, value in files.items():
if "filename" in value:
filename = value.get("filename")
content_disp = 'Content-Disposition: form-data;name=' + \
'"{}"; filename="{}"\r\n'.format(key, filename)
content_type = 'Content-Type: {}\r\n'.format(
mimetypes.guess_type(filename)[0] or 'application/octet-stream')
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(content_disp)
yield encoder(content_type)
yield encoder('\r\n')
if "content" in value:
buff = value.get("content")
yield (buff, len(buff))
yield encoder('\r\n')
yield encoder('--{}--\r\n'.format(self.boundary))
def encodeForm(self, fields, files):
body = BytesIO()
for chunk, chunk_len in self.iter(fields, files):
body.write(chunk)
self.content_type["Content-Length"] = str(len(body.getvalue()))
return self.content_type, body.getvalue()
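# Minimal usage sketch for MultipartFormdataEncoder, mirroring the class
# docstring (kept as comments so importing this module has no side effects).
# 'item.sd' and the field values are illustrative placeholders only.
#
#   params = {'f': 'json', 'token': '<token>', 'type': 'Service Definition',
#             'title': 'My item', 'tags': 'demo', 'description': 'example'}
#   with open('item.sd', 'rb') as fh:
#       files = {'file': {'filename': 'item.sd', 'content': fh.read()}}
#   headers, body = MultipartFormdataEncoder().encodeForm(params, files)
#   # 'headers' holds the multipart Content-Type and Content-Length;
#   # 'body' is the encoded payload, ready to pass to a MULTIPART request.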
class AGOLHelper(object):
"""
Interact with an ArcGIS Portal instance, such as ArcGIS Online. Must be
initialized with either the login() method, or by reusing an existing
OAuth token via token_login(). Covers approximately 1/3 of the complete
API, primarily focused on the common operations around uploading and
managing services and web maps.
"""
def __init__(self, portal_url=None, token=None, debug=False):
if portal_url is None:
self.portal_url = arcpy.GetActivePortalURL()
else:
self.portal_url = portal_url
        # in the absence of information, default to HTTPS
self.protocol = 'https'
self.is_arcgis_online = False
url_parts = self._parse_url(self.portal_url)
if url_parts:
if url_parts.scheme:
self.protocol = url_parts.scheme
self.host = self._normalize_host_url(url_parts)
if url_parts.netloc == 'www.arcgis.com':
self.is_arcgis_online = True
self.protocol = 'https'
else:
arcpy.AddError(NO_PORTAL_URL_MSG)
sys.exit()
self.base_url = '{}://{}/sharing/rest'.format(self.protocol, self.host)
self.secure_url = 'https://{}/sharing/rest'.format(self.host)
self.token = token
self.debug = debug
self.headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent': ('ago.py -- ArcGIS portal module 0.1')
}
self.portal_name = None
self.portal_info = {}
self.username = None
self.login_method = None
self.expiration = None
self._password = None
def login(self, username=None, password=None, repeat=None):
"""
Get a sign-in token from provided credentials.
Arguments:
username -- user to sign in with
password -- password for user (default: use getpass)
Returns:
None
"""
if username:
self.username = username
else:
arcpy.AddError("Expected user name. None given.")
return
if password is None:
self._password = getpass.getpass()
else:
self._password = password
token_url = '{}/generateToken?'.format(self.secure_url)
token_parameters = {
'username': username,
'password': self._password,
'referer': "http://maps.esri.com",
'expiration': 600,
}
token_response = self.url_request(
token_url, token_parameters, 'POST', repeat=repeat)
if token_response and 'token' in token_response:
self.token = token_response['token']
self.expiration = datetime.datetime.fromtimestamp(
token_response['expires'] / 1000) - datetime.timedelta(seconds=1)
if 'ssl' in token_response:
if token_response['ssl']:
self.protocol = 'https'
else:
self.protocol = 'http'
# update base information with token
self.information()
self.login_method = 'password'
else:
arcpy.AddError("Unable to get signin token.")
return
def token_login(self):
"""
Get a sign-in token generated from ArcPy.
Arguments:
None
Returns:
None
"""
# NOTE side-effects
token_response = arcpy.GetSigninToken()
if token_response and 'token' in token_response:
self.token = token_response['token']
self.expiration = datetime.datetime.fromtimestamp(
token_response['expires']) - datetime.timedelta(seconds=1)
if self.debug:
msg = 'Received token starting with ' + \
'"{}", valid for {} minutes.'.format(
self.token[0:10], self.valid_for)
arcpy.AddMessage(msg)
# update base information with token
self.information()
self.login_method = 'token'
else:
arcpy.AddError("Unable to get signin token.")
return
@property
def valid_for(self):
"""
Length the current token is valid for, in minutes.
Returns:
An integer of minutes token remains valid
"""
valid = False
if self.expiration and isinstance(self.expiration, datetime.datetime):
valid = (self.expiration - datetime.datetime.now()).seconds / 60
return valid
def information(self):
"""
Get portal 'self' information.
Arguments:
None
Returns:
A dictionary returned from portals/self.
"""
# NOTE side-effects; do separately
url = '{}/portals/self'.format(self.base_url)
portal_info = self.url_request(url)
self.portal_info = portal_info
self.portal_name = portal_info['portalName']
url = '{}/community/self'.format(self.base_url)
user_info = self.url_request(url)
self.username = user_info['username']
return self.portal_info
def random_string(self, length):
"""
Generate a random string of ASCII letters.
Arguments:
length = number of characters
Returns:
random string
"""
alpha = string.ascii_letters
        return ''.join(random.choice(alpha) for ii in range(length))
def encode_multipart_data(self, data, files):
"""
Create multipart boundaries between file streams.
Arguments:
data -- input data
files -- input files
Returns:
A tuple containing response -- (body, headers)
"""
boundary = self.random_string(30)
def get_content_type(filename):
""" Try to determine content type based on file extension."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def encode_field(field_name):
""" Encode fields using multipart specification."""
return('--' + boundary,
'Content-Disposition: form-data; name="%s"' % field_name,
'', str(data[field_name]))
def encode_file(field_name):
""" Encode file data using multipart specification."""
filename = str(files[field_name])
return('--' + boundary,
'Content-Disposition: form-data;'
'name="{}"; filename="{}"'.format(field_name, filename),
'Content-Type: %s' % get_content_type(filename),
'', open(filename, 'rb').read())
lines = []
for name in data:
lines.extend(encode_field(name))
for name in files:
lines.extend(encode_file(name))
lines.extend(('--%s--' % boundary, ''))
body = '\r\n'.join(lines)
headers = {
'content-type': 'multipart/form-data; boundary=' + boundary,
'content-length': str(len(body))
}
return body, headers
def list_folders(self):
"""
List available user folders.
Returns:
A dictionary of folder titles to ids.
"""
folders = {}
folder_request = self.user_content()['folders']
for folder in folder_request:
folders[folder['title']] = folder['id']
return folders
def create_folder(self, name):
"""
        Create a folder item and return the id of the created folder.
Arguments:
name -- folder name to create
Returns:
folder item id.
"""
folder = None
url = '{}/content/users/{}/createFolder'.format(
self.base_url, self.username)
parameters = {'title': name}
response = self.url_request(url, parameters, 'POST')
if response is not None and 'folder' in response:
folder = response['folder']['id']
return folder
def item(self, item_id=None, repeat=None):
"""
Get back information about a particular item. Must have read
access to the item requested.
Arguments:
item_id: the portal id of the desired item.
Returns:
Dictionary from item response.
"""
results = {}
if item_id:
url = '{}/content/items/{}'.format(self.base_url, item_id)
results = self.url_request(url, repeat=repeat)
return results
def move_items(self, target_folder_id, items):
"""
Move items to a target folder.
Arguments:
target_folder_id: folder id to move items to
items: list of one or more item ids to move
Returns:
None
"""
# Test if we have a None object somewhere
# This could potentially be the case if one of the previous
# portal responses was not successful.
if None in items:
arcpy.AddError(EMPTY_ITEM_MSG)
return
url = '{}/content/users/{}/moveItems'.format(
self.base_url, self.username)
parameters = {
'folder': target_folder_id,
'items': ','.join(map(str, items))
}
move_response = self.url_request(url, parameters, request_type='POST')
if self.debug:
msg = "Moving items, using {} with parameters {}, got {}".format(
url, parameters, move_response)
arcpy.AddMessage(msg)
return move_response
def share_items(self, groups=None, everyone=False, org=False, items=None):
"""
Shares one or more items with the specified groups. Can only share
items with groups the user belongs to. Can also share with
the users' current organization, and the public.
Arguments:
groups -- a list of group IDs to share items with
everyone -- publicly share the item (default: False)
org -- share with the users' organization (default: False)
items -- a list of item IDs to update sharing properties on
Returns:
A dictionary of JSON objects, one per item containing the item,
whether sharing was successful, any groups sharing failed with,
and any errors.
"""
if (groups is None and not everyone and not org) or not items:
if self.debug:
arcpy.AddWarning("Invalid sharing options set.")
return
# If shared with everyone, have to share with Org as well
if everyone:
org = True
url = '{}/content/users/{}/shareItems'.format(
self.base_url, self.username)
parameters = {
'everyone': everyone,
'org': org,
'items': ','.join(map(str, items))
}
# sharing with specific groups is optional
if groups:
parameters['groups'] = ','.join(map(str, groups))
sharing_response = self.url_request(url, parameters, 'POST')
if self.debug:
msg = "Sharing items, using {} with parameters {}, got {}".format(
url, parameters, sharing_response)
arcpy.AddMessage(msg)
return sharing_response
def search(self, title=None, item_type=None, group=None,
owner=None, item_id=None, repeat=None, num=10, id_only=True, name=None):
"""
Search for items, a partial implementation of the
search operation of the ArcGIS REST API. Requires one of:
title, item_type, group, owner.
Arguments:
title -- item title
item_type -- item type
group -- item group
owner -- username of item owner
item_id -- item id
repeat -- retry the search, up to this number of times (default: None)
num -- number of results (default: 10)
id_only -- return only IDs of results. If False, will return
full JSON results. (default: True)
Returns:
A list of search results item ids.
"""
query_types = {
'title': title,
'type': item_type,
'group': group,
'owner': self.username, #owner,
'id': item_id,
'name': name
}
query_parts = []
for (label, value) in list(query_types.items()):
if value:
query_parts.append('{}: "{}"'.format(label, value))
if len(query_parts) == 0:
return
elif len(query_parts) == 1:
query = query_parts[0]
else:
query = " AND ".join(query_parts)
if self.debug:
arcpy.AddMessage("Searching for '{}'".format(query))
url = '{}/search'.format(self.base_url)
parameters = {
'num': num,
'q': query
}
response_info = self.url_request(url, parameters)
results = []
if response_info and 'results' in response_info:
if response_info['total'] > 0:
for item in response_info['results']:
if 'id' in item:
if id_only:
results.append(item['id'])
else:
results.append(item)
if self.debug:
if results:
arcpy.AddMessage("Got results! Found items: {}".format(results))
else:
arcpy.AddMessage("No results found.")
# occasional timing conflicts are happening; repeat search until we
# can continue -- the result should be empty since we just deleted it.
if repeat and not results:
repeat -= 1
if repeat <= 0:
return
time.sleep(1)
results = self.search(
title=title, item_type=item_type, group=group, owner=owner,
item_id=item_id, repeat=repeat, num=num, id_only=id_only)
return results
def user(self, username=None):
"""
A user resource representing a registered user of the portal.
Arguments:
username -- user of interest
Returns:
A dictionary of the JSON response.
"""
if username is None:
username = self.username
url = '{}/community/users/{}'.format(self.base_url, username)
return self.url_request(url)
def user_content(self, username=None):
"""
User items and folders.
Arguments:
username -- user of interest
Returns:
A dictionary of user items and folders.
"""
if username is None:
username = self.username
url = '{}/content/users/{}'.format(self.base_url, username)
return self.url_request(url)
def list_groups(self, username=None):
"""
List users' groups.
Returns:
A dictionary of group titles to ids.
"""
groups = {}
if username is None:
username = self.username
groups_request = self.user(username)['groups']
for group in groups_request:
groups[group['title']] = group['id']
return groups
def add_item(self, file_to_upload, username=None, folder_id=None, itemtype=None, params=None):
"""
Adds an item to the portal.
All items are added as multipart. Once the item is added,
Add Part will be called.
Returns:
The response/item_id of the item added.
"""
if username is None:
username = self.username
url = '{}/content/users/{}/{}/addItem'.format(self.base_url, username, folder_id)
parameters = {
'multipart': 'true',
'filename': file_to_upload,
}
if params:
parameters.update(params)
if itemtype:
parameters['type'] = itemtype
else:
try:
file_name, file_ext = os.path.splitext(os.path.basename(file_to_upload))
itemtype = ITEM_TYPES[file_ext.upper()]
except KeyError:
msg = "Unable to upload file: {}, unknown type".format(
file_to_upload)
arcpy.AddError(msg)
return
details = {'filename': file_to_upload}
add_item_res = self.url_request(
url, parameters, request_type="POST", files=details)
return self._add_part(file_to_upload, add_item_res['id'], itemtype)
def _add_part(self, file_to_upload, item_id, upload_type=None):
""" Add item part to an item being uploaded."""
def read_in_chunks(file_object, chunk_size=10000000):
"""Generate file chunks (default: 10MB)"""
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
url = '{}/content/users/{}/items/{}/addPart'.format(
self.base_url, self.username, item_id)
with open(file_to_upload, 'rb') as f:
for part_num, piece in enumerate(read_in_chunks(f), start=1):
title = os.path.splitext(os.path.basename(file_to_upload))[0]
files = {"file": {"filename": file_to_upload, "content": piece}}
params = {
'f': "json",
'token': self.token,
'partNum': part_num,
'title': title,
'itemType': 'file',
'type': upload_type
}
headers, data = MultipartFormdataEncoder().encodeForm(params, files)
resp = self.url_request(url, data, "MULTIPART", headers, repeat=1)
return resp
def item_status(self, item_id, username=None):
"""
Gets the status of an item.
Returns:
The item's status. (partial | processing | failed | completed)
"""
if username is None:
username = self.username
url = '{}/content/users/{}/items/{}/status'.format(
self.base_url, username, item_id)
return self.url_request(url)
def commit(self, item_id, username=None):
"""
Commits an item that was uploaded as multipart
Returns:
Result of calling commit. (success: true| false)
"""
if username is None:
username = self.username
url = '{}/content/users/{}/items/{}/commit'.format(
self.base_url, username, item_id)
return self.url_request(url)
def update_item(self, item_id, metadata, username=None, folder_id=None, title=None):
"""
Updates metadata parts of an item.
Metadata expected as a tuple
Returns:
Result of calling update. (success: true | false)
"""
if username is None:
username = self.username
url = "{}/content/users/{}/{}/items/{}/update".format(
self.base_url, username, folder_id, item_id)
parameters = {
'snippet': metadata[0],
'description': metadata[1],
'tags': metadata[2],
'accessInformation': metadata[3],
'licenseInfo': metadata[4],
'token': self.token,
'f': 'json'
}
if title:
parameters['title'] = title
if len(metadata) > 5:
parameters['thumbnail'] = metadata[5]
with open(metadata[5], 'rb') as f:
d = f.read()
files = {"thumbnail": {"filename": metadata[5], "content": d }}
headers, data = MultipartFormdataEncoder().encodeForm(parameters, files)
resp = self.url_request(url, data, "MULTIPART", headers, repeat=1)
return resp
else:
return self.url_request(url, parameters, 'POST')
def url_request(self, in_url, request_parameters=None, request_type='GET',
additional_headers=None, files=None, repeat=0):
"""
Make a request to the portal, provided a portal URL
and request parameters, returns portal response. By default,
returns a JSON response, and reuses the current token.
Arguments:
in_url -- portal url
request_parameters -- dictionary of request parameters.
request_type -- HTTP verb (default: GET)
additional_headers -- any headers to pass along with the request.
files -- any files to send.
repeat -- repeat the request up to this number of times.
Returns:
dictionary of response from portal instance.
"""
# multipart requests pre-encode the parameters
if request_type == 'MULTIPART':
parameters = request_parameters
else:
parameters = {'f': 'json'}
# if we haven't logged in yet, won't have a valid token
if self.token:
parameters['token'] = self.token
if request_parameters:
parameters.update(request_parameters)
if request_type == 'GET':
req = request('?'.join((in_url, encode(parameters))))
elif request_type == 'MULTIPART':
req = request(in_url, parameters)
elif request_type == 'WEBMAP':
if files:
req = request(in_url, *self.encode_multipart_data(parameters, files))
else:
arcpy.AddWarning("Multipart request made, but no files provided.")
return
else:
req = request(
in_url, encode(parameters).encode('UTF-8'), self.headers)
if additional_headers:
for key, value in list(additional_headers.items()):
req.add_header(key, value)
req.add_header('Accept-encoding', 'gzip')
try:
response = urlopen(req)
except HTTPError as e:
arcpy.AddWarning("{} {} -- {}".format(
HTTP_ERROR_MSG, in_url, e.code))
return
except URLError as e:
arcpy.AddWarning("{} {} -- {}".format(
URL_ERROR_MSG, in_url, e.reason))
return
if response.info().get('Content-Encoding') == 'gzip':
buf = BytesIO(response.read())
with gzip.GzipFile(fileobj=buf) as gzip_file:
response_bytes = gzip_file.read()
else:
response_bytes = response.read()
response_text = response_bytes.decode('UTF-8')
# occasional timing conflicts; repeat until we get back a valid response.
response_json = json.loads(response_text)
# Check that data returned is not an error object
if not response_json or "error" in response_json:
rerun = False
if repeat > 0:
repeat -= 1
rerun = True
# token has expired. Revalidate, then rerun request
            if response_json and response_json.get('error', {}).get('code') == 498:
if self.debug:
arcpy.AddWarning("token invalid, retrying.")
                if self.login_method == 'token':
# regenerate the token if we're logged in via the application
self.token_login()
else:
self.login(self.username, self._password, repeat=0)
# after regenerating token, we should have something long-lived
if not self.token or self.valid_for < 5:
arcpy.AddError("Unable to get signin token.")
return
rerun = True
if rerun:
time.sleep(2)
response_json = self.url_request(
in_url, request_parameters, request_type,
additional_headers, files, repeat)
return response_json
def save_file(self, url, saveFile):
"""Saves a file to a given location"""
if self.token:
url += "?token={}".format(self.token)
data = urlopen(url).read()
with open(saveFile, "wb") as out_file:
out_file.write(data)
return saveFile
def assert_json_success(self, data):
"""A function that checks that the input JSON object
is not an error object."""
success = False
obj = json.loads(data)
if 'status' in obj and obj['status'] == "error":
arcpy.AddWarning("{} {}".format("JSON object returned an error.", str(obj)))
elif 'error' in obj:
err = obj['error']
# format the error message
if 'messageCode' in err:
code = err['messageCode']
elif 'code' in err:
code = err['code']
else:
code = "No code provided."
msg = "Portal error: {}: {}".format(err['message'], code)
if 'details' in err and err['details']:
details = []
for detail in err['details']:
# only use unique detail messages
if detail is not err['message']:
details.append(detail)
if details:
msg += ". Details: {}".format("\n".join(details))
arcpy.AddWarning(msg)
else:
success = True
return success
def _parse_url(self, url=None):
""" Parse a url into components."""
results = None
if url:
results = parse.urlparse(url)
return results
def _normalize_host_url(self, parse_result):
""" Normalize a hostname to include just the validated
location and path."""
host_url = parse_result.netloc
if parse_result.path:
path = parse_result.path
if path[-1] == '/':
path = path[:-1]
host_url += path
return host_url
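# Hedged end-to-end sketch of driving AGOLHelper (kept as comments so the
# module stays import-safe). The host, credentials, file name and item title
# below are placeholders, not values from the original tool.
#
#   ago = AGOLHelper(portal_url='https://www.arcgis.com')
#   ago.login('someuser', 'somepassword')     # or ago.token_login() inside ArcGIS
#   folder_id = ago.create_folder('Uploads')
#   ago.add_item('data.tpk', folder_id=folder_id)          # multipart upload
#   item_ids = ago.search(title='data', item_type='Tile Package')
#   if item_ids:
#       ago.commit(item_ids[0])               # finalize the multipart upload
#       ago.share_items(org=True, items=item_ids)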
|
arcpy/sample-gp-tools
|
SharePackage2/ago.py
|
Python
|
apache-2.0
| 31,041
|
from eWUDAPT_analysis.utils import *
from pylab import *
from netCDF4 import Dataset, num2date
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
class analysis_single:
def __init__(self, args, json):
self.filename = args.filename
self.outputdir = args.outputdir
self.ncdf_definition = load_netcdf_definition(json)
self.institute, self.model, self.version = define_inst_mod_ver(args.filename)
create_directory(self.outputdir)
self.create_plots()
def create_plots(self):
'''
create plots
'''
# loop through all variables
for variable in self.ncdf_definition['variables']:
if 'time' in variable['dimensions']:
# create time series plot of surface/first level
self.plot_time_series(variable)
if (len(set(['levf', 'levh', 'levs']) & set(variable['dimensions']))==1):
dimname = sorted(set(['levf', 'levh', 'levs']) & set(variable['dimensions']))[0]
# found a vertical dimension -> create vertical profile
if variable['name'] not in ['zf', 'zh', 'zs']:
# don't plot height in meters
self.plot_vertical_profile(variable, dimname)
def plot_time_series(self, variable):
'''
create time series plots'
'''
try:
outputfig = ('time_series_' + self.institute + '_' + self.model + '_' +
self.version + '_' + variable["name"] + '.png')
outputfile = os.path.join(self.outputdir, outputfig)
ncfile = Dataset(self.filename, 'r')
time = ncfile.variables['time']
try:
dt = [num2date(step, units=time.units, calendar=time.calendar)
for step in time[:]]
except AttributeError:
# fallback
dt = [num2date(step, units='seconds since 2006-07-01 12:00:00',
calendar='gregorian') for step in time[:]]
if (len(variable['dimensions'])==1):
# plot time series, 1D variable
val = ncfile.variables[variable['name']][:]
elif (len(variable['dimensions'])==2):
# plot first level, 2D variable
# TODO: add info to title on level
val = ncfile.variables[variable['name']][:]
if (len(np.shape(val)) == len(variable['dimensions'])):
if np.shape(val)[0] == np.shape(time)[0]:
val = val[:,0]
elif np.shape(val)[1] == np.shape(time)[0]:
val = val[0,:]
else:
pass
else:
return
else:
raise Exception('Variable ' + variable['name'] +
'contains more than two dimensions')
# create the plot
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y %H:%M'))
plt.gca().xaxis.set_major_locator(mdates.HourLocator(byhour=range(0,24,3)))
plt.plot(dt, val)
plt.gcf().autofmt_xdate()
plt.xlabel('time')
plt.ylabel(variable["long_name"] + ' [' + variable["unit"] + ']')
plt.savefig(outputfile)
plt.close()
# close netCDF file
ncfile.close()
return # TODO: savefig
except KeyError:
pass
def plot_vertical_profile(self, variable, dimname):
'''
create vertical profile plots at 0, 6, 12, 18h
'''
try:
ncfile = Dataset(self.filename, 'r')
time = ncfile.variables['time']
try:
dt = [num2date(step, units=time.units, calendar=time.calendar)
for step in time[:]]
except AttributeError:
# fallback
dt = [num2date(step, units='seconds since 2006-07-01 12:00:00',
calendar='gregorian') for step in time[:]]
# define timesteps at which vertical profiles are plotted
dt_profiles = np.arange(dt[0], dt[-1],np.timedelta64(6,'h'),
dtype='datetime64').astype(datetime.datetime)
for dt_profile in dt_profiles:
try:
idx = dt.index(dt_profile)
except ValueError:
continue
if (len(variable['dimensions'])==1):
# plot static vertical profile, 1D variable
val = ncfile.variables[variable['name']][:]
elif (len(variable['dimensions'])==2):
# plot vertical profile every 6 hours, 2D variable
# TODO: add info to title on level
val = ncfile.variables[variable['name']][:]
if (len(np.shape(val)) == len(variable['dimensions'])):
if np.shape(val)[0] == np.shape(time)[0]:
val = val[idx, :]
elif np.shape(val)[1] == np.shape(time)[0]:
                            val = val[:, idx]
else:
pass
else:
return
else:
raise Exception('Variable ' + variable['name'] +
'contains more than two dimensions')
if (dimname=='levf'):
dimvar = 'zf'
elif (dimname=='levh'):
dimvar = 'zh'
elif (dimname=='levs'):
dimvar = 'zs'
levels = ncfile.variables[dimvar]
# create the plot
if dimvar != 'zs':
plt.plot(val, levels[idx, :])
else:
plt.plot(val, levels[:])
diminfo = get_dimension_information(self.ncdf_definition, dimname)
timestr = dt_profile.strftime('%Y-%m-%d %H:%M')
timestrplot = dt_profile.strftime('%Y-%m-%d_%H:%M')
outputfig = ('vert_profile_' + self.institute + '_' + self.model + '_' +
self.version + '_' + variable["name"] + timestrplot + '.png')
outputfile = os.path.join(self.outputdir, outputfig)
plt.title(variable['long_name'] + ' at ' + timestr)
plt.xlabel(variable["long_name"] + ' [' + variable["unit"] + ']')
plt.ylabel(levels.long_name + ' [' + levels.units + ']')
plt.savefig(outputfile)
plt.close()
# close netCDF file
ncfile.close()
return # TODO: savefig
except KeyError:
pass
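# Hedged usage sketch (comments only). 'args' is whatever argparse namespace
# the calling script builds; only the attributes read in __init__ (filename,
# outputdir) matter here, and the json argument points at the netCDF definition
# file. The concrete paths are placeholders.
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('filename')
#   parser.add_argument('outputdir')
#   args = parser.parse_args(['inst_model_v1.nc', 'plots'])
#   analysis_single(args, 'netcdf_definition.json')   # writes PNGs into plots/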
|
eWUDAPT/eWUDAPT-analysis
|
eWUDAPT_analysis/analysis_single.py
|
Python
|
apache-2.0
| 5,899
|
"""
Production settings with some stubbed components in ci environment, like database.
Besides these stubs the settings are as similar to production as possible
Mainly used by service in docker on ci server
"""
from .base import *
from ._ci import *
|
sunForest/AviPost
|
avipost/avipost/settings/ci.py
|
Python
|
apache-2.0
| 252
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics utils files to compute certain similarity metrics."""
from absl import flags
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
def VerifyCompatibleImageShapes(img1, img2):
"""Checks if two image tensors are compatible for metric computation.
This function checks if two sets of images have ranks at least 3, and if the
last three dimensions match.
Args:
img1: The first images tensor.
img2: The second images tensor.
Returns:
A tuple of the first tensor shape, the second tensor shape, and a list of
tf.Assert() implementing the checks.
Raises:
ValueError: when static shape check fails.
"""
shape1 = img1.shape.with_rank_at_least(3)
shape2 = img2.shape.with_rank_at_least(3)
if shape1.ndims is not None and shape2.ndims is not None:
for dim1, dim2 in zip(reversed(shape1[:-3]), reversed(shape2[:-3])):
# For TF V1 compatibility.
try:
dim1 = dim1.value
dim2 = dim2.value
except AttributeError:
pass
if not (dim1 in (None, 1) or dim2 in (None, 1) or dim1 == dim2):
raise ValueError('Two images are not compatible: %s and %s' %
(shape1, shape2))
else:
raise ValueError('The two images do not have a defined shape.')
# Now assign shape tensors.
shape1, shape2 = tf.shape_n([img1, img2])
checks = []
checks.append(
tf.Assert(
tf.greater_equal(tf.size(shape1), 3), [shape1, shape2], summarize=10))
checks.append(
tf.Assert(
tf.reduce_all(tf.equal(shape1[-3:], shape2[-3:])), [shape1, shape2],
summarize=10))
return shape1, shape2, checks
def _SSIMHelper(x, y, reducer, max_val, compensation=1.0):
r"""Helper function to SSIM.
Arguments:
x: first set of images.
y: first set of images.
reducer: Function that computes 'local' averages from set of images. For
non-covolutional version, this is usually tf.reduce_mean(x, [1, 2]), and
for convolutional version, this is usually tf.nn.avg_pool or tf.nn.conv2d
with weighted-sum kernel.
max_val: The dynamic range (i.e., the difference between the maximum
possible allowed value and the minimum allowed value).
compensation: Compensation factor. See above.
Returns:
A pair containing the luminance measure and the contrast-structure measure.
"""
c1 = (0.01 * max_val)**2
c2 = (0.03 * max_val)**2
# SSIM luminance measure is
# (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1).
mean0 = reducer(x)
mean1 = reducer(y)
num0 = mean0 * mean1 * 2.0
den0 = tf.square(mean0) + tf.square(mean1)
luminance = (num0 + c1) / (den0 + c1)
# SSIM contrast-structure measure is
# (2 * cov_xy + c2) / (cov_xx + cov_yy + c2).
# Note that `reducer` is a weighted sum with weight w_k, \sum_i w_i = 1, then
# cov_xy = \sum_i w_i (x_i - mu_x) (y_i - mu_y)
# = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j).
num1 = reducer(x * y) * 2.0
den1 = reducer(tf.square(x) + tf.square(y))
c2 *= compensation
cs = (num1 - num0 + c2) / (den1 - den0 + c2)
# SSIM score is the product of the luminance and contrast-structure measures.
return luminance, cs
def SSIMWithoutFilter(a,
b,
max_val=255.0,
filter_size=(8, 8),
strides=None,
spatial_average=True,
channel_average=True):
"""Computes unfiltered SSIM index between a and b per channel.
Arguments:
a: First set of patches.
b: Second set of patches.
max_val: The dynamic range (i.e., the difference between the maximum
possible allowed value and the minimum allowed value).
filter_size: Determines the moving average filter size to aggregate the SSIM
over. Must be a sequence of length two: [filter_height, filter_width].
strides: The strides of the moving average filter. Must be None or a
sequence of length two: [row_stride, col_stride]. If None, defaults to
`filter_size`.
spatial_average: If True, return the mean value across space. Otherwise,
return the full 2D spatial map.
channel_average: If True, return the mean value across channels. Otherwise,
return SSIM per channel.
Returns:
The SSIM index for each individual element in the batch.
For color images, SSIM is averaged after computed in each channel
separately.
Raises:
ValueError: if a and b don't have the broadcastable shapes, or the ranks of
a and b are not at least 3.
"""
# Enforce rank and shape checks.
shape1, _, checks = VerifyCompatibleImageShapes(a, b)
with tf.control_dependencies(checks):
a = tf.identity(a)
if strides is None:
strides = filter_size
n = float(np.prod(filter_size))
kernel = tf.fill(
dims=list(filter_size) + [shape1[-1], 1],
value=tf.constant(1 / n, dtype=a.dtype))
strides = [1] + list(strides) + [1]
def reducer(x): # pylint: disable=invalid-name
shape = tf.shape(x)
# DepthwiseConv2D takes rank 4 tensors. Flatten leading dimensions.
x = tf.reshape(x, shape=tf.concat([[-1], shape[-3:]], 0))
y = tf.nn.depthwise_conv2d(x, kernel, strides=strides, padding='VALID')
return tf.reshape(y, tf.concat([shape[:-3], tf.shape(y)[1:]], 0))
compensation = (n - 1) / n
luminance, cs = _SSIMHelper(a, b, reducer, max_val, compensation)
ssim = luminance * cs
reduce_axis = [-3, -2] if spatial_average else []
if channel_average:
reduce_axis.append(-1)
if reduce_axis:
ssim = tf.reduce_mean(ssim, axis=reduce_axis)
return ssim
def GradientDifferenceLoss(img1,
img2,
dist_func=tf.square,
reduce_func=tf.reduce_sum,
name=None):
"""Returns an op that calculates loss between image gradients.
This function assumes that `img1` and `img2` are image batches,
i.e. [batch_size, row, col, channels].
Arguments:
img1: First image batch.
img2: Second image batch.
dist_func: A TensorFlow op to apply to edge map differences (e.g. tf.square
for L2 or tf.abs for L1).
reduce_func: A TensorFlow op to reduce edge map distances into a single loss
per image pair (e.g. tf.reduce_sum for a gradient or tf.reduce_mean for a
per-pixel average score).
name: Namespace in which to embed the computation.
Returns:
A tensor with size [batch_size] containing the finite difference edge loss
for each image pair in the batch.
"""
with tf.name_scope(name, 'GDL', [img1, img2]):
_, _, checks = VerifyCompatibleImageShapes(img1, img2)
dy1, dx1 = tf.image.image_gradients(img1)
dy2, dx2 = tf.image.image_gradients(img2)
diff = dist_func(dy1 - dy2) + dist_func(dx1 - dx2)
loss = reduce_func(diff, list(range(-3, 0)))
with tf.control_dependencies(checks):
return tf.identity(loss)
def PSNR(a, b, max_val=255.0, name=None):
"""Returns the Peak Signal-to-Noise Ratio between a and b.
Arguments:
a: first set of images.
b: second set of images.
max_val: the dynamic range of the images (i.e., the difference between the
maximum the and minimum allowed values).
name: namespace to embed the computation in.
Returns:
The scalar PSNR between a and b. The shape of the returned tensor is
[batch_size, 1].
"""
with tf.name_scope(name, 'PSNR', [a, b]):
psnr = tf.image.psnr(a, b, max_val=max_val, name=name)
_, _, checks = VerifyCompatibleImageShapes(a, b)
with tf.control_dependencies(checks):
return tf.identity(psnr)
def ClippedPSNR(img1,
img2,
min_val=0.0,
max_val=255.0,
clip=True,
quantize=True,
max_psnr=100.0,
name=None):
"""Return average Clipped PSNR between `a` and `b`.
Arguments:
img1: first set of images.
img2: second set of images.
min_val: smallest valid value for a pixel.
max_val: largest valid value for a pixel.
clip: If True, pixel values will be clipped to [`min_value`, `max_value`].
quantize: If True, pixel values will be rounded before calculating PSNR.
max_psnr: If not None, PSNR will be clipped by this value before rounding.
name: namespace to embed the computation in.
Returns:
PSNR between img1 and img2 or average PSNR if input is a batch.
"""
with tf.name_scope(name, 'clipped_psnr', [img1, img2]):
if quantize:
img1 = tf.round(img1)
img2 = tf.round(img2)
if clip:
img1 = tf.clip_by_value(img1, min_val, max_val)
img2 = tf.clip_by_value(img2, min_val, max_val)
value_range = max_val - min_val
psnr = PSNR(img1, img2, max_val=value_range)
if max_psnr is not None:
psnr = tf.minimum(psnr, max_psnr)
return tf.reduce_mean(psnr)
def SobelEdgeLoss(img1, img2, dist_func=tf.square, reduce_func=tf.reduce_sum):
"""Returns an op that calculates Sobel edge loss between two images.
Arguments:
img1: First image batch.
img2: Second image batch.
dist_func: A TensorFlow op to apply to edge map differences (e.g. tf.square
for L2 or tf.abs for L1).
reduce_func: A TensorFlow op to reduce edge map distances into a single loss
per image pair (e.g. tf.reduce_sum for a gradient or tf.reduce_mean for a
per-pixel average score).
Returns:
A tensor with size [batch_size] containing the Sobel edge loss for each
image pair in the batch.
"""
_, _, checks = VerifyCompatibleImageShapes(img1, img2)
# Sobel tensor has shape [batch_size, h, w, d, num_kernels].
sobel1 = tf.image.sobel_edges(img1)
sobel2 = tf.image.sobel_edges(img2)
diff = dist_func(sobel1 - sobel2)
# To match GDL, sum across dy and dx regardless of reduce_func.
edge_maps = tf.reduce_sum(diff, axis=-1)
# Reduce over all dimensions except batch_size.
loss = reduce_func(edge_maps, list(range(-3, 0)))
with tf.control_dependencies(checks):
return tf.identity(loss)
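# Hedged usage sketch for the metrics above (kept as comments so importing this
# module has no side effects). The random tensors are placeholders and assume
# the [batch, height, width, channels] layout checked by
# VerifyCompatibleImageShapes; adjust to tf.compat.v1 calls under TF1 graph mode.
#
#   a = tf.random.uniform([4, 64, 64, 3], maxval=255.0)
#   b = tf.random.uniform([4, 64, 64, 3], maxval=255.0)
#   ssim = SSIMWithoutFilter(a, b, max_val=255.0)   # shape [4]
#   psnr = ClippedPSNR(a, b)                        # scalar, batch-averaged
#   gdl = GradientDifferenceLoss(a, b)              # shape [4]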
|
google-research/google-research
|
optimizing_interpretability/metrics_utils.py
|
Python
|
apache-2.0
| 10,687
|
# Copyright 2019 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
from os_win import constants
from os_win.tests.functional import test_base
from os_win import utilsfactory
class VhdUtilsTestCase(test_base.OsWinBaseFunctionalTestCase):
def setUp(self):
super(VhdUtilsTestCase, self).setUp()
self._vhdutils = utilsfactory.get_vhdutils()
self._diskutils = utilsfactory.get_diskutils()
self._pathutils = utilsfactory.get_pathutils()
def _create_temp_vhd(self, size_mb=32,
vhd_type=constants.VHD_TYPE_DYNAMIC):
f = tempfile.TemporaryFile(suffix='.vhdx', prefix='oswin_vhdtest_')
f.close()
self._vhdutils.create_vhd(f.name, vhd_type,
max_internal_size=size_mb << 20)
self.addCleanup(os.unlink, f.name)
return f.name
def _create_temp_symlink(self, target, target_is_dir):
f = tempfile.TemporaryFile(prefix='oswin_vhdtest_link_')
f.close()
self._pathutils.create_sym_link(f.name, target, target_is_dir)
if target_is_dir:
self.addCleanup(os.rmdir, f.name)
else:
self.addCleanup(os.unlink, f.name)
return f.name
def test_attach_detach(self):
vhd_path = self._create_temp_vhd()
# We'll make sure that we can detect attached vhds, even when the
# paths contain symlinks.
vhd_link = self._create_temp_symlink(vhd_path, target_is_dir=False)
vhd_dir_link = self._create_temp_symlink(os.path.dirname(vhd_path),
target_is_dir=True)
# A second, indirect link.
vhd_link2 = os.path.join(vhd_dir_link,
os.path.basename(vhd_path))
def _check_attached(expect_attached):
# Let's try both approaches and all paths pointing to our image.
paths = [vhd_path, vhd_link, vhd_link2]
for path in paths:
self.assertEqual(
expect_attached,
self._vhdutils.is_virtual_disk_file_attached(path))
self.assertEqual(
expect_attached,
self._diskutils.is_virtual_disk_file_attached(path))
_check_attached(False)
try:
self._vhdutils.attach_virtual_disk(vhd_path)
_check_attached(True)
finally:
self._vhdutils.detach_virtual_disk(vhd_path)
_check_attached(False)
|
openstack/os-win
|
os_win/tests/functional/test_vhdutils.py
|
Python
|
apache-2.0
| 3,115
|
from datetime import timedelta, datetime
import pytz
from dateutil.parser import parse
from snowflake.connector.converter import (SnowflakeConverter)
def test_fetch_various_timestamps(conn_cnx):
"""
More coverage of timestamp
Currently TIMESTAMP_LTZ is not tested.
"""
PST_TZ = "America/Los_Angeles"
epoch_times = [
'1325568896',
'-2208943503',
'0',
'-1'
]
timezones = [
'+07:00',
'+00:00',
'-01:00',
'-09:00'
]
fractions = '123456789'
data_types = ['TIMESTAMP_TZ', 'TIMESTAMP_NTZ']
data = []
for dt in data_types:
for et in epoch_times:
if dt == 'TIMESTAMP_TZ':
for tz in timezones:
tzdiff = (int(tz[1:3]) * 60 + int(tz[4:6])) * (
-1 if tz[0] == '-' else 1)
tzinfo = SnowflakeConverter._generate_tzinfo_from_tzoffset(
tzdiff)
ts = datetime.fromtimestamp(float(et), tz=tzinfo)
data.append({
'scale': 0,
'dt': dt,
'inp': ts.strftime(
'%Y-%m-%d %H:%M:%S{tz}'.format(tz=tz)),
'out': ts
})
for idx in range(len(fractions)):
scale = idx + 1
if idx + 1 != 6: # SNOW-28597
ts0 = datetime.fromtimestamp(float(et), tz=tzinfo)
ts0_str = ts0.strftime(
'%Y-%m-%d %H:%M:%S.{ff}{tz}'.format(
ff=fractions[:idx + 1], tz=tz))
ts1 = parse(ts0_str)
data.append({
'scale': scale,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
elif dt == 'TIMESTAMP_LTZ':
# WIP. this test work in edge case
tzinfo = pytz.timezone(PST_TZ)
ts0 = datetime.fromtimestamp(float(et))
ts0 = pytz.utc.localize(ts0, is_dst=False).astimezone(tzinfo)
ts0_str = ts0.strftime('%Y-%m-%d %H:%M:%S')
ts1 = ts0
data.append({
'scale': 0,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
for idx in range(len(fractions)):
ts0 = datetime.fromtimestamp(float(et))
ts0 = pytz.utc.localize(ts0, is_dst=False).astimezone(
tzinfo)
ts0_str = ts0.strftime(
'%Y-%m-%d %H:%M:%S.{ff}'.format(
ff=fractions[:idx + 1]
))
ts1 = ts0 + timedelta(seconds=float(
'0.{0}'.format(fractions[:idx + 1])))
data.append({
'scale': idx + 1,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
else:
ts0_str = datetime.fromtimestamp(
float(et)).strftime('%Y-%m-%d %H:%M:%S')
ts1 = parse(ts0_str)
data.append({
'scale': 0,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
for idx in range(len(fractions)):
ts0_str = datetime.fromtimestamp(float(et)).strftime(
'%Y-%m-%d %H:%M:%S.{ff}'.format(
ff=fractions[:idx + 1]))
ts1 = parse(ts0_str)
data.append({
'scale': idx + 1,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
sql = "SELECT "
for d in data:
sql += "'{inp}'::{dt}({scale}), ".format(
inp=d['inp'],
dt=d['dt'],
scale=d['scale']
)
sql += "1"
with conn_cnx() as cnx:
cur = cnx.cursor()
cur.execute("""
ALTER SESSION SET TIMEZONE='{tz}';
""".format(tz=PST_TZ))
rec = cur.execute(sql).fetchone()
for idx, d in enumerate(data):
comp, lower, higher = _in_range(d['out'], rec[idx])
assert comp, 'data: {d}: target={target}, lower={lower}, higher={' \
'higher}'.format(
d=d, target=rec[idx], lower=lower, higher=higher)
def _in_range(reference, target):
lower = reference - timedelta(microseconds=1)
higher = reference + timedelta(microseconds=1)
return lower <= target <= higher, lower, higher
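# Hedged illustration of the tolerance check above (comments only): a fetched
# timestamp passes if it lies within one microsecond of the expected value,
# which absorbs rounding introduced during conversion.
#
#   expected = datetime(2012, 1, 3, 5, 34, 56)
#   ok, lower, higher = _in_range(expected, expected + timedelta(microseconds=1))
#   assert ok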
|
mayfield/snowflake-connector-python
|
test/test_converter_more_timestamp.py
|
Python
|
apache-2.0
| 4,975
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import multiprocessing
import psutil
import re
import socket
from nova import exception
class VixException(exception.NovaException):
pass
def get_host_memory_info():
mem_info = psutil.phymem_usage()
return (mem_info.total, mem_info.free)
def get_disk_info(path):
disk_info = psutil.disk_usage(path)
return (disk_info.total, disk_info.free)
def get_cpu_count():
return multiprocessing.cpu_count()
def get_free_port():
sock = socket.socket()
try:
sock.bind(('', 0))
return sock.getsockname()[1]
finally:
sock.close()
def remove_lines(file_name, pattern):
lines = []
found = False
with open(file_name, 'r') as f:
for s in f.readlines():
if re.match(pattern, s):
found = True
else:
lines.append(s)
if found:
with open(file_name, 'w') as f:
f.writelines(lines)
return found
def get_text(file_name, pattern):
with open(file_name, 'r') as f:
for s in f.readlines():
m = re.match(pattern, s)
if m:
return m.groups()
def replace_text(file_name, pattern, replacement):
lines = []
found = False
with open(file_name, 'r') as f:
for s in f.readlines():
if re.match(pattern, s):
found = True
new_s = re.sub(pattern, replacement, s)
else:
new_s = s
lines.append(new_s)
if found:
with open(file_name, 'w') as f:
f.writelines(lines)
return found
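# Hedged usage sketch for the text helpers above (comments only; the .vmx path
# and the settings shown are illustrative placeholders, not values used by the
# driver itself).
#
#   vmx = '/path/to/instance.vmx'
#   groups = get_text(vmx, r'memsize\s*=\s*"(\d+)"')        # regex groups or None
#   replace_text(vmx, r'memsize\s*=\s*".*"', 'memsize = "2048"')
#   remove_lines(vmx, r'serial0\.')                          # drop matching lines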
|
cloudbase/nova-vix-driver
|
vix/utils.py
|
Python
|
apache-2.0
| 2,278
|
import random
# Simulate a simple two-player dice game: the players alternately add a d6 roll
# (1-6) to their running totals; a game ends when the two totals become equal
# or a player's total reaches 30.
class Game(object):
firstCount = 0
secondCount = 0
playerNumber = 0
def play(self):
currentCount = random.choice(range(1,7))
if (self.playerNumber == 0):
self.firstCount += currentCount
self.playerNumber = 1
if self.firstCount == self.secondCount:
return 1
if self.firstCount >= 30:
return 2
else:
self.secondCount += currentCount
self.playerNumber = 0
if self.secondCount == self.firstCount:
return 3
if self.secondCount >= 30:
return 4
return 0
def playGame():
game = Game()
while(True):
res = game.play()
if res == 2 or res == 4:
return 1
elif res == 1 or res == 3:
return 0
# Estimate, over 1000 simulated games, the probability that a game ends with a
# player reaching 30 rather than with the two totals tying.
s = 0
for i in range(1000):
s += playGame()
print s/1000.0
|
Imperat/SSU-Courses
|
ssu-modeling/chapter2/subchapter1/game.py
|
Python
|
apache-2.0
| 925
|
#!/usr/bin/env python
import sys
sys.path.append("..")
import elasticd
import os
#elasticd.startup()
elasticd.startup(os.path.dirname(os.path.realpath(__file__)) + '/../conf/settings.cfg')
|
bryantrobbins/Elasticd
|
bin/run.py
|
Python
|
apache-2.0
| 185
|
#!/usr/bin/env python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
"""OpenROAD app server read-only vosa (nee vasa) experiment
either under Windows or cross platform with Java.
Based on code in OpenROAD 4.1 vasa_apps.asp
Windows/DCOM requires CPython Win32 Extensions to access DCOM, available
from http://sourceforge.net/projects/pywin32/
For Windows later than Windows XP also see
http://technet.microsoft.com/en-us/library/cc738214%28v=ws.10%29.aspx
short summary add remote windows users to "Distributed COM Users"
Java requires Jython.
Running
=======
Either have an app server configured locally that can be used
or setup OpenROAD client and setup Operating System evironment
variable to point to server, e.g.:
Windows
set TEST_ORSERVER=app_server_hostname
Unix
TEST_ORSERVER=app_server_hostname
export TEST_ORSERVER
If TEST_ORSERVER is not set and no command line argument is given, localhost is assumed.
"""
import os
import sys
from pprint import pprint
import orserver
APPSERVER_HOSTNAME = os.environ.get('TEST_ORSERVER') or 'localhost'
def doit(appserver_hostname=APPSERVER_HOSTNAME):
w4gl_image = 'ASA_ns'
connection_mode = None
connection_mode = ''
#connection_mode = 'unauthenticated'
#connection_mode = 'compressed'
#connection_mode = 'unauthenticated-compressed'
# declare OpenROAD RemoteServer and Helper objects
rso = orserver.or_connect(w4gl_image, appserver_hostname, connection_mode=connection_mode)
aso = orserver.get_aso_and_attach_rso(rso)
func_sig = 'b_arr_UCAkaDetail=UCARRAY; b_arr_UCAkaDetail.i_aka_detail_id=INTEGER; b_arr_UCAkaDetail.i_asolib=INTEGER; b_arr_UCAkaDetail.i_servertype=INTEGER; b_arr_UCAkaDetail.v_aka_name=STRING; b_arr_UCAkaDetail.v_cmdflags=STRING; b_arr_UCAkaDetail.v_imagefile=STRING; b_arr_UCAkaDetail.v_serverlocation=STRING; b_UCSPOConfig=USERCLASS; b_UCSPOConfig.i_MaxDispatchers=INTEGER; b_UCSPOConfig.i_MaxTotalSlaves=INTEGER; b_UCSPOConfig.i_PrfMonInterval=INTEGER; b_UCSPOConfig.i_PrfMonLevel=INTEGER; b_UCSPOConfig.i_PurgeInterval=INTEGER; b_UCSPOConfig.i_TraceFileAppend=INTEGER; b_UCSPOConfig.i_TraceInterval=INTEGER; b_UCSPOConfig.i_TraceLevel=INTEGER; b_UCSPOConfig.v_TraceFileName=STRING'
result = orserver.callproc(aso, 'GetAllNameServerData', func_sig=func_sig)
print result
print ''
pprint(result)
rso.disconnect()
def main(argv=None):
if argv is None:
argv = sys.argv
try:
hostname = argv[1]
doit(hostname)
except IndexError:
doit()
return 0
if __name__ == "__main__":
sys.exit(main())
|
clach04/pyopenroad
|
pyvosa.py
|
Python
|
apache-2.0
| 2,639
|
import uuid, sys, time, re
import Bio.Structure
from Bio.Range import GenomicRange
from subprocess import Popen, PIPE
# This whole format is a subclass of the Transcript subclass
class GPD(Bio.Structure.Transcript):
def __init__(self,gpd_line):
# Only store the line and ID at first.
self._line = gpd_line.rstrip()
self._id = str(uuid.uuid4())
m = re.match('[^\t]+\t[^\t]+\t([^\t]+)\t[^\t]+\t([^\t]+)\t([^\t]+)',gpd_line)
self._range = GenomicRange(m.group(1),int(m.group(2))+1,int(m.group(3)))
self._initialized = False
# Most of GPD has not been set yet. Each method accessing GPD
# will need to check to see if initialize has been run
def _initialize(self): # Wait to initialize to speed up streaming
if self._initialized: return # nothing to do if its done
self._initialized = True
self._entry = _line_to_entry(self._line)
self._exons = []
self._junctions = []
self._payload = []
self._direction = self.value('strand')
self._gene_name = self.value('gene_name')
self._transcript_name = self.value('name')
self._name = None
for i in range(0,self.value('exonCount')):
ex = Bio.Structure.Exon(GenomicRange(self.value('chrom'),self.value('exonStarts')[i]+1,self.value('exonEnds')[i]))
self._exons.append(ex)
if self.value('exonCount') > 1:
for i in range(0,self.value('exonCount')-1):
l = GenomicRange(self.value('chrom'),self.value('exonEnds')[i],self.value('exonEnds')[i])
r = GenomicRange(self.value('chrom'),self.value('exonStarts')[i+1]+1,self.value('exonStarts')[i+1]+1)
junc = Bio.Structure.Junction(l,r)
junc.set_exon_left(self._exons[i])
junc.set_exon_right(self._exons[i+1])
self._junctions.append(junc)
self._sequence = None
@property
def junctions(self):
self._initialize()
return self._junctions
@property
def exons(self):
self._initialize()
return self._exons
  # override; we are guaranteed to have the range since we initialize it when reading the line
def get_range(self):
return self._range
def __str__(self):
return self.get_gpd_line()
#output the original gpd line
# Overrides Structure.Transcript
def get_gpd_line(self):
return self._line
def get_line(self):
return self._line
def value(self,key):
self._initialize()
return self._entry[key]
def _line_to_entry(line):
f = line.rstrip().split("\t")
d = {}
d['gene_name'] = f[0]
d['name'] = f[1]
d['chrom'] = f[2]
d['strand'] = f[3]
d['txStart'] = int(f[4])
d['txEnd'] = int(f[5])
d['cdsStart'] = int(f[6])
d['cdsEnd'] = int(f[7])
d['exonCount'] = int(f[8])
exonstarts = [int(x) for x in f[9].rstrip(",").split(",")]
d['exonStarts'] = exonstarts
exonends = [int(x) for x in f[10].rstrip(",").split(",")]
d['exonEnds'] = exonends
return d
class GPDStream:
def __init__(self,fh):
self.fh = fh
def read_entry(self):
ln = self.fh.readline()
if not ln: return False
gpd = GPD(ln)
return gpd
def __iter__(self):
return self
def next(self):
r = self.read_entry()
if not r:
raise StopIteration
else:
return r
class SortedOutputFile:
def __init__(self,filename,type='location',tempdir=None):
if type not in ['location','name']:
sys.stderr.write("ERROR: must be type location or name\n")
sys.exit()
self._gz = False
self._fh = open(filename,'w')
self._sh = None
if filename[-3:] == '.gz':
self._gz = True
self._pipes = []
scmd = "sort -k1,1 -k2,2"
if type == 'location':
scmd = "sort -k3,3 -k5,5n -k6,6n -k4,4"
if tempdir: scmd += " -T "+tempdir.rstrip('/')+'/'
if self._gz:
cmd1 = "gzip"
p1 = Popen(cmd1.split(),stdout=self._fh,stdin=PIPE,close_fds=True)
p2 = Popen(scmd.split(),stdout=p1.stdin,stdin=PIPE,close_fds=True)
self._sh = p2.stdin
self._pipes = [p2,p1]
else:
p = Popen(scmd.split(),stdout=self._fh,stdin=PIPE)
self._sh = p.stdin
self._pipes = [p]
def write(self,value):
self._sh.write(value)
def close(self):
#self._sh.flush()
#self._sh.close()
for p in self._pipes:
#p.stdin.flush()
#p.stdin.close()
p.communicate()
#self._pipes[0].stdin.flush()
#self._pipes[0].stdin.close()
#self._pipes[1].stdin.flush()
#self._pipes[1].stdin.close()
self._fh.close()
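# Hedged usage sketch for GPDStream/SortedOutputFile (comments only; the file
# names are placeholders). GPD entries parse lazily, so streaming and filtering
# stays cheap until a value is actually requested.
#
#   with open('transcripts.gpd') as inf:
#       out = SortedOutputFile('transcripts.sorted.gpd.gz', type='location')
#       for gpd in GPDStream(inf):
#           if gpd.value('exonCount') > 1:    # keep spliced transcripts only
#               out.write(gpd.get_line() + "\n")
#       out.close()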
|
jason-weirather/Au-public
|
iron/pythonlib/Bio/Format/GPD.py
|
Python
|
apache-2.0
| 4,415
|
#!/usr/bin/python
import sys
from os.path import join,exists,dirname
import random
import numpy as np
from numpy.random import randint, choice
from sklearn.datasets import load_svmlight_file
from torch.autograd import Function, Variable
import torch.nn as nn
import torch.optim as optim
import torch
from torch import FloatTensor
from uda_common import read_feature_groups, read_feature_lookup
# the concepts here come from: https://github.com/fungtion/DANN/blob/master/models/model.py
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
# Totally random:
# output = Variable(torch.randn(grad_output.shape).cuda()) + grad_output * 0 # grad_output.neg() * ctx.alpha
# zero (ignores domain)
# output = 0 * grad_output
# reversed (default)
output = grad_output.neg() * ctx.alpha
# print("Input grad is %s, output grad is %s" % (grad_output.data.cpu().numpy()[:10], output.data.cpu().numpy()[:10]))
return output, None
# Instead of this, may be able to just regularize by forcing off-diagonal to zero
# didn't work bc/ of memory issues
class StraightThroughLayer(nn.Module):
def __init__(self, input_features):
super(StraightThroughLayer, self).__init__()
self.vector = nn.Parameter( torch.randn(1, input_features) )
#self.add_module('pass-through vector', self.vector)
def forward(self, input_data):
# output = input_data * self.vector
output = torch.mul(input_data, self.vector)
return output
class PivotLearnerModel(nn.Module):
def __init__(self, input_features):
super(PivotLearnerModel, self).__init__()
# Feature takes you from input to the "representation"
# self.feature = nn.Sequential()
# straight through layer just does an element-wise product with a weight vector
num_features = input_features
# num_features = 200
# self.vector = nn.Parameter( torch.randn(1, input_features) )
self.feature = nn.Sequential()
self.feature.add_module('input_layer', StraightThroughLayer(input_features))
# self.feature.add_module('feature_layer', nn.Linear(input_features, num_features))
self.feature.add_module('relu', nn.ReLU(True))
# Standard feed forward layer:
# num_features = 200
# self.feature.add_module('input_layer', nn.Linear(input_features, num_features))
# self.feature.add_module('relu', nn.ReLU(True))
# task_classifier maps from a feature representation to a task prediction
self.task_classifier = nn.Sequential()
self.task_classifier.add_module('task_binary', nn.Linear(num_features, 1))
self.task_classifier.add_module('task_sigmoid', nn.Sigmoid())
# domain classifier maps from a feature representation to a domain prediction
self.domain_classifier = nn.Sequential()
# hidden_nodes = 100
# self.domain_classifier.add_module('domain_hidden', nn.Linear(num_features, hidden_nodes, bias=False))
# self.domain_classifier.add_module('relu', nn.ReLU(True))
self.domain_classifier.add_module('domain_classifier', nn.Linear(num_features, 1, bias=False))
# # self.domain_classifier.add_module('domain_predict', nn.Linear(100, 1))
self.domain_classifier.add_module('domain_sigmoid', nn.Sigmoid())
# self.domain_classifier2 = nn.Sequential()
# self.domain_classifier2.add_module('domain_linear', nn.Linear(num_features, 1, bias=False))
# # # self.domain_classifier.add_module('domain_predict', nn.Linear(100, 1))
# self.domain_classifier2.add_module('domain_sigmoid', nn.Sigmoid())
def forward(self, input_data, alpha):
feature = self.feature(input_data)
# feature = input_data * self.vector
task_prediction = self.task_classifier(feature)
# Get domain prediction
reverse_feature = ReverseLayerF.apply(feature, alpha)
domain_prediction = self.domain_classifier(reverse_feature)
# Only domain predictor 1 is reversed
# domain_prediction2 = self.domain_classifier2(feature)
return task_prediction, domain_prediction #(domain_prediction, domain_prediction2)
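# Hedged sketch of one training step with the model above (comments only; the
# batch tensors, labels and loss weights are placeholders defined in main()).
# A single forward pass yields both heads; the gradient-reversal layer makes
# the domain loss drive the shared features toward domain invariance while the
# task loss is optimized normally.
#
#   model = PivotLearnerModel(num_feats)
#   x = Variable(FloatTensor(batch_X))                  # [batch, num_feats]
#   task_pred, domain_pred = model(x, alpha=0.1)
#   loss = task_loss_fn(task_pred.squeeze(), task_labels) + \
#          domain_weight * domain_loss_fn(domain_pred.squeeze(), domain_labels)
#   optimizer.zero_grad(); loss.backward(); optimizer.step()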
def main(args):
if len(args) < 1:
sys.stderr.write("Required arguments: <data file> [backward True|False]\n")
sys.exit(-1)
device = 'cpu'
if torch.cuda.is_available():
device = 'cuda'
if len(args) > 1:
        # Note: bool('False') is True, so parse the flag explicitly rather than casting
        backward = args[1].lower() in ('true', '1', 'yes')
        print("Direction is backward=%s based on args=%s" % (backward, args[1]))
else:
backward = False
print("Direction is forward by default")
# Read the data:
goal_ind = 2
domain_weight = 1.0
reg_weight = 0.1
lr = 0.01
epochs = 1000
batch_size = 50
sys.stderr.write("Reading source data from %s\n" % (args[0]))
all_X, all_y = load_svmlight_file(args[0])
# y is 1,2 by default, map to 0,1 for sigmoid training
all_y -= 1 # 0/1
# continue to -1/1 for softmargin training:
# all_y *= 2 # 0/2
# all_y -= 1 # -1/1
num_instances, num_feats = all_X.shape
domain_map = read_feature_groups(join(dirname(args[0]), 'reduced-feature-groups.txt'))
domain_inds = domain_map['Domain']
feature_map = read_feature_lookup(join(dirname(args[0]), 'reduced-features-lookup.txt'))
direction = 1 if backward else 0
sys.stderr.write("using domain %s as source, %s as target\n" %
(feature_map[domain_inds[direction]],feature_map[domain_inds[1-direction]]))
source_instance_inds = np.where(all_X[:,domain_inds[direction]].toarray() > 0)[0]
X_source = all_X[source_instance_inds,:]
X_source[:, domain_inds[direction]] = 0
X_source[:, domain_inds[1-direction]] = 0
y_source = all_y[source_instance_inds]
num_source_instances = X_source.shape[0]
num_train_instances = int(X_source.shape[0] * 0.8)
X_task_train = X_source[:num_train_instances,:]
y_task_train = y_source[:num_train_instances]
X_task_valid = X_source[num_train_instances:, :]
y_task_valid = y_source[num_train_instances:]
target_instance_inds = np.where(all_X[:,domain_inds[1-direction]].toarray() > 0)[0]
X_target = all_X[target_instance_inds,:]
X_target[:, domain_inds[direction]] = 0
X_target[:, domain_inds[1-direction]] = 0
num_target_train = int(X_target.shape[0] * 0.8)
X_target_train = X_target[:num_target_train,:]
# y_target_train = y_target[:num_target_train]
X_target_valid = X_target[num_target_train:, :]
# y_target_dev = y_target[num_target_train:]
# y_test = all_y[target_instance_inds]
num_target_instances = X_target_train.shape[0]
model = PivotLearnerModel(num_feats).to(device)
task_loss_fn = nn.BCELoss()
domain_loss_fn = nn.BCELoss()
l1_loss = nn.L1Loss()
#task_loss_fn.cuda()
# domain_loss_fn.cuda()
# l1_loss.cuda()
optimizer = optim.Adam(model.parameters())
# optimizer = optim.SGD(model.parameters(), lr=lr)
# weights = model.vector
try:
weights = model.feature.input_layer.vector
print("Before training:")
print("Min (abs) weight: %f" % (torch.abs(weights).min()))
print("Max (abs) weight: %f" % (torch.abs(weights).max()))
print("Ave weight: %f" % (torch.abs(weights).mean()))
num_zeros = (weights.data==0).sum()
near_zeros = (torch.abs(weights.data)<0.000001).sum()
print("Zeros=%d, near-zeros=%d" % (num_zeros, near_zeros))
    except AttributeError:
pass
# Main training loop
inds = np.arange(num_train_instances)
for epoch in range(epochs):
epoch_loss = 0
model.train()
# Do a training epoch:
for batch in range( 1+ ( num_train_instances // batch_size ) ):
model.zero_grad()
start_ind = batch * batch_size
if start_ind >= num_train_instances:
                #This happens when the number of instances is exactly divisible by the batch size (always the case when batch_size=1).
break
end_ind = num_train_instances if start_ind + batch_size >= num_train_instances else start_ind+batch_size
this_batch_size = end_ind - start_ind
            ## Gradually increase alpha, which scales the reversed domain gradient (standard DANN schedule)
ave_ind = start_ind + this_batch_size // 2
p = float(ave_ind + epoch * num_train_instances*2) / (epochs * num_train_instances*2)
alpha = 2. / (1. + np.exp(-10 * p)) - 1
source_batch = FloatTensor(X_task_train[start_ind:end_ind,:].toarray()).to(device) # read input
            # Shape the labels as [batch, 1] to match the sigmoid output of the task head
            source_task_labels = torch.unsqueeze(FloatTensor(y_task_train[start_ind:end_ind]).to(device), 1)  # read task labels
source_domain_labels = torch.zeros(this_batch_size,1, device=device) # set to 0
# Get the task loss and domain loss for the source instance:
task_out, task_domain_out = model.forward(source_batch, alpha)
task_loss = task_loss_fn(task_out, source_task_labels)
domain_loss = domain_loss_fn(task_domain_out, source_domain_labels)
# domain2_loss = domain_loss_fn(task_domain_out[1], source_domain_labels)
try:
weights = model.feature.input_layer.vector
reg_term = l1_loss(weights, torch.zeros_like(weights, device=device))
            except AttributeError:
reg_term = 0
# Randomly select a matching number of target instances:
target_inds = choice(num_target_instances, this_batch_size, replace=False)
target_batch = FloatTensor(X_target_train[target_inds,:].toarray()).to(device) # read input
target_domain_labels = torch.ones(this_batch_size, 1, device=device)
# Get the domain loss for the target instances:
_, target_domain_out = model.forward(target_batch, alpha)
target_domain_loss = domain_loss_fn(target_domain_out, target_domain_labels)
# target_domain2_loss = domain_loss_fn(target_domain_out[1], target_domain_labels)
# Get sum loss update weights:
# domain adaptation:
# total_loss = task_loss + domain_weight * (domain_loss + target_domain_loss)
# Task only:
# total_loss = task_loss
# Domain only:
# total_loss = domain_loss + target_domain_loss
# Debugging with 2 domain classifiers:
# total_loss = domain_loss + domain2_loss + target_domain_loss + target_domain2_loss
# With regularization and DA term:
total_loss = (task_loss +
domain_weight * (domain_loss + target_domain_loss) +
reg_weight * reg_term)
# With regularization only:
# total_loss = task_loss + reg_term
epoch_loss += total_loss
total_loss.backward()
# for param in model.named_parameters():
# print(param[0])
# print(param[1])
optimizer.step()
# At the end of every epoch, examine domain accuracy and how many non-zero parameters we have
# unique_source_inds = np.unique(selected_source_inds)
# all_source_inds = np.arange(num_train_instances)
# eval_source_inds = np.setdiff1d(all_source_inds, unique_source_inds)
# source_eval_X = X_train[eval_source_inds]
# source_eval_y = y_train[eval_source_inds]
source_eval_X = X_task_valid
source_eval_y = y_task_valid
source_task_out, source_domain_out = model.forward( FloatTensor(source_eval_X.toarray()).to(device), alpha=0.)
# If using BCEWithLogitsLoss which would automatically do a sigmoid post-process
# source_task_out = nn.functional.sigmoid(source_task_out)
# source_domain_out = nn.functional.sigmoid(source_domain_out)
# source domain is 0, count up predictions where 1 - prediction = 1
# If using sigmoid outputs (0/1) with BCELoss
source_domain_preds = np.round(source_domain_out.cpu().data.numpy())
# if using Softmargin() loss (-1/1) with -1 as source domain
# source_domain_preds = np.round(((source_domain_out.cpu().data.numpy() * -1) + 1) / 2)
source_predicted_count = np.sum(1 - source_domain_preds)
source_domain_acc = source_predicted_count / len(source_eval_y)
target_eval_X = X_target_valid
_, target_domain_out = model.forward( FloatTensor(target_eval_X.toarray()).to(device), alpha=0.)
# If ussing with BCEWithLogitsLoss (see above)
# target_domain_out = nn.functional.sigmoid(target_domain_out)
# if using sigmoid output (0/1) with BCELoss
target_domain_preds = np.round(target_domain_out.cpu().data.numpy())
# if using Softmargin loss (-1/1) with 1 as target domain:
# target_domain_preds = np.round(((source_domain_out.cpu().data.numpy()) + 1) / 2)
target_predicted_count = np.sum(target_domain_preds)
domain_acc = (source_predicted_count + target_predicted_count) / (source_eval_X.shape[0] + target_eval_X.shape[0])
# if using 0/1 predictions:
source_y_pred = np.round(source_task_out.cpu().data.numpy()[:,0])
# if using -1/1 predictions? (-1 = not negated, 1 = negated)
# source_y_pred = np.round((source_task_out.cpu().data.numpy()[:,0] + 1) / 2)
# source_eval_y += 1
# source_eval_y /= 2
# predictions of 1 are the positive class: tps are where prediction and gold are 1
tps = np.sum(source_y_pred * source_eval_y)
true_preds = source_y_pred.sum()
true_labels = source_eval_y.sum()
recall = tps / true_labels
        prec = 1 if true_preds == 0 else tps / true_preds
        f1 = 0 if (recall + prec) == 0 else 2 * recall * prec / (recall + prec)
try:
weights = model.feature.input_layer.vector
num_zeros = (weights.data==0).sum()
near_zeros = (torch.abs(weights.data)<0.000001).sum()
print("Min (abs) weight: %f" % (torch.abs(weights).min()))
print("Max (abs) weight: %f" % (torch.abs(weights).max()))
print("Ave weight: %f" % (torch.abs(weights).mean()))
        except AttributeError:
num_zeros = near_zeros = -1
print("[Source] Epoch %d: loss=%f\tzeros=%d\tnear_zeros=%d\tnum_insts=%d\tdom_acc=%f\tP=%f\tR=%f\tF=%f" % (epoch, epoch_loss, num_zeros, near_zeros, len(source_eval_y), domain_acc, prec, recall, f1))
weights = model.feature.input_layer.vector
ranked_inds = torch.sort(torch.abs(weights))[1]
pivots = ranked_inds[0,-1000:]
pivot_list = pivots.cpu().data.numpy().tolist()
# pivot_list.sort()
for pivot in pivot_list:
print('%d : %s' % (pivot, feature_map[pivot]))
if __name__ == '__main__':
main(sys.argv[1:])
|
tmills/uda
|
scripts/learn_pivots_gradient_reversal.py
|
Python
|
apache-2.0
| 15,061
|
import tensorflow as tf
import numpy as np
import GPflow
from GPflow import kernels
from GPflow.tf_wraps import eye
from GPflow._settings import settings
from GPflow.param import ParamList
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class Kern(object):
"""
    An object that adds multi-dimensional functionality to
    GPflow.kernels.Kern.
    This object is meant to be inherited alongside GPflow.kernels.Kern in a
    child class.
The main difference of this kernel from GPflow.kernels.Stationary is that
this returns the multidimensional kernel values,
sized [X.shape[0],X2.shape[0],R].
    The numpy equivalent is
    np.dstack([v_0*core(X,X2), v_1*core(X,X2), ..., v_R*core(X,X2)])
    This object also provides an efficient Cholesky factorization method,
    self.Cholesky, where the Cholesky tensor is
    np.dstack([sqrt(v_0)*chol, sqrt(v_1)*chol, ..., sqrt(v_R)*chol])
with
chol = Cholesky(K(X) + jitter)
"""
def __init__(self, output_dim):
"""
        - output_dim is the dimension of the output of this kernel
                    <-- This is an additional feature from GPflow.kernels.Stationary
"""
# variance should be 1d-np.array sized [output_dim]
self.output_dim = output_dim
def K(self, X, X2=None):
core = tf.tile(tf.expand_dims(self._Kcore(X, X2),-1),
[1,1,tf.shape(self.variance)[0]]) # [N,N,R]
var = tf.tile(
tf.expand_dims(tf.expand_dims(self.variance, 0),0), # [1,1,R]
[tf.shape(core)[0],tf.shape(core)[1],1]) # [N,N,R]
return var * core
def Kdiag(self,X):
"""
Return: tf.tensor sized [N,R]
"""
return tf.tile(tf.expand_dims(self.variance,0), [tf.shape(X)[0],1])
def Cholesky(self, X):
core = self._Kcore(X, X2=None) + \
eye(tf.shape(X)[0]) * settings.numerics.jitter_level
chol = tf.cholesky(core)
var = tf.tile(tf.expand_dims(tf.expand_dims(
tf.sqrt(self.variance), 0),0),
[tf.shape(core)[0],tf.shape(core)[1],1])
return var * tf.tile(tf.expand_dims(chol, -1),[1,1,tf.shape(var)[2]])
def _Kcore(self, X, X2=None):
"""
Returns the unit kernel which is common for all the output dimensions.
"""
raise NotImplementedError
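# Plain-numpy reference (added for illustration only; not used by the TensorFlow code
# in this module) for the stacking structure described in the Kern docstring: one
# [N, N2] unit kernel scaled by a per-output variance and stacked along a trailing
# axis of size R.
def _np_multioutput_kernel(core, variance):
    # core: [N, N2] unit kernel values, variance: [R] per-output variances
    return np.dstack([v * core for v in variance])  # [N, N2, R]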
class Stationary(Kern, kernels.Stationary):
"""
Multidimensional version of Stationary kernel.
"""
def __init__(self, input_dim,
output_dim,
variance=None, lengthscales=None,
active_dims=None, ARD=False):
"""
- input_dim is the dimension of the input to the kernel
- output_dim is the dimension of the output of this kernel
<-- This is an additional feature from GPflow.kernels.Stationary
- variance : [1d-np.array] is the (initial) value for the variance parameter
with size output_dim.
- lengthscales is the initial value for the lengthscales parameter
defaults to 1.0 (ARD=False) or np.ones(input_dim) (ARD=True).
- active_dims is a list of length input_dim which controls which
columns of X are used.
- ARD specifies whether the kernel has one lengthscale per dimension
(ARD=True) or a single lengthscale (ARD=False).
"""
Kern.__init__(self, output_dim)
# variance should be 1d-np.array sized [output_dim]
if variance is None:
variance = np.ones(output_dim)
assert(variance.shape[0] == self.output_dim)
kernels.Stationary.__init__(self, input_dim, variance, lengthscales,
active_dims, ARD)
class RBF(Stationary):
"""
The radial basis function (RBF) or squared exponential kernel
"""
def _Kcore(self, X, X2=None):
X, X2 = self._slice(X, X2)
return tf.exp(-self.square_dist(X, X2)/2)
class RBF_csym(RBF):
"""
RBF kernel with a cylindrically symmetric assumption.
The kernel value is
    K(x,x') = a exp(-(x-x')^2/2l^2) + a exp(-(x+x')^2/2l^2)
"""
def _Kcore(self, X, X2=None):
if X2 is None:
X2 = X
X = tf.abs(X)
X2= tf.abs(X2)
return RBF._Kcore(self, X, X2) + RBF._Kcore(self, X, -X2)
def Kdiag(self, X):
        # returns an [N,R] tensor
X, _ = self._slice(X, None)
X = tf.abs(X)
square_dist = tf.reduce_sum(tf.square((X+X)/self.lengthscales), 1)
# shape [N,R]
var = tf.tile(tf.expand_dims(self.variance,0), [tf.shape(X)[0],1])
diag = tf.exp(-0.5*square_dist)
diag = tf.tile(tf.expand_dims(tf.ones_like(diag)+diag, -1),
[1,tf.shape(var)[1]])
return var * diag
class RBF_casym(RBF):
"""
RBF kernel with a cylindrically anti-symmetric assumption.
The kernel value is
    K(x,x') = a exp(-(x-x')^2/2l^2) - a exp(-(x+x')^2/2l^2)
"""
def _Kcore(self, X, X2=None):
if X2 is None:
X2 = X
X = tf.abs(X)
X2= tf.abs(X2)
return RBF._Kcore(self, X, X2) - RBF._Kcore(self, X, -X2)
def Kdiag(self, X):
        # returns an [N,R] tensor
X, _ = self._slice(X, None)
X = tf.abs(X)
square_dist = tf.reduce_sum(tf.square((X+X)/self.lengthscales), 1)
# shape [N,R]
var = tf.tile(tf.expand_dims(self.variance,0), [tf.shape(X)[0],1])
diag = tf.exp(-0.5*square_dist)
diag = tf.tile(tf.expand_dims(tf.ones_like(diag)-diag, -1),
[1,tf.shape(var)[1]])
return var * diag
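# Plain-numpy reference (added for illustration only) of the cylindrically symmetric /
# anti-symmetric RBF values documented in the RBF_csym and RBF_casym docstrings, for
# 1-D inputs and unit variance.
def _np_cylindrical_rbf(x, x2, lengthscale=1.0, symmetric=True):
    x = np.abs(np.asarray(x, dtype=float))[:, None]
    x2 = np.abs(np.asarray(x2, dtype=float))[None, :]
    same = np.exp(-0.5 * ((x - x2) / lengthscale) ** 2)
    mirror = np.exp(-0.5 * ((x + x2) / lengthscale) ** 2)
    return same + mirror if symmetric else same - mirror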
class Stack(Kern, kernels.Kern):
"""
Kernel object that returns multiple kinds of kernel values, stacked
vertically.
Input for the initializer is a list of Kernel object, [k_1,k_2,...,k_M].
The function call returns [k_1(X,X2),k_2(X,X2),...,k_M(X,X2)].
The size of the return is n x n2 x (sum_i k_i.output_dim).
"""
def __init__(self, list_of_kerns):
"""
:param list list_of_kerns: A list of Kernel object.
"""
output_dim = 0
for k in list_of_kerns:
# assert k is Kernel object
assert(isinstance(k, Kern))
output_dim += k.output_dim
Kern.__init__(self, output_dim)
kernels.Kern.__init__(self, input_dim=None)
# kernels are stored as ParamList
self.kern_list = ParamList(list_of_kerns)
def K(self, X, X2=None):
return tf.concat(2, [k.K(X,X2) for k in self.kern_list])
def Kdiag(self,X):
return tf.concat(1, [k.Kdiag(X) for k in self.kern_list])
def Cholesky(self, X):
return tf.concat(2, [k.Cholesky(X) for k in self.kern_list])
|
fujii-team/GPinv
|
GPinv/kernels.py
|
Python
|
apache-2.0
| 7,016
|
#!/usr/bin/env python
# vim:set nospell:
from LogRecord import LogRecord
from struct import unpack
from sys import argv
from uuid import UUID
SECTOR_SIZE = 512
# 0x0 __be32 h_magic jbd2 magic number, 0xC03B3998.
# 0x4 __be32 h_blocktype Description of what this block contains. One of:
# 1 Descriptor. This block precedes a series of data blocks that were
# written through the journal during a transaction.
# 2 Block commit record. This block signifies the completion of a
# transaction.
# 3 Journal superblock, v1.
# 4 Journal superblock, v2.
# 5 Block revocation records. This speeds up recovery by enabling the
# journal to skip writing blocks that were subsequently rewritten.
# 0x8 __be32 h_sequence The transaction ID that goes with this block.
class JBD2BlockHeader(object):
MAGIC = 0xC03B3998
BLOCKTYPE = { 0x1: 'Descriptor',
0x2: 'Commit',
0x3: 'Superblockv1',
0x4: 'Superblockv2',
0x5: 'Revocation'
}
def __init__(self, data):
self.h_magic, \
self.h_blocktype, \
self.h_sequence = unpack('>III', data)
def __str__(self):
retstr = '{ .h_magic = 0x%x, \n'
retstr += ' .h_blocktype = %s\n'
retstr += ' .h_sequence = 0x%x }'
return retstr % (self.h_magic,
JBD2BlockHeader.BLOCKTYPE[self.h_blocktype],
self.h_sequence)
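# Small self-check (added for illustration; not part of the original tool): pack a
# fake 12-byte journal header and confirm the fields round-trip through
# JBD2BlockHeader. The sequence number below is arbitrary.
def _example_block_header():
    from struct import pack
    raw = pack('>III', JBD2BlockHeader.MAGIC, 0x1, 42)
    hdr = JBD2BlockHeader(raw)
    assert hdr.h_magic == JBD2BlockHeader.MAGIC and hdr.h_sequence == 42
    return hdr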
# 0x0 journal_header_t (12 bytes) s_header Common header identifying this as a superblock.
# Static information describing the journal.
# 0xC __be32 s_blocksize Journal device block size.
# 0x10 __be32 s_maxlen Total number of blocks in this journal.
# 0x14 __be32 s_first First block of log information.
# Dynamic information describing the current state of the log.
# 0x18 __be32 s_sequence First commit ID expected in log.
# 0x1C __be32 s_start Block number of the start of log. Contrary to the comments, this field being zero does not imply that the journal is clean!
# 0x20 __be32 s_errno Error value, as set by jbd2_journal_abort().
# The remaining fields are only valid in a version 2 superblock.
# 0x24 __be32 s_feature_compat; Compatible feature set. Any of:
# 0x1 Journal maintains checksums on the data blocks. (JBD2_FEATURE_COMPAT_CHECKSUM)
# 0x28 __be32 s_feature_incompat Incompatible feature set. Any of:
# 0x1 Journal has block revocation records. (JBD2_FEATURE_INCOMPAT_REVOKE)
# 0x2 Journal can deal with 64-bit block numbers. (JBD2_FEATURE_INCOMPAT_64BIT)
# 0x4 Journal commits asynchronously. (JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)
# 0x8 This journal uses v2 of the checksum on-disk format. Each journal metadata block gets its own checksum, and the block tags in the descriptor table contain checksums for each of the data blocks in the journal. (JBD2_FEATURE_INCOMPAT_CSUM_V2)
# 0x10 This journal uses v3 of the checksum on-disk format. This is the same as v2, but the journal block tag size is fixed regardless of the size of block numbers. (JBD2_FEATURE_INCOMPAT_CSUM_V3)
# 0x2C __be32 s_feature_ro_compat Read-only compatible feature set. There aren't any of these currently.
# 0x30 __u8 s_uuid[16] 128-bit uuid for journal. This is compared against the copy in the ext4 super block at mount time.
# 0x40 __be32 s_nr_users Number of file systems sharing this journal.
# 0x44 __be32 s_dynsuper Location of dynamic super block copy. (Not used?)
# 0x48 __be32 s_max_transaction Limit of journal blocks per transaction. (Not used?)
# 0x4C __be32 s_max_trans_data Limit of data blocks per transaction. (Not used?)
# 0x50 __u8 s_checksum_type Checksum algorithm used for the journal. 1 = crc32, 2 = md5, 3 = sha1, 4 = crc32c. 1 or 4 are the most likely choices.
# 0x51 __u8[3] s_padding2
# 0x54 __u32 s_padding[42]
# 0xFC __be32 s_checksum Checksum of the entire superblock, with this field set to zero.
# 0x100 __u8 s_users[16*48] ids of all file systems sharing the log. e2fsprogs/Linux don't allow shared external journals, but I imagine Lustre (or ocfs2?), which use the jbd2 code, might.
class JBD2SuperBlock(object):
JBD2_FEATURE_COMPAT_CHECKSUM = 0x1
JBD2_FEATURE_INCOMPAT_REVOKE = 0x1
JBD2_FEATURE_INCOMPAT_64BIT = 0x2
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT = 0x4
JBD2_FEATURE_INCOMPAT_CSUM_V2 = 0x8
JBD2_FEATURE_INCOMPAT_CSUM_V3 = 0x10
CHECKSUM = { 1: 'crc32',
2: 'md5',
3: 'sha1',
4: 'crc32c'
}
def __init__(self, data):
self.s_blocksize, \
self.s_maxlen, \
self.s_first, \
self.s_sequence, \
self.s_start, \
self.s_errno, \
self.s_feature_compat, \
self.s_feature_incompat, \
self.s_feature_ro_compat, \
self.s_uuid, \
self.s_nr_users, \
self.s_dynsuper, \
self.s_max_transaction, \
self.s_max_trans_data, \
self.s_checksum_type, \
self.s_padding2, \
self.s_padding, \
self.s_checksum, \
self.s_users = \
unpack('>9I16s4IB3s168sI768s', data[:1012])
def __str__(self):
retstr = '-- JBD2 Superblock --\n'
retstr += '\ts_blocksize\t\t=\t%d\n' % (self.s_blocksize)
retstr += '\ts_maxlen\t\t=\t%d (%d MiB)\n' % (self.s_maxlen,
self.s_blocksize *
self.s_maxlen /
1024 ** 2)
retstr += '\ts_feature_compat\t=\t0x%0.8x\n' % (self.s_feature_compat)
if self.s_feature_compat & \
JBD2SuperBlock.JBD2_FEATURE_COMPAT_CHECKSUM:
retstr += '\tJBD2_FEATURE_COMPAT_CHECKSUM is set.\n'
retstr += '\ts_feature_incompat\t=\t0x%0.8x\n' % \
(self.s_feature_incompat)
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_REVOKE:
retstr += '\tJBD2_FEATURE_INCOMPAT_REVOCATION is set.\n'
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_64BIT:
retstr += '\tJBD2_FEATURE_INCOMPAT_64BIT is set.\n'
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT:
retstr += '\tJBD2_FEATURE_INCOMPAT_ASYNC_COMMIT is set.\n'
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_CSUM_V2:
retstr += '\tJBD2_FEATURE_COMPAT_CSUM_V2 is set.\n'
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_CSUM_V3:
retstr += '\tJBD2_FEATURE_COMPAT_CSUM_V3 is set.\n'
retstr += '\tself.s_uuid\t\t=\t%s\n' % UUID(bytes=self.s_uuid)
retstr += '\tself.s_nr_users\t\t=\t%d\n' % (self.s_nr_users)
retstr += '\tself.s_max_transaction\t=\t%d\n' % \
(self.s_max_transaction)
retstr += '\tself.s_max_trans_data\t=\t%d\n' % \
(self.s_max_trans_data)
if self.s_checksum_type != 0x0:
retstr += '\tself.s_checksum_type\t=\t%s\n' % \
(JBD2SuperBlock.CHECKSUM[self.s_checksum_type])
for i in xrange(self.s_nr_users):
retstr += '\tself.users[%d]\t\t=\t%s\n' % \
(i, UUID(bytes=self.s_users[16*i:16+16*i]))
retstr += '-- End JBD2 Superblock --\n'
return retstr
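# Illustrative helper (added for reference; not part of the original tool): build a
# minimal, mostly zeroed superblock body with struct.pack and parse it back. The block
# size and journal length below are arbitrary; real values come from the journal.
def _example_superblock():
    from struct import pack
    raw = pack('>9I16s4IB3s168sI768s',
               4096, 32768, 1, 1, 0, 0,   # s_blocksize .. s_errno
               0, 0, 0,                   # feature flag words
               '\x00' * 16,               # s_uuid
               1, 0, 0, 0,                # s_nr_users .. s_max_trans_data
               4,                         # s_checksum_type (crc32c)
               '\x00' * 3, '\x00' * 168,  # padding
               0,                         # s_checksum
               '\x00' * 768)              # s_users
    sb = JBD2SuperBlock(raw)
    assert sb.s_blocksize == 4096 and sb.s_nr_users == 1
    return sb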
class JBD2RevocationBlock(object):
pass
# 0x0 journal_header_s (open coded) Common block header.
# 0xC unsigned char h_chksum_type The type of checksum to use to verify the integrity of the data blocks in the transaction. One of:
# 1 CRC32
# 2 MD5
# 3 SHA1
# 4 CRC32C
# 0xD unsigned char h_chksum_size The number of bytes used by the checksum. Most likely 4.
# 0xE unsigned char h_padding[2]
# 0x10 __be32 h_chksum[JBD2_CHECKSUM_BYTES] 32 bytes of space to store checksums. If JBD2_FEATURE_INCOMPAT_CSUM_V2 or JBD2_FEATURE_INCOMPAT_CSUM_V3 are set, the first __be32 is the checksum of the journal UUID and the entire commit block, with this field zeroed. If JBD2_FEATURE_COMPAT_CHECKSUM is set, the first __be32 is the crc32 of all the blocks already written to the transaction.
# 0x30 __be64 h_commit_sec The time that the transaction was committed, in seconds since the epoch.
# 0x38 __be32 h_commit_nsec Nanoseconds component of the above timestamp.
class JBD2CommitBlock(object):
def __init__(self, data):
self.h_chksum_type, \
self.h_chksum_size, \
self.h_padding, \
self.h_chksum, \
self.h_commit_sec, \
self.h_commit_nsec = \
unpack('>BB2s32sQI', data[:48])
def __str__(self):
retstr = '-- JBD2CommitBlock --\n'
retstr += '\th_chksum_type\t=\t%d\n' % self.h_chksum_type
retstr += '\th_chksum_size\t=\t%d\n' % self.h_chksum_size
retstr += '\th_chksum\t=\t%r\n' % self.h_chksum
retstr += '\th_commit_sec\t=\t%d\n' % self.h_commit_sec
retstr += '\th_commit_nsec\t=\t%d\n' % self.h_commit_nsec
return retstr
# 0x0 journal_header_t (open coded) Common block header.
# 0xC struct journal_block_tag_s open coded array[] Enough tags either to fill up the block or to describe all the data blocks that follow this descriptor block.
# Journal block tags have any of the following formats, depending on which journal feature and block tag flags are set.
# If JBD2_FEATURE_INCOMPAT_CSUM_V3 is set, the journal block tag is defined as struct journal_block_tag3_s, which looks like the following. The size is 16 or 32 bytes.
# Offset Type Name Descriptor
# 0x0 __be32 t_blocknr Lower 32-bits of the location of where the corresponding data block should end up on disk.
# 0x4 __be32 t_flags Flags that go with the descriptor. Any of:
# 0x1 On-disk block is escaped. The first four bytes of the data block just happened to match the jbd2 magic number.
# 0x2 This block has the same UUID as previous, therefore the UUID field is omitted.
# 0x4 The data block was deleted by the transaction. (Not used?)
# 0x8 This is the last tag in this descriptor block.
# 0x8 __be32 t_blocknr_high Upper 32-bits of the location of where the corresponding data block should end up on disk. This is zero if JBD2_FEATURE_INCOMPAT_64BIT is not enabled.
# 0xC __be32 t_checksum Checksum of the journal UUID, the sequence number, and the data block.
# This field appears to be open coded. It always comes at the end of the tag, after t_checksum. This field is not present if the "same UUID" flag is set.
# 0x8 or 0xC char uuid[16] A UUID to go with this tag. This field appears to be copied from the j_uuid field in struct journal_s, but only tune2fs touches that field.
# If JBD2_FEATURE_INCOMPAT_CSUM_V3 is NOT set, the journal block tag is defined as struct journal_block_tag_s, which looks like the following. The size is 8, 12, 24, or 28 bytes:
# Offset Type Name Descriptor
# 0x0 __be32 t_blocknr Lower 32-bits of the location of where the corresponding data block should end up on disk.
# 0x4 __be16 t_checksum Checksum of the journal UUID, the sequence number, and the data block. Note that only the lower 16 bits are stored.
# 0x6 __be16 t_flags Flags that go with the descriptor. Any of:
# 0x1 On-disk block is escaped. The first four bytes of the data block just happened to match the jbd2 magic number.
# 0x2 This block has the same UUID as previous, therefore the UUID field is omitted.
# 0x4 The data block was deleted by the transaction. (Not used?)
# 0x8 This is the last tag in this descriptor block.
# This next field is only present if the super block indicates support for 64-bit block numbers.
# 0x8 __be32 t_blocknr_high Upper 32-bits of the location of where the corresponding data block should end up on disk.
# This field appears to be open coded. It always comes at the end of the tag, after t_flags or t_blocknr_high. This field is not present if the "same UUID" flag is set.
# 0x8 or 0xC char uuid[16] A UUID to go with this tag. This field appears to be copied from the j_uuid field in struct journal_s, but only tune2fs touches that field.
# If JBD2_FEATURE_INCOMPAT_CSUM_V2 or JBD2_FEATURE_INCOMPAT_CSUM_V3 are set, the end of the block is a struct jbd2_journal_block_tail, which looks like this:
# Offset Type Name Descriptor
# 0x0 __be32 t_checksum Checksum of the journal UUID + the descriptor block, with this field set to zero.
class JBD2DescriptorBlock(object):
def __init__(self, data):
self.journal_block_tag_s = \
[tag for tag in JBD2DescriptorBlock.ReadBlockTags(data)]
@staticmethod
def ReadBlockTags(data):
pos = 0
tag = None
while pos < len(data) and (tag is None or not tag.t_flags & 0x8):
tag = JBD2BlockTag(data[pos:])
pos += tag.size
yield tag
def tagGenerator(self):
for tag in self.journal_block_tag_s:
yield tag
def __str__(self):
retstr = '-- JBD2 Descriptor Block --\n'
for tag in self.journal_block_tag_s:
retstr += str(tag)
return retstr
class JBD2BlockTag(object):
def __init__(self, data):
self.t_blocknr, \
self.t_checksum, \
self.t_flags = unpack('>IHH', data[0:8])
self.t_uuid = None
self.size = 8
if not self.t_flags & 0x2:
self.t_uuid = UUID(bytes=unpack('>16s', data[8:24])[0])
self.size = 24
def __str__(self):
retstr = '\t-- JBD2 Tag --\n'
retstr += '\t\tt_blocknr\t=\t%d\n' % self.t_blocknr
retstr += '\t\tt_checksum\t=\t%d\n' % self.t_checksum
retstr += '\t\tt_flags\t\t=\t0x%0.8x\n' % self.t_flags
if self.t_uuid is not None:
retstr += '\t\tt_uuid\t=\t%s\n' % self.t_uuid
return retstr
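# Quick illustrative check (added for reference; not in the original parser): pack one
# journal block tag by hand and parse it back. Flag 0x2 ("same UUID as previous") is
# set, so no UUID bytes follow and the tag occupies only 8 bytes.
def _example_block_tag():
    from struct import pack
    raw = pack('>IHH', 1234, 0, 0x2 | 0x8)  # last tag in block, same-UUID flag
    tag = JBD2BlockTag(raw)
    assert tag.size == 8 and tag.t_blocknr == 1234 and tag.t_uuid is None
    return tag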
if __name__ == '__main__':
fname = argv[1]
with open(fname, 'rb') as f:
prevts = 0
current_tags = None
superblock = None
for log in LogRecord.LogRecordGenerator(f.read()):
if log.type == 'data':
print log
hdr = JBD2BlockHeader(log.write[:12])
if hdr.h_magic == JBD2BlockHeader.MAGIC:
print hdr
data = log.write[12:]
if hdr.h_blocktype == 0x1:
print '-- Descriptor Block --'
descriptor = JBD2DescriptorBlock(data)
current_tags = descriptor.tagGenerator()
print descriptor
elif hdr.h_blocktype == 0x2:
print '-- Commit Block --'
commit = JBD2CommitBlock(data)
try:
current_tags.next()
raise Exception('Did not process all tags!')
except StopIteration:
print '\tFinished Processing all tags.'
print commit
elif hdr.h_blocktype == 0x3:
print '-- Superblock v1 --'
elif hdr.h_blocktype == 0x4:
print '-- Superblock v2 --'
superblock = JBD2SuperBlock(data)
print superblock
elif hdr.h_blocktype == 0x5:
print '-- Revocation Block --'
exit()
else:
raise Exception('Unknown JBD2 Block Type.')
else:
tag = current_tags.next()
                    if tag.t_flags & 0x1:
                        # Escaped block: restore the jbd2 magic bytes (str slices
                        # cannot be assigned in place, so rebuild the string)
                        data = '\xc0\x3b\x39\x98' + data[4:]
sector = tag.t_blocknr
sector *= superblock.s_blocksize
sector /= SECTOR_SIZE
print 'Data Write to Sector: %d\n' % (sector)
if prevts == 0: prevts = int(log.timestamp)
print int(log.timestamp) - prevts
prevts = int(log.timestamp)
else:
print log # metadata
|
cmusatyalab/gammaray
|
src/gray-inferencer/ext4/journal-parser.py
|
Python
|
apache-2.0
| 16,651
|