code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# to calculate metrics, perform factor analysis, and generate visualizations.
import sys
sys.path.append("../")
import bandicoot as bc
import csv
import glob
import os
#Metrics, FA, Tensor, Visualization
def metrics():
    """Compute bandicoot behavioral indicators for every user record file.

    Reads each ``records/<user_id>.csv`` file, computes the full set of
    bandicoot indicators for that user, and writes all rows to
    ``bandicoot_indicators_full.csv``. A user whose records cannot be
    parsed gets a row flagged with ``error=True`` instead.
    """
    records_path = 'records/'
    antenna_file = 'antennas.csv'
    rows = []
    for record_file in glob.glob(records_path + '*.csv'):
        # The user id is the file name without its .csv extension.
        user_id = os.path.splitext(os.path.basename(record_file))[0]
        try:
            user = bc.read_csv(user_id, records_path, antenna_file, describe=False)
            row = bc.utils.all(user)
        except Exception:
            # Best-effort: keep going and mark this user as failed.
            row = {'name': user_id, 'error': True}
        rows.append(row)
    bc.io.to_csv(rows, 'bandicoot_indicators_full.csv')
#def FA():
#def tensor():
#def visualizations():
| shreya2111/PersonalityPrediction | src/analysis.py | Python | mit | 726 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-24 00:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``kiffel`` app.

    Removes ``Person.kdv_id``, adds ``Person.kdv_balance`` (default 0) and
    makes ``Person.nickname`` optional. The ``KDVUserBarcode`` model and its
    foreign key are kept commented out (intentionally not applied).
    """

    dependencies = [
        ('kiffel', '0001_initial'),
    ]

    operations = [
        #migrations.CreateModel(
        #    name='KDVUserBarcode',
        #    fields=[
        #        ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
        #        ('code', models.CharField(blank=True, max_length=255, null=True, verbose_name='Barcode')),
        #        ('identifiable_type', models.CharField(default='User', max_length=255)),
        #        ('created_at', models.DateTimeField(auto_now_add=True, null=True)),
        #        ('updated_at', models.DateTimeField(auto_now=True, null=True)),
        #    ],
        #    options={
        #        'db_table': 'kdv_user_identifiers',
        #    },
        #),
        migrations.RemoveField(
            model_name='person',
            name='kdv_id',
        ),
        migrations.AddField(
            model_name='person',
            name='kdv_balance',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='person',
            name='nickname',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Nickname'),
        ),
        #migrations.AddField(
        #    model_name='kdvuserbarcode',
        #    name='identifiable',
        #    field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        #),
    ]
| d120/kifplan | kiffel/migrations/0002_auto_20160424_0237.py | Python | agpl-3.0 | 1,734 |
#!/usr/bin/python
#
# Copyright (C) 2006-2014 Wyplay, All Rights Reserved.
# This file is part of xintegtools.
#
# xintegtools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# xintegtools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see file COPYING.
# If not, see <http://www.gnu.org/licenses/>.
#
#
from unittest import TestCase, main
from mock import patch, mock_open
from xintegtools.xbump.target_ebuild import TargetEbuildContent
class TargetEbuildContentTester(TestCase):
    """Unit tests for TargetEbuildContent's accessors over a template ebuild.

    ``data`` is a minimal target ebuild template; each test fills in
    ``data_values`` and checks that the corresponding property reads and
    writes the right variable in the ebuild text.
    """

    # Ebuild template; the %(...)s placeholders are filled from data_values.
    data = """# Header & copyright\nEAPI=5\n
inherit target-r1\n
LICENSE="Wyplay"\nSLOT="0"\nKEYWORDS="arm"\n
#EGIT_REPO_URI="gitsrv:deprecated_uri"
EGIT_REPO_URI="%(uri)s"
: ${EGIT_BRANCH:="%(branch)s"}
EGIT_COMMIT="%(commit)s"
XOV_BASE_PROTO=%(xov_base_proto)s
XOV_BASE_URI=%(xov_base_uri)s
XOV_BASE_BRANCH=%(xov_base_branch)s
XOV_BASE_REVISION=%(xov_base_revision)s
: ${XOV_BOARD_PROTO:="%(xov_base_proto)s"}
XOV_BOARD_URI=%(xov_board_uri)s
#XOV_BOARD_BRANCH=master
"""
    # Default substitutions: everything empty until a test fills them in.
    data_values = {
        'uri': str(),
        'branch': str(),
        'commit': str(),
        'xov_base_uri': str(),
        'xov_base_branch': str(),
        'xov_base_revision': str(),
        'xov_base_proto': str(),
        'xov_board_uri': str()
    }

    def test_uri(self):
        # EGIT_REPO_URI can be read back, rewritten, and setting it on an
        # ebuild that never declared it raises ValueError.
        self.data_values['uri'] = 'gitsrv:genbox/profiles-project'
        other_uri = 'user@github.com:frogbywyplay/profiles'
        my_ebuild = TargetEbuildContent(self.data % self.data_values)
        self.assertEqual(my_ebuild.uri, self.data_values['uri'])
        my_ebuild.uri = other_uri
        self.assertEqual(my_ebuild.uri, other_uri)
        self.assertIn(other_uri, my_ebuild.data)
        other_data = 'EAPI=3\ninherit target-r1\nEGIT_BRANCH="master"\n\n#garbage data\n'
        other_ebuild = TargetEbuildContent(other_data)
        self.assertEqual(other_ebuild.uri, str())
        # NOTE(review): assertRaisesRegexp is the Python 2 spelling; on
        # Python 3 it is deprecated in favor of assertRaisesRegex.
        with self.assertRaisesRegexp(ValueError, 'Unable to set EGIT_REPO_URI to %s' % other_uri):
            other_ebuild.uri = other_uri

    def test_branch(self):
        # EGIT_BRANCH can be read back, rewritten, and — unlike the uri —
        # added to an ebuild that did not declare it.
        self.data_values['branch'] = '1.2/rb'
        other_branch = 'wip-36152_new_board'
        my_ebuild = TargetEbuildContent(self.data % self.data_values)
        self.assertEqual(my_ebuild.branch, self.data_values['branch'])
        my_ebuild.branch = other_branch
        self.assertEqual(my_ebuild.branch, other_branch)
        self.assertIn(other_branch, my_ebuild.data)
        other_data = 'EGIT_REPO_URI="gitsrv:genbox/profiles"\nEAPI=1\n#garbage data\n'
        other_ebuild = TargetEbuildContent(other_data)
        self.assertEqual(other_ebuild.branch, str())
        other_ebuild.branch = other_branch
        self.assertEqual(other_ebuild.branch, other_branch)
        self.assertIn(other_branch, other_ebuild.data)

    def test_commit(self):
        # EGIT_COMMIT round-trips through the property (40-hex-char shas).
        self.data_values['commit'] = '0' * 40
        other_commit = 'f' * 40
        my_ebuild = TargetEbuildContent(self.data % self.data_values)
        self.assertEqual(my_ebuild.commit, self.data_values['commit'])
        my_ebuild.commit = other_commit
        self.assertEqual(my_ebuild.commit, other_commit)
        self.assertIn(other_commit, my_ebuild.data)

    def test_overlays(self):
        # The overlays property exposes XOV_BASE_*/XOV_BOARD_* variables and
        # assigning it updates only the per-overlay revisions.
        self.data_values['xov_base_proto'] = 'git'
        self.data_values['xov_base_uri'] = 'gitsrv:genbox/overlay_base'
        self.data_values['xov_board_uri'] = 'gitsrv:genbox/overlay_board'
        self.data_values['xov_base_branch'] = 'master'
        self.data_values['xov_base_revision'] = '12345abcde' * 4
        other_values = {'base': '0' * 40, 'board': 'f' * 40}
        my_ebuild = TargetEbuildContent(self.data % self.data_values)
        for value in ['uri', 'branch', 'proto', 'revision']:
            base_value = 'xov_base_%s' % value
            self.assertEqual(my_ebuild.overlays['base'][value], self.data_values[base_value])
        self.assertEqual(my_ebuild.overlays['board']['uri'], self.data_values['xov_board_uri'])
        my_ebuild.overlays = other_values
        self.assertEqual(my_ebuild.overlays['base']['revision'], other_values['base'])
        self.assertEqual(my_ebuild.overlays['base']['uri'], self.data_values['xov_base_uri'])
        self.assertEqual(my_ebuild.overlays['board']['revision'], other_values['board'])
        self.assertEqual(my_ebuild.overlays['board']['uri'], self.data_values['xov_board_uri'])
        for value in other_values.values():
            self.assertIn(value, my_ebuild.data)

    # Python 2 only: '__builtin__' would be 'builtins' on Python 3.
    @patch('__builtin__.open', new_callable=mock_open)
    @patch('os.path.exists')
    def test_write_into(self, mock_exists, mock_open_):
        # write_into refuses to overwrite without force=True and reports
        # failure on IOError.
        my_ebuild = TargetEbuildContent(self.data % self.data_values)
        mock_exists.return_value = True
        self.assertFalse(my_ebuild.write_into('mock', force=False))
        self.assertTrue(my_ebuild.write_into('mock', force=True))
        mock_exists.return_value = False
        mock_open_.side_effect = IOError(13)
        self.assertFalse(my_ebuild.write_into('mock'))
if __name__ == '__main__':
    # Allow running this test module directly via unittest's runner.
    main()
| frogbywyplay/genbox_xintegtools | tests/test_xbump_target_ebuild.py | Python | gpl-2.0 | 5,455 |
# -*- coding: utf-8 -*-
# Gender choices for model fields: each entry is a (stored value, display
# label) pair; value and label are the same Chinese character.
GENDER = tuple((gender, gender) for gender in (u'男', u'女'))
| youtaya/knight | fuzzybee/utils/constant.py | Python | mit | 67 |
# -*- coding: UTF-8 -*-
import pytest
from binascii import a2b_hex as a2b
try:
from unittest.mock import patch, call, MagicMock
except ImportError:
from mock import patch, call, MagicMock
from implib2.imp_bus import Bus, BusError
from implib2.imp_device import Device, DeviceError # noqa
from implib2.imp_commands import Command # noqa
from implib2.imp_responces import Responce # noqa
class TestBus:
    """Unit tests for implib2.imp_bus.Bus.

    Device, Command and Responce are patched out; a single MagicMock
    ``manager`` has the three mocks attached so each test can assert the
    exact *order* of calls across all collaborators, not just that each
    call happened.
    """

    def setup(self):
        # Patch the Bus collaborators so no real serial I/O happens.
        self.patcher1 = patch('implib2.imp_bus.Device')
        self.patcher2 = patch('implib2.imp_bus.Command')
        self.patcher3 = patch('implib2.imp_bus.Responce')
        mock_dev = self.patcher1.start()
        mock_cmd = self.patcher2.start()
        mock_res = self.patcher3.start()
        self.dev = mock_dev()
        self.cmd = mock_cmd()
        self.res = mock_res()
        # Attach all three mocks to one manager to record global call order.
        self.manager = MagicMock()
        self.manager.attach_mock(self.dev, 'dev')
        self.manager.attach_mock(self.cmd, 'cmd')
        self.manager.attach_mock(self.res, 'res')
        self.bus = Bus()

    def teardown(self):
        self.patcher1.stop()
        self.patcher2.stop()
        self.patcher3.stop()

    def test_wakeup(self):
        # wakeup() broadcasts an EnterSleep=0 set_parameter to all probes.
        address = 16777215
        table = 'ACTION_PARAMETER_TABLE'
        param = 'EnterSleep'
        value = 0
        ad_param = 0
        package = a2b('fd1504fffffffe05000035')
        expected_calls = [
            call.cmd.set_parameter(address, table, param, [value], ad_param),
            call.dev.open_device(),
            call.dev.write_pkg(package),
        ]
        self.cmd.set_parameter.return_value = package
        self.dev.write_pkg.return_value = True
        assert self.bus.wakeup()
        assert self.manager.mock_calls == expected_calls

    def test_sync(self):
        # sync() pushes the new baudrate at every legacy rate in turn,
        # then reopens the device at the requested rate.
        address = 16777215
        table = 'SYSTEM_PARAMETER_TABLE'
        param = 'Baudrate'
        baudrate = 9600
        value = baudrate // 100
        ad_param = 0
        package = a2b('fd0b05ffffffaf0400600054')
        expected_calls = [
            call.cmd.set_parameter(address, table, param, [value], ad_param),
            call.dev.close_device(),
            call.dev.open_device(baudrate=1200),
            call.dev.write_pkg(package),
            call.dev.close_device(),
            call.dev.open_device(baudrate=2400),
            call.dev.write_pkg(package),
            call.dev.close_device(),
            call.dev.open_device(baudrate=4800),
            call.dev.write_pkg(package),
            call.dev.close_device(),
            call.dev.open_device(baudrate=9600),
            call.dev.write_pkg(package),
            call.dev.close_device(),
            call.dev.open_device(baudrate=baudrate)
        ]
        self.cmd.set_parameter.return_value = package
        self.dev.write_pkg.return_value = True
        self.bus.sync(baudrate=baudrate)
        assert self.bus.bus_synced
        assert self.manager.mock_calls == expected_calls

    def test_sync_WithWrongBaudrate(self):
        # NOTE(review): pytest.raises's `message=` kwarg was removed in
        # pytest 5.0; this only runs on old pytest. Use `match=` instead.
        with pytest.raises(BusError, message="Unknown baudrate!"):
            self.bus.sync(baudrate=6666)

    def test_scan_AndFindEverything(self):
        # With every range and every serial probing True, scan() must walk
        # the binary broadcast tree in this exact order.
        minserial = 0b0001  # 01
        maxserial = 0b1010  # 10
        self.bus.probe_range = MagicMock()
        self.bus.probe_range.return_value = True
        self.bus.probe_module_short = MagicMock()
        self.bus.probe_module_short.return_value = True
        range_list = [
            call(0b1000),  # 08
            call(0b1100),  # 12
            call(0b1110),  # 14
            call(0b1111),  # 15
            call(0b1101),  # 13
            call(0b1010),  # 10
            call(0b1011),  # 11
            call(0b1001),  # 09
            call(0b0100),  # 04
            call(0b0110),  # 06
            call(0b0111),  # 07
            call(0b0101),  # 05
            call(0b0010),  # 02
            call(0b0011),  # 03
            call(0b0001)   # 01
        ]
        modules_list = [
            call(0b1111),  # 15
            call(0b1110),  # 14
            call(0b1101),  # 13
            call(0b1100),  # 12
            call(0b1011),  # 11
            call(0b1010),  # 10
            call(0b1001),  # 09
            call(0b1000),  # 08
            call(0b0111),  # 07
            call(0b0110),  # 06
            call(0b0101),  # 05
            call(0b0100),  # 04
            call(0b0011),  # 03
            call(0b0010),  # 02
            call(0b0001),  # 01
            call(0b0000)   # 00
        ]
        results = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, )
        assert self.bus.scan(minserial, maxserial) == results
        assert self.bus.probe_range.call_args_list == range_list
        assert self.bus.probe_module_short.call_args_list == modules_list

    def test_scan_bus_ButNothingFound(self):
        # If the very first (widest) range probe fails, scan stops there.
        minserial = 0b0001  # 01
        maxserial = 0b1010  # 10
        self.bus.probe_range = MagicMock()
        self.bus.probe_range.return_value = False
        # NOTE(review): `is tuple()` happens to pass only because CPython
        # interns the empty tuple; `== tuple()` would be the robust check.
        assert self.bus.scan(minserial, maxserial) is tuple()
        self.bus.probe_range.assert_called_once_with(0b1000)

    @pytest.mark.parametrize("probe", range(33000, 34001))
    def test_scan_AndFindOne(self, probe):
        # Simulate exactly one probe on the bus at serial `probe` and make
        # sure the binary search finds precisely that one.
        minserial = 33000
        maxserial = 34000

        def check_range(bcast):
            # True iff `probe` falls inside the broadcast range `bcast`.
            serno = probe
            while not bcast & 1:
                bcast = bcast >> 1
                serno = serno >> 1
            return (bcast >> 1) == (serno >> 1)

        def check_serno(serno):
            return serno == probe

        self.bus.probe_range = MagicMock()
        self.bus.probe_range.side_effect = check_range
        self.bus.probe_module_short = MagicMock()
        self.bus.probe_module_short.side_effect = check_serno
        assert self.bus.scan(minserial, maxserial) == (probe,)

    def test_find_single_module(self):
        # A negative-ack broadcast should return the single probe's serial.
        serno = 31002
        package = a2b('fd0800ffffff60')
        bytes_recv = a2b('000805ffffffd91a79000042')
        expected_calls = [
            call.cmd.get_negative_ack(),
            call.dev.write_pkg(package),
            call.dev.read_pkg(),
            call.res.get_negative_ack(bytes_recv)
        ]
        self.cmd.get_negative_ack.return_value = package
        self.dev.read_pkg.return_value = bytes_recv
        self.res.get_negative_ack.return_value = serno
        assert self.bus.find_single_module() == serno
        assert self.manager.mock_calls == expected_calls

    def test_find_single_module_FindNothing(self):
        # A read timeout (DeviceError) means "no module answered".
        package = a2b('fd0800ffffff60')
        bytes_recv = DeviceError('Timeout reading header!')
        expected_calls = [
            call.cmd.get_negative_ack(),
            call.dev.write_pkg(package),
            call.dev.read_pkg()
        ]
        self.cmd.get_negative_ack.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_pkg.side_effect = bytes_recv
        assert not self.bus.find_single_module()
        assert self.manager.mock_calls == expected_calls

    def test_probe_module_long(self):
        serno = 31002
        package = a2b('fd02001a79009f')
        bytes_recv = a2b('0002001a7900a7')
        expected_calls = [
            call.cmd.get_long_ack(serno),
            call.dev.write_pkg(package),
            call.dev.read_pkg(),
            call.res.get_long_ack(bytes_recv, serno)
        ]
        self.cmd.get_long_ack.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_pkg.return_value = bytes_recv
        self.res.get_long_ack.return_value = True
        assert self.bus.probe_module_long(serno)
        assert self.manager.mock_calls == expected_calls

    def test_probe_module_long_ButGetDeviceError(self):
        serno = 31002
        package = a2b('fd02001a79009f')
        bytes_recv = DeviceError('Timeout reading header!')
        expected_calls = [
            call.cmd.get_long_ack(serno),
            call.dev.write_pkg(package),
            call.dev.read_pkg(),
        ]
        self.cmd.get_long_ack.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_pkg.side_effect = bytes_recv
        assert not self.bus.probe_module_long(serno)
        assert self.manager.mock_calls == expected_calls

    def test_probe_module_short(self):
        # Short probing reads a single ack byte instead of a whole package.
        serno = 31002
        package = a2b('fd04001a790003')
        bytes_recv = a2b('24')
        expected_calls = [
            call.cmd.get_short_ack(serno),
            call.dev.write_pkg(package),
            call.dev.read_bytes(1),
            call.res.get_short_ack(bytes_recv, serno)
        ]
        self.cmd.get_short_ack.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_bytes.return_value = bytes_recv
        self.res.get_short_ack.return_value = True
        assert self.bus.probe_module_short(serno)
        assert self.manager.mock_calls == expected_calls

    def test_probe_module_short_ButGetDeviceError(self):
        serno = 31002
        package = a2b('fd04001a790003')
        bytes_recv = DeviceError('Timeout reading header!')
        expected_calls = [
            call.cmd.get_short_ack(serno),
            call.dev.write_pkg(package),
            call.dev.read_bytes(1)
        ]
        self.cmd.get_short_ack.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_bytes.side_effect = bytes_recv
        assert not self.bus.probe_module_short(serno)
        assert self.manager.mock_calls == expected_calls

    def test_probe_range(self):
        broadcast = 0b111100000000000000000000
        package = a2b('fd06000000f0d0')
        bytes_recv = a2b('ff')
        expected_calls = [
            call.cmd.get_range_ack(broadcast),
            call.dev.write_pkg(package),
            call.dev.read(),
            call.res.get_range_ack(bytes_recv)
        ]
        self.cmd.get_range_ack.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read.return_value = bytes_recv
        self.res.get_range_ack.return_value = True
        assert self.bus.probe_range(broadcast)
        assert self.manager.mock_calls == expected_calls

    def test_probe_range_AndFindNothing(self):
        # An empty read means no probe in the broadcast range answered.
        broadcast = 0b111100000000000000000000
        package = a2b('fd06000000f0d0')
        bytes_recv = bytes()
        expected_calls = [
            call.cmd.get_range_ack(broadcast),
            call.dev.write_pkg(package),
            call.dev.read(),
            call.res.get_range_ack(bytes_recv)
        ]
        self.cmd.get_range_ack.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read.return_value = bytes_recv
        self.res.get_range_ack.return_value = False
        assert not self.bus.probe_range(broadcast)
        assert self.manager.mock_calls == expected_calls

    def test_get(self):
        serno = 31002
        table = 'SYSTEM_PARAMETER_TABLE'
        param = 'SerialNum'
        package = a2b('fd0a031a7900290100c4')
        bytes_recv = a2b('000a051a7900181a79000042')
        expected_calls = [
            call.cmd.get_parameter(serno, table, param),
            call.dev.write_pkg(package),
            call.dev.read_pkg(),
            call.res.get_parameter(bytes_recv, table, param)
        ]
        self.cmd.get_parameter.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_pkg.return_value = bytes_recv
        self.res.get_parameter.return_value = (31002,)
        assert self.bus.get(serno, table, param) == (serno,)
        assert self.manager.mock_calls == expected_calls

    def test_set(self):
        serno = 31002
        table = 'PROBE_CONFIGURATION_PARAMETER_TABLE'
        param = 'DeviceSerialNum'
        value = [31003]
        ad_param = 0
        package = a2b('fd11071a79002b0c001b790000b0')
        bytes_recv = a2b('0011001a790095')
        expected_calls = [
            call.cmd.set_parameter(serno, table, param, value, ad_param),
            call.dev.write_pkg(package),
            call.dev.read_pkg(),
            call.res.set_parameter(bytes_recv, table, serno)
        ]
        self.cmd.set_parameter.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_pkg.return_value = bytes_recv
        self.res.set_parameter.return_value = True
        assert self.bus.set(serno, table, param, value)
        assert self.manager.mock_calls == expected_calls

    def test_get_eeprom_page(self):
        serno = 30001
        page_nr = 0
        page = [17, 47, 196, 78, 55, 2, 243, 231, 251, 61]
        package = a2b('fd3c0331750029ff0081')
        bytes_recv = a2b('003c0b1a790015112fc44e3702f3e7fb3dc5')
        expected_calls = [
            call.cmd.get_epr_page(serno, page_nr),
            call.dev.write_pkg(package),
            call.dev.read_pkg(),
            call.res.get_epr_page(bytes_recv)
        ]
        self.cmd.get_epr_page.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_pkg.return_value = bytes_recv
        self.res.get_epr_page.return_value = page
        assert self.bus.get_eeprom_page(serno, page_nr) == page
        assert self.manager.mock_calls == expected_calls

    def test_set_eeprom_page(self):
        serno = 30001
        page_nr = 7
        page = [0, 0, 0, 0, 0, 0, 0, 0, 35, 255, 255, 0]
        package = a2b('fd3d0f317500f6ff07000000000000000023ffff007b')
        bytes_recv = a2b('003d001a79004c')
        expected_calls = [
            call.cmd.set_epr_page(serno, page_nr, page),
            call.dev.write_pkg(package),
            call.dev.read_pkg(),
            call.res.set_epr_page(bytes_recv)
        ]
        self.cmd.set_epr_page.return_value = package
        self.dev.write_pkg.return_value = True
        self.dev.read_pkg.return_value = bytes_recv
        self.res.set_epr_page.return_value = True
        assert self.bus.set_eeprom_page(serno, page_nr, page)
        assert self.manager.mock_calls == expected_calls
| mhubig/implib2 | tests/test_bus.py | Python | mit | 14,029 |
#!/usr/bin/env python
""" Set up the node. A first startup consists of giving the command
'python manage'. This will create an empty settings file. Copy&paste
variables you want to change from settings_default.py to setup your
node. Next define your models. You'll need to add the folder containing
the 'models' directory to the INSTALLED_APPS tuple at least. Don't edit
settings_default.py directly. Next run 'python manage.py syncdb'. This
will read your settings file and create an empty database using your
models. If you change the models you need to run syncdb again. """
import sys
import os
import traceback
# Tack on the vamdc root directory to the python path.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Tracks whether this run had to create a fresh settings.py, so the
# __main__ block below can print first-time setup instructions.
_CREATED_SETTINGS = False

if not os.path.exists('settings.py'):
    # If settings.py doesn't already exist, create it
    string = "-"*50 + "\n Welcome to the VAMDC node setup."
    string += "\n\n Created a fresh settings.py file for you."
    print string
    settings_file = open('settings.py', 'w')
    _CREATED_SETTINGS = True
    # Template written into the new settings.py: it imports everything from
    # settings_default and shows the options users most commonly override.
    string = \
"""#
# VAMDC-node config file
#
# You may customize your setup by copy&pasting the variables you want to
# change from the default config file in nodes/settings_default.py to
# this file. Try to only copy over things you really need to customize
# and do *not* make any changes to settings_defaults.py directly. That
# way you'll always have a sane default to fall back on (also, the
# master file may change with updates).

from settings_default import *

# Comment out the following line once your node goes live.
DEBUG=True

###################################################
# Database connection
# Setting up the database type and information.
# Simplest for testing is sqlite3.
###################################################
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'node.db',                      # Or path to database file if using sqlite3.
        'USER': '',                             # Not used with sqlite3.
        'PASSWORD': '',                         # Not used with sqlite3.
        'HOST': '',                             # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                             # Set to empty string for default. Not used with sqlite3.
    }
}

#########################################
# Admin information
#########################################
ADMINS = (\
    ('Admin 1 Name', 'name1@mail.net'),
    ('Admin 2 Name', 'name2@mail.net'),
)
"""
    settings_file.write(string)
    settings_file.close()

# Settings file created or already existed. Test it.
try:
    import settings
except Exception:
    # Importing failed: report the original traceback plus setup hints.
    string = "\n" + traceback.format_exc()
    string += \
"""
Error: Couldn't import the file 'settings.py' in the directory
containing %r. There can be two reasons for this:
1) You moved your settings.py elsewhere. In that case you need to run
django-admin.py, passing it the true location of your settings module.
2) The settings module is where it's supposed to be, but an exception
was raised when trying to load it. Review the traceback above to
resolve the problem, then try again. \n"""
    sys.stderr.write(string % __file__)
    sys.exit(1)

# At this point we have an imported settings module, although it may be empty.
if __name__ == "__main__":
    if _CREATED_SETTINGS:
        # Freshly created settings: tell the user what to do next and stop.
        string = "\n Edit your new settings.py file as needed, then run\n"
        string += " 'python manage.py syncdb'.\n"
        string += "-"*50
        print string
        sys.exit()
    # Run the django setup using our settings file.
    from django.core.management import execute_manager
    #from xml.sax import saxutils
    execute_manager(settings)
| ivh/VAMDC-VALD | nodes/xstardb/manage.py | Python | gpl-3.0 | 3,909 |
import socket
import sys
import netifaces
from time import sleep
import ctypes
import fcntl
import select
from struct import unpack
import event
#testing ----
from datagram import ipv4datagram
#----
class interfaces(object):
"""Returns interfaces list and fetches IPs associated with them"""
_interface_list = []
def __init__(self):
self._interface_list = netifaces.interfaces()
def get_interface_list(self):
return self._interface_list
def get_interface_addr(self, inface):
if inface not in self._interface_list:
print "No Interface found"
return ""
else:
addrs = netifaces.ifaddresses(inface)
return addrs[netifaces.AF_INET][0]['addr']
def interface_exists(self, interface):
if interface in self._interface_list:
return True
else:
return False
class sockets(object):
    """Creates raw packet-capture sockets bound to a network interface."""
    def __init__(self):
        pass
        # self.sock = arg

    def get_socket(self, interface):
        # AF_PACKET + SOCK_RAW with protocol ETH_P_ALL (0x0003) receives
        # every ethernet frame on the interface (IP, ARP, ...), which is
        # exactly what a sniffer wants. Exits the process on failure.
        try:
            self.sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
            self.sock.bind((interface,0))
            #Should the above be changed to smth like socket.IPPROTO_IP but we are receiving ARP and other messages;
        except socket.error , msg:
            print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
            sys.exit(1)
        return self.sock
class ifreq(ctypes.Structure):
    # Minimal mirror of the kernel's `struct ifreq`, carrying just the
    # interface name and flags needed for the SIOCGIFFLAGS/SIOCSIFFLAGS
    # ioctls used to toggle promiscuous mode.
    _fields_ = [("ifr_ifrn", ctypes.c_char * 16),
                ("ifr_flags", ctypes.c_short)]
class sniffer(object):
    """Promiscuous-mode packet sniffer (Python 2, Linux only).

    Opens a raw AF_PACKET socket on the given interface, flips the
    interface into promiscuous mode, and in start() runs a select() loop
    that parses ethernet/IP/TCP/UDP/ICMP headers of every frame and fires
    ``event_packet_is_received`` for each IP datagram. Pressing Enter on
    stdin is meant to stop the loop.
    """
    sock_o = sockets()
    IFF_PROMISC = 0x100     # interface flag: receive all packets
    SIOCGIFFLAGS = 0x8913   # ioctl: get interface flags
    SIOCSIFFLAGS = 0x8914   # ioctl: set interface flags
    _interface = ""
    interface_ob = interfaces()
    event_packet_is_received = event.Event("A packet is received")

    def __init__(self,interface):
        # Exits the process if the interface name is unknown.
        self.sock = self.sock_o.get_socket(interface)
        if self.interface_ob.interface_exists(interface):
            self._interface = interface
        else:
            print "Please give a suitable interface !!"
            sys.exit(1)
        # self.sock.setsockopt(socket.SOL_SOCKET, 25 , interface+'\0')
        self.sock.setblocking(False)
        self.running = False
        print "Sniffer Initiliased"
        # self.sock.bind(('',50000))

    def packetisreceived(self,packet):
        # Do some actions and fire event.
        self.event_packet_is_received(packet)

    def eth_addr (self,a) :
        # Format a 6-byte MAC address string as "aa:bb:cc:dd:ee:ff".
        b = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(a[0]) , ord(a[1]) , ord(a[2]), ord(a[3]), ord(a[4]) , ord(a[5]))
        return b

    def start_promisc(self):
        """Start promisious mode"""
        # Uses a throwaway UDP socket purely as an fd for the flag ioctls.
        try:
            p_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except socket.error , msg:
            print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
            sys.exit(1)
        ifr = ifreq()
        ifr.ifr_ifrn = self._interface
        try:
            fcntl.ioctl(p_sock.fileno() ,self.SIOCGIFFLAGS ,ifr)
        except IOError, msg :
            print "Wrong interface!! Error code" + str(msg[0]) + 'Message' + msg[1]
            sys.exit(1)
        ifr.ifr_flags |= self.IFF_PROMISC
        fcntl.ioctl(p_sock.fileno(), self.SIOCSIFFLAGS, ifr) # S for Set
        p_sock.close()

    def close_promisc(self):
        """close promisious mode"""
        # Mirror of start_promisc: clear the IFF_PROMISC bit.
        try:
            p_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except socket.error , msg:
            print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
            sys.exit(1)
        ifr = ifreq()
        ifr.ifr_ifrn = self._interface
        try:
            fcntl.ioctl(p_sock.fileno() ,self.SIOCGIFFLAGS ,ifr)
        except IOError, msg :
            print "Wrong interface!! Error code" + str(msg[0]) + 'Message' + msg[1]
            sys.exit(1)
        ifr.ifr_flags &= ~self.IFF_PROMISC
        fcntl.ioctl(p_sock.fileno(), self.SIOCSIFFLAGS, ifr)
        p_sock.close()

    def parse_ethernet_header(self,packet):
        """
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        | Ethernet destination address (first 32 bits)                  |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        | Ethernet dest (last 16 bits)  |Ethernet source (first 16 bits)|
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        | Ethernet source address (last 32 bits)                        |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        | Type code                     |                               |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

        Returns (dest_mac, source_mac, eth_protocol) of the frame.
        """
        #parse ethernet header
        eth_length = 14 #Size of header is 14 bytes
        eth_header = packet[:eth_length]
        eth = unpack('!6s6sH' , eth_header)
        #6s for 6 char[] array
        #H for unsigned short
        eth_protocol = socket.ntohs(eth[2])
        dest_mac = self.eth_addr(packet[0:6])
        source_mac = self.eth_addr(packet[6:12])
        print 'Destination MAC : ' + dest_mac + ' Source MAC : '+ source_mac + ' Protocol : ' + str(eth_protocol)
        return dest_mac, source_mac, eth_protocol

    def parse_ip_packet(self,packet):
        # Parse the IPv4 header that follows the 14-byte ethernet header
        # and dispatch to the TCP/ICMP/UDP parser based on the protocol
        # field (6/1/17).
        #Parse IP header
        #take first 20 characters for the ip header
        eth_length = 14
        ip_header = packet[eth_length:20+eth_length]
        iph = unpack('!BBHHHBBH4s4s' , ip_header)
        version_ihl = iph[0]
        version = version_ihl >> 4
        ihl = version_ihl & 0xF
        iph_length = ihl * 4  # IHL is in 32-bit words
        ttl = iph[5]
        protocol = iph[6]
        s_addr = socket.inet_ntoa(iph[8]);
        d_addr = socket.inet_ntoa(iph[9]);
        print 'Version : ' + str(version) + ' IP Header Length : ' + str(ihl) + ' TTL : ' + str(ttl) + ' Protocol : ' \
            + str(protocol) + ' Source Address : ' + str(s_addr) + ' Destination Address : ' + str(d_addr)
        if protocol == 6:
            self.parse_tcp(packet,iph_length)
        elif protocol == 1:
            self.parse_icmp(packet,iph_length)
        elif protocol == 17:
            self.parse_udp(packet,iph_length)
        else:
            print("Other packet than tcp/udp/icmp")

    def parse_tcp(self,packet,iph_length):
        # Unpack the fixed 20-byte TCP header and print ports, sequence
        # numbers and payload.
        eth_length = 14
        t = iph_length + eth_length
        tcp_header = packet[t:t+20]
        tcph = unpack('!HHLLBBHHH' , tcp_header)
        source_port = tcph[0]
        dest_port = tcph[1]
        sequence = tcph[2]
        acknowledgement = tcph[3]
        doff_reserved = tcph[4]
        tcph_length = doff_reserved >> 4  # data offset, in 32-bit words
        print 'Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Sequence Number : ' + str(sequence) + ' Acknowledgement : ' \
            + str(acknowledgement) + ' TCP header length : ' + str(tcph_length)
        h_size = eth_length + iph_length + tcph_length * 4
        data_size = len(packet) - h_size
        #get data from the packet
        data = packet[h_size:]
        print 'Data : ' + data

    def parse_icmp(self,packet,iph_length):
        # Unpack the 4-byte ICMP header (type, code, checksum) and payload.
        eth_length = 14
        u = iph_length + eth_length
        icmph_length = 4
        icmp_header = packet[u:u+4]
        icmph = unpack('!BBH' , icmp_header)
        icmp_type = icmph[0]
        code = icmph[1]
        checksum = icmph[2]
        print 'Type : ' + str(icmp_type) + ' Code : ' + str(code) + ' Checksum : ' + str(checksum)
        h_size = eth_length + iph_length + icmph_length
        data_size = len(packet) - h_size
        #get data from the packet
        data = packet[h_size:]
        print 'Data : ' + data

    def parse_udp(self,packet,iph_length):
        # Unpack the 8-byte UDP header and print ports, length and payload.
        eth_length = 14
        u = iph_length + eth_length
        udph_length = 8
        udp_header = packet[u:u+8]
        udph = unpack('!HHHH' , udp_header)
        source_port = udph[0]
        dest_port = udph[1]
        length = udph[2]
        checksum = udph[3]
        print 'Source Port : ' + str(source_port) + ' Dest Port : ' \
            + str(dest_port) + ' Length : ' + str(length) + ' Checksum : ' + str(checksum)
        h_size = eth_length + iph_length + udph_length
        data_size = len(packet) - h_size
        #get data from the packet
        data = packet[h_size:]
        print 'Data : ' + data

    def start(self):
        # Main capture loop: select() on the raw socket and stdin. Frames
        # are parsed and, for IP (eth_protocol == 8), wrapped in an
        # ipv4datagram and delivered through packetisreceived().
        # NOTE(review): the stdin branch assigns the local `running`, not
        # `self.running`, so it does not actually stop the loop — confirm
        # intended behavior before relying on it.
        input_list = [self.sock,sys.stdin]
        self.running = True
        self.start_promisc()
        while self.running:
            inputready,outputready,exceptready = select.select(input_list,[],[])
            for s in inputready:
                if s == self.sock:
                    try:
                        packet = self.sock.recvfrom(65565);
                    except socket.timeout, e:
                        err = e.args[0]
                        # this next if/else is a bit redundant, but illustrates how the
                        # timeout exception is setup
                        if err == 'timed out':
                            sleep(1)
                            print 'recv timed out, retry later'
                            continue
                        else:
                            print e
                            # continue
                            sys.exit(1)
                    except socket.error, e:
                        print e
                        sys.exit(1)
                    else:
                        # forward packet
                        sender = packet[0]
                        packet = packet[0]
                        dest_mac, source_mac, eth_protocol = self.parse_ethernet_header(packet)
                        # self.packetisreceived(eth_protocol)
                        if eth_protocol == 8:
                            print "IP Packet"
                            #ip_packet = ipv4datgram(packet)
                            #ip_packet.parse_ip_packet()
                            self.parse_ip_packet(packet)
                            ip_packet = ipv4datagram(source_mac,dest_mac,eth_protocol,packet)
                            ip_packet.parse_ip_packet(packet)
                            #fire event
                            #self.packetisreceived(ip_packet)
                            self.packetisreceived(ip_packet)
                            # self.running = False
                            # break
                            # continue
                if s == sys.stdin:
                    dummy = sys.stdin.readline()
                    running = False
                else:
                    continue
        self.close_promisc()
        self.sock.close()

    def _start(self):
        # Lightweight start: only flips the running flag (no capture loop).
        print "Sniffer Started"
        self.running = True

    def close(self):
        # Stop the loop (if running), leave promiscuous mode and release
        # the raw socket.
        print "Close Called"
        print self.running
        if self.running == True:
            self.running = False
            self.close_promisc()
            self.sock.close()
        print "Sniffer Closed"

    def _close(self):
        # Lightweight close: only clears the running flag.
        print "Sniffer Closed"
        self.running = False
###Debug
if __name__ == "__main__":
    # Ad-hoc manual test: list interfaces, show wlan0's address, then sniff.
    x = interfaces()
    print x.get_interface_list()
    print x.get_interface_addr('wlan0')
    sniff = sniffer('wlan0')
    sniff.start()
    # sniff.start_promisc('eth0')
    # raw_input()
# sniff.close_promisc() | guptaarchit/nacsniff | sniffer_socket.py | Python | gpl-2.0 | 11,607 |
#!/usr/bin/env python
"""Handles decoders"""
from .. import config
import os
from glob import glob
import sys
import imp
#Decoders
import flac
import mp3
import oggVorbis
import speex
import wave
import wavpack
# All known decoder modules, in priority order.
modules = [flac, mp3, oggVorbis, speex, wave, wavpack]

# Map each file extension a module declares in HANDLES to that module.
# If two modules claim the same extension, the later one in `modules` wins.
handlers = {ext: module for module in modules for ext in module.HANDLES}
def decode(inF, outF):
    """Decode inF to probably outF.

    outF is a hint as to where to decode to, but the handler might pick a
    different name. Returns the filename that the file was decoded to, or
    None when no decoder is registered for inF's extension.
    """
    ext = os.path.splitext(inF)[1][1:].lower()
    handler = handlers.get(ext)
    if handler is None:
        return None
    return handler.decode(inF, outF)
def getMetadata(inF):
    """Return inF's metadata via its registered decoder module, or None
    when the file extension has no handler."""
    ext = os.path.splitext(inF)[1][1:].lower()
    handler = handlers.get(ext)
    if handler is None:
        return None
    return handler.getMetadata(inF)
| jeffayle/Transcode | audioTranscode/decoders/__init__.py | Python | isc | 859 |
from ds.vortex.core import baseNode
from ds.vortex.core import plug as plugs
class StringNode(baseNode.BaseNode):
    """Vortex graph node that casts its input plug's value to a string."""
    def __init__(self, name):
        """
        :param name: str, the name of the node
        """
        baseNode.BaseNode.__init__(self, name)

    def initialize(self):
        # Create one output plug and one input plug (default value []),
        # register both as clean, and declare that dirtying "value"
        # propagates to "output".
        baseNode.BaseNode.initialize(self)
        self.outputPlug_ = plugs.OutputPlug("output", self)
        self.valuePlug_ = plugs.InputPlug("value", self, value=[])
        self.addPlug(self.outputPlug_, clean=True)
        self.addPlug(self.valuePlug_, clean=True)
        self.plugAffects(self.valuePlug_, self.outputPlug_)

    def compute(self, requestPlug):
        # Only the output plug is computable; requests for any other plug
        # return None. The result is str(value), cached on the plug and the
        # plug marked clean.
        baseNode.BaseNode.compute(self, requestPlug=requestPlug)
        if requestPlug != self.outputPlug_:
            return None
        result = str(self.valuePlug_.value)
        requestPlug.value = result
        requestPlug.dirty = False
        return result
def getNode():
    """General function that returns our node, used to create our node via UI etc.

    :return: the StringNode class (not an instance) — callers instantiate it.
    """
    return StringNode
| dsparrow27/vortex | src/ds/vortex/nodes/string/string.py | Python | mit | 1,090 |
import datetime
import json
import logging
import md5
import random
import tba_config
import urllib
import uuid
import webapp2
from google.appengine.api import urlfetch
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from consts.auth_type import AuthType
from controllers.base_controller import CacheableHandler
from datafeeds.parser_base import ParserInputException
from helpers.validation_helper import ValidationHelper
from models.api_auth_access import ApiAuthAccess
from models.cached_response import CachedResponse
from models.sitevar import Sitevar
# used for deferred call
def track_call(api_action, api_label, x_tba_app_id):
    """
    For more information about GAnalytics Protocol Parameters, visit
    https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
    """
    analytics_id = Sitevar.get_by_id("google_analytics.id")
    if analytics_id is None:
        logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.")
        return
    ga_tracking_id = analytics_id.contents['GOOGLE_ANALYTICS_ID']
    # Build a Measurement Protocol "event" hit describing this API call.
    query_string = urllib.urlencode({
        'v': 1,
        'tid': ga_tracking_id,
        'cid': uuid.uuid3(uuid.NAMESPACE_X500, str(x_tba_app_id)),
        't': 'event',
        'ec': 'api-v02',
        'ea': api_action,
        'el': api_label,
        'cd1': x_tba_app_id,  # custom dimension 1
        'ni': 1,
        'sc': 'end',  # forces tracking session to end
    })
    urlfetch.fetch(
        url='http://www.google-analytics.com/collect?%s' % query_string,
        method=urlfetch.GET,
        deadline=10,
    )
class ApiBaseController(CacheableHandler):
    # Version number echoed back to clients in the X-TBA-Version header.
    API_VERSION = 2
    def __init__(self, *args, **kw):
        super(ApiBaseController, self).__init__(*args, **kw)
        # All API responses are JSON and CORS-enabled for any origin.
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        self.response.headers['Access-Control-Allow-Origin'] = '*'
    def handle_exception(self, exception, debug):
        """
        Handle an HTTP exception and actually writeout a
        response.
        Called by webapp when abort() is called, stops code excution.
        """
        if isinstance(exception, webapp2.HTTPException):
            # Deliberate abort(): emit the status plus the JSON error body
            # prepared in self._errors before aborting.
            self.response.set_status(exception.code)
            self.response.out.write(self._errors)
        else:
            # Unexpected failure: log the traceback and return a plain 500.
            logging.exception(exception)
            self.response.set_status(500)
    def get(self, *args, **kw):
        # Validate the client app id and the URL arguments first; abort(400)
        # short-circuits through handle_exception above.
        self._validate_tba_app_id()
        self._errors = ValidationHelper.validate(self._validators)
        if self._errors:
            self.abort(400)
        self._track_call(*args, **kw)
        super(ApiBaseController, self).get(*args, **kw)
        self.response.headers['X-TBA-Version'] = '{}'.format(self.API_VERSION)
        self.response.headers['Vary'] = 'Accept-Encoding'
    def options(self, *args, **kw):
        """
        Supply an OPTIONS method in order to comply with CORS preflghted requests
        https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Preflighted_requests
        """
        self.response.headers['Access-Control-Allow-Methods'] = "GET, OPTIONS"
        self.response.headers['Access-Control-Allow-Headers'] = 'X-TBA-App-Id'
    def _read_cache(self):
        """
        Overrides parent method to use CachedResponse instead of memcache
        """
        response = CachedResponse.get_by_id(self.cache_key)
        if response:
            # Remember when the cached copy was written (for Last-Modified).
            self._last_modified = response.updated
            return response
        else:
            return None
    def _write_cache(self, response):
        """
        Overrides parent method to use CachedResponse instead of memcache
        """
        if tba_config.CONFIG["response_cache"]:
            CachedResponse(
                id=self.cache_key,
                headers_json=json.dumps(dict(response.headers)),
                body=response.body,
            ).put()
    @classmethod
    def delete_cache_multi(cls, cache_keys):
        """
        Overrides parent method to use CachedResponse instead of memcache
        """
        logging.info("Deleting cache keys: {}".format(cache_keys))
        ndb.delete_multi([ndb.Key(CachedResponse, cache_key) for cache_key in cache_keys])
    def _track_call_defer(self, api_action, api_label):
        # Sample analytics tracking and run it on a task queue so it never
        # blocks the request being served.
        if random.random() < tba_config.GA_RECORD_FRACTION:
            deferred.defer(track_call, api_action, api_label, self.x_tba_app_id, _queue="api-track-call")
    def _validate_tba_app_id(self):
        """
        Tests the presence of a X-TBA-App-Id header or URL param.
        """
        self.x_tba_app_id = self.request.headers.get("X-TBA-App-Id")
        if self.x_tba_app_id is None:
            # Fall back to the URL parameter when the header is absent.
            self.x_tba_app_id = self.request.get('X-TBA-App-Id')
        logging.info("X-TBA-App-Id: {}".format(self.x_tba_app_id))
        if not self.x_tba_app_id:
            self._errors = json.dumps({"Error": "X-TBA-App-Id is a required header or URL param. Please see http://www.thebluealliance.com/apidocs for more info."})
            self.abort(400)
        # Expected format: three non-empty colon-separated parts.
        x_tba_app_id_parts = self.x_tba_app_id.split(':')
        if len(x_tba_app_id_parts) != 3 or any(len(part) == 0 for part in x_tba_app_id_parts):
            self._errors = json.dumps({"Error": "X-TBA-App-Id must follow a specific format. Please see http://www.thebluealliance.com/apidocs for more info."})
            self.abort(400)
class ApiTrustedBaseController(webapp2.RequestHandler):
    # Auth types a write request must hold; subclasses override this set.
    REQUIRED_AUTH_TYPES = set()
    def __init__(self, *args, **kw):
        super(ApiTrustedBaseController, self).__init__(*args, **kw)
        # Trusted-API responses are JSON and CORS-enabled for any origin.
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        self.response.headers['Access-Control-Allow-Origin'] = '*'
    def handle_exception(self, exception, debug):
        """
        Handle an HTTP exception and actually writeout a
        response.
        Called by webapp when abort() is called, stops code excution.
        """
        logging.info(exception)
        if isinstance(exception, webapp2.HTTPException):
            self.response.set_status(exception.code)
            self.response.out.write(self._errors)
        else:
            self.response.set_status(500)
    def options(self, event_key):
        """
        Supply an OPTIONS method in order to comply with CORS preflghted requests
        https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Preflighted_requests
        """
        self.response.headers['Access-Control-Allow-Methods'] = "POST, OPTIONS"
        self.response.headers['Access-Control-Allow-Headers'] = 'Content-Type, X-TBA-Auth-Id, X-TBA-Auth-Sig'
    def post(self, event_key):
        # Authenticate the write: both an auth id and a signature are required.
        auth_id = self.request.headers.get('X-TBA-Auth-Id')
        if not auth_id:
            self._errors = json.dumps({"Error": "Must provide a request header parameter 'X-TBA-Auth-Id'"})
            self.abort(400)
        auth_sig = self.request.headers.get('X-TBA-Auth-Sig')
        if not auth_sig:
            self._errors = json.dumps({"Error": "Must provide a request header parameter 'X-TBA-Auth-Sig'"})
            self.abort(400)
        auth = ApiAuthAccess.get_by_id(auth_id)
        # Expected signature is md5(secret + request path + request body).
        # NOTE(review): md5 is a weak digest for request signing; an HMAC
        # would be preferable — confirm before changing the client contract.
        expected_sig = md5.new('{}{}{}'.format(auth.secret if auth else None, self.request.path, self.request.body)).hexdigest()
        if not auth or expected_sig != auth_sig:
            logging.info("Auth sig: {}, Expected sig: {}".format(auth_sig, expected_sig))
            self._errors = json.dumps({"Error": "Invalid X-TBA-Auth-Id and/or X-TBA-Auth-Sig!"})
            self.abort(400)
        # The key must be allowed to edit this specific event...
        allowed_event_keys = [ekey.id() for ekey in auth.event_list]
        if event_key not in allowed_event_keys:
            self._errors = json.dumps({"Error": "Only allowed to edit events: {}".format(', '.join(allowed_event_keys))})
            self.abort(400)
        # ...and must hold every auth type the subclass requires.
        missing_auths = self.REQUIRED_AUTH_TYPES.difference(set(auth.auth_types_enum))
        if missing_auths != set():
            self._errors = json.dumps({"Error": "You do not have permission to edit: {}. If this is incorrect, please contact TBA admin.".format(",".join([AuthType.type_names[ma] for ma in missing_auths]))})
            self.abort(400)
        try:
            self._process_request(self.request, event_key)
        except ParserInputException, e:
            self._errors = json.dumps({"Error": e.message})
            self.abort(400)
| synth3tk/the-blue-alliance | controllers/api/api_base_controller.py | Python | mit | 8,512 |
import logging
from autotest.client import utils
from autotest.client.shared import error
from virttest import libvirt_xml
from virttest import virsh
from virttest import utils_misc
from virttest import utils_test
from virttest import utils_libvirtd
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Keep a pristine copy of the domain XML so it can be restored on exit.
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            # e.g. 'memory_mode' becomes numa_memory['mode']
            numa_memory[mem_param.split('_')[1]] = value
    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()
    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                # Skip (TestNA) when the requested nodeset is not present
                # on this host.
                for i in used_node:
                    if i > max(node_list):
                        raise error.TestNAError("nodeset %s out of range" %
                                                numa_memory['nodeset'])
        # Start numad
        try:
            utils.run("service numad start")
        except error.CmdError, e:
            # Bug 1218149 closed as not a bug, workaround this as in bug
            # comment 12
            logging.debug("start numad failed with %s", e)
            logging.debug("remove message queue of id 0 and try again")
            utils.run("ipcrm msg 0", ignore_status=True)
            utils.run("service numad start")
        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()
        host_cpu_count = utils.count_cpus()
        # Pin vcpu 0 to every host cpu in turn; a failure while numad is
        # running indicates the regression tracked by bug_url.
        for i in range(host_cpu_count):
            ret = virsh.vcpupin(vm_name, 0, i, debug=True, ignore_status=True)
            if ret.exit_status:
                raise error.TestFail("vcpupin failed while numad running, %s"
                                     % bug_url)
        virsh.vcpuinfo(vm_name, debug=True)
    finally:
        # Always stop numad and restore the original guest definition.
        utils.run("service numad stop")
        libvirtd.restart()
        backup_xml.sync()
| will-Do/tp-libvirt | libvirt/tests/src/numa/numad_vcpupin.py | Python | gpl-2.0 | 2,804 |
'''
Copyright 2013 Neil Borle and Paul Lu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-08-12
@author: Neil Borle
'''
from PySide.QtCore import QTimer
class WidgetFader(object):
    '''
    For a given widget in the UI, this fader attaches a timer
    that hides that widget after a specified interval
    '''
    def make_active(self):
        '''
        shows the widget then sets the hide timer
        '''
        self.controls.show()
        # Restart the countdown; after time_out ms make_inactive() fires.
        self.timer.start(self.time_out)
    def make_inactive(self):
        # Timer callback: hide the widget again.
        self.controls.hide()
    def __init__(self, controls, time_out=1500):
        '''
        Constructor

        :param controls: the widget to show/hide
        :param time_out: milliseconds of visibility before auto-hide
        '''
        # Single-shot timer: one hide per activation; restarted by make_active.
        self.timer = QTimer()
        self.timer.timeout.connect(self.make_inactive)
        self.timer.setSingleShot(True)
        self.time_out = time_out
        self.controls = controls
self.controls.hide() | NBor/SkyPython | src/views/WidgetFader.py | Python | apache-2.0 | 1,449 |
#!/usr/bin/env python
# coding=utf-8
class BinanceAPIException(Exception):
    """Raised when the Binance REST API responds with an error payload."""

    def __init__(self, response):
        payload = response.json()
        self.status_code = response.status_code
        self.response = response
        self.code = payload['code']
        self.message = payload['msg']
        self.request = getattr(response, 'request', None)

    def __str__(self):  # pragma: no cover
        return 'APIError(code=%s): %s' % (self.code, self.message)
class BinanceOrderException(Exception):
    """Base class for client-side order validation failures."""

    def __init__(self, code, message):
        self.code = code
        self.message = message

    def __str__(self):
        return 'BinanceOrderException(code=%s): %s' % (self.code, self.message)
class BinanceOrderMinAmountException(BinanceOrderException):
    """Order amount is not a multiple of the allowed step size."""

    def __init__(self, value):
        super(BinanceOrderMinAmountException, self).__init__(
            -1013, "Amount must be a multiple of %s" % value)
class BinanceOrderMinPriceException(BinanceOrderException):
    """Order price is below the minimum allowed price."""

    def __init__(self, value):
        super(BinanceOrderMinPriceException, self).__init__(
            -1013, "Price must be at least %s" % value)
class BinanceOrderMinTotalException(BinanceOrderException):
    """Order total (price * amount) is below the minimum allowed total."""

    def __init__(self, value):
        super(BinanceOrderMinTotalException, self).__init__(
            -1013, "Total must be at least %s" % value)
class BinanceOrderUnknownSymbolException(BinanceOrderException):
    """The requested trading symbol is not known to the exchange."""

    def __init__(self, value):
        super(BinanceOrderUnknownSymbolException, self).__init__(
            -1013, "Unknown symbol %s" % value)
class BinanceOrderInactiveSymbolException(BinanceOrderException):
    """The requested trading symbol exists but is not currently tradable."""

    def __init__(self, value):
        super(BinanceOrderInactiveSymbolException, self).__init__(
            -1013, "Attempting to trade an inactive symbol %s" % value)
| doubleDragon/QuantBot | quant/api/binance_exceptions.py | Python | mit | 1,872 |
'''Train a memory network on the bAbI dataset.
References:
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks",
http://arxiv.org/abs/1502.05698
- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
"End-To-End Memory Networks",
http://arxiv.org/abs/1503.08895
Reaches 98.6% accuracy on task 'single_supporting_fact_10k' after 120 epochs.
Time per epoch: 3s on CPU (core i7).
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.core import Activation, Dense, Merge, Permute, Dropout
from keras.layers.recurrent import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from functools import reduce
import tarfile
import numpy as np
import re
def tokenize(sent):
    '''Return the tokens of a sentence including punctuation.

    >>> tokenize('Bob dropped the apple. Where is the apple?')
    ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
    '''
    # Split on runs of non-word characters, keeping them as tokens.
    # Fix: the original pattern '(\W+)?' can match the empty string, which
    # re.split rejects (ValueError) on Python 3.5/3.6 and splits at every
    # position on 3.7+. '(\W+)' gives the same tokens where the original
    # worked.
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
    '''Parse stories provided in the bAbi tasks format

    If only_supporting is true, only the sentences that support the answer are kept.
    '''
    data = []
    story = []
    for raw_line in lines:
        text = raw_line.decode('utf-8').strip()
        nid, text = text.split(' ', 1)
        if int(nid) == 1:
            # Line id 1 marks the start of a new story.
            story = []
        if '\t' not in text:
            # Plain narrative sentence: accumulate it into the story.
            story.append(tokenize(text))
            continue
        # Question line: tab-separated question, answer, supporting fact ids.
        q, a, supporting = text.split('\t')
        q = tokenize(q)
        if only_supporting:
            # Only select the related substory
            supporting = map(int, supporting.split())
            substory = [story[i - 1] for i in supporting]
        else:
            # Provide all the substories
            substory = [x for x in story if x]
        data.append((substory, q, a))
        story.append('')
    return data
def get_stories(f, only_supporting=False, max_length=None):
    '''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.

    If max_length is supplied, any stories longer than max_length tokens will be discarded.
    '''
    parsed = parse_stories(f.readlines(), only_supporting=only_supporting)
    data = []
    for story, q, answer in parsed:
        # Concatenate all sentences of the story into one token list.
        flat_story = reduce(lambda x, y: x + y, story)
        if not max_length or len(flat_story) < max_length:
            data.append((flat_story, q, answer))
    return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
    """Turn (story, query, answer) triples into padded index arrays plus
    one-hot answer vectors."""
    X, Xq, Y = [], [], []
    for story, query, answer in data:
        X.append([word_idx[w] for w in story])
        Xq.append([word_idx[w] for w in query])
        # index 0 is reserved for padding, hence the +1 on the vector size.
        target = np.zeros(len(word_idx) + 1)
        target[word_idx[answer]] = 1
        Y.append(target)
    return (pad_sequences(X, maxlen=story_maxlen),
            pad_sequences(Xq, maxlen=query_maxlen),
            np.array(Y))
# Download (cached by Keras) and open the bAbI tasks archive.
path = get_file('babi-tasks-v1-2.tar.gz',
                origin='http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz')
tar = tarfile.open(path)
# Supported challenge names mapped to path templates inside the tar;
# '{}' is filled with 'train' or 'test'.
challenges = {
    # QA1 with 10,000 samples
    'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
    # QA2 with 10,000 samples
    'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]
print('Extracting stories for the challenge:', challenge_type)
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))
# Build the vocabulary from every word seen in stories, questions and answers.
vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [answer]) for story, q, answer in train_stories + test_stories)))
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))
print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, query, answer):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')
# 1-based word indices; 0 stays the padding value.
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, answers_train = vectorize_stories(train_stories, word_idx, story_maxlen, query_maxlen)
inputs_test, queries_test, answers_test = vectorize_stories(test_stories, word_idx, story_maxlen, query_maxlen)
print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('answers: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('answers_train shape:', answers_train.shape)
print('answers_test shape:', answers_test.shape)
print('-')
print('Compiling...')
# --- End-To-End Memory Network (Sukhbaatar et al.) built with old-style
# --- Keras Sequential + Merge layers.
# embed the input sequence into a sequence of vectors
input_encoder_m = Sequential()
input_encoder_m.add(Embedding(input_dim=vocab_size,
                              output_dim=64,
                              input_length=story_maxlen))
input_encoder_m.add(Dropout(0.3))
# output: (samples, story_maxlen, embedding_dim)
# embed the question into a sequence of vectors
question_encoder = Sequential()
question_encoder.add(Embedding(input_dim=vocab_size,
                               output_dim=64,
                               input_length=query_maxlen))
question_encoder.add(Dropout(0.3))
# output: (samples, query_maxlen, embedding_dim)
# compute a 'match' between input sequence elements (which are vectors)
# and the question vector sequence
match = Sequential()
match.add(Merge([input_encoder_m, question_encoder],
                mode='dot',
                dot_axes=[(2,), (2,)]))
# output: (samples, story_maxlen, query_maxlen)
# embed the input into a single vector with size = story_maxlen:
input_encoder_c = Sequential()
input_encoder_c.add(Embedding(input_dim=vocab_size,
                              output_dim=query_maxlen,
                              input_length=story_maxlen))
input_encoder_c.add(Dropout(0.3))
# output: (samples, story_maxlen, query_maxlen)
# sum the match vector with the input vector:
response = Sequential()
response.add(Merge([match, input_encoder_c], mode='sum'))
# output: (samples, story_maxlen, query_maxlen)
response.add(Permute((2, 1)))  # output: (samples, query_maxlen, story_maxlen)
# concatenate the match vector with the question vector,
# and do logistic regression on top
answer = Sequential()
answer.add(Merge([response, question_encoder], mode='concat', concat_axis=-1))
# the original paper uses a matrix multiplication for this reduction step.
# we choose to use a RNN instead.
answer.add(LSTM(32))
# one regularization layer -- more would probably be needed.
answer.add(Dropout(0.3))
answer.add(Dense(vocab_size))
# we output a probability distribution over the vocabulary
answer.add(Activation('softmax'))
answer.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# Note: you could use a Graph model to avoid repeat the input twice
answer.fit([inputs_train, queries_train, inputs_train], answers_train,
           batch_size=32,
           nb_epoch=120,
           show_accuracy=True,
           validation_data=([inputs_test, queries_test, inputs_test], answers_test))
| daviddiazvico/keras | examples/babi_memnn.py | Python | mit | 8,097 |
from datetime import datetime, timedelta
import dateutil
import os
from django.test import TestCase
import json
from casexml.apps.case.models import CommCareCase
from corehq.apps.domain.models import Domain
from corehq.apps.hqadmin.dbaccessors import get_all_forms_in_all_domains
from corehq.apps.users.models import CommCareUser
from corehq.util.timezones.conversions import ServerTime
from couchforms.models import XFormInstance
from pact.dot_data import filter_obs_for_day, query_observations, DOTDay, get_dots_case_json
from pact.enums import PACT_DOTS_DATA_PROPERTY, PACT_DOMAIN, XMLNS_DOTS_FORM, XMLNS_PATIENT_UPDATE_DOT, DOT_DAYS_INTERVAL, DOT_NONART, DOT_ART, \
PACT_TIMEZONE
from pact.models import PactPatientCase
from pact.regimen import regimen_dict_from_choice
from pact.utils import submit_xform
NO_PILLBOX_ID = "83bfe01c-9f96-4e25-a1ad-f8164defa5d1"
START_DATE = datetime.strptime("2012-11-17", "%Y-%m-%d")
END_DATE = datetime.strptime("2012-12-17", "%Y-%m-%d")
ANCHOR_DATE = datetime.strptime("2012-12-07", "%Y-%m-%d")
#ANCHOR_DATE = datetime.strptime("7 Dec 2012 05:00:00 GMT", "%d %b %Y ")
CASE_ID = "66a4f2d0e9d5467e34122514c341ed92"
PILLBOX_ID = "a1811d7e-c968-4b63-aea5-6195ce0d8759"
NO_PILLBOX_ID2 = "ea30a77d-389c-4743-b9ae-16e0bdf057de"
START_DATE2 = datetime.strptime("2013-01-01", "%Y-%m-%d")
END_DATE2 = datetime.strptime("2013-1-30", "%Y-%m-%d")
ANCHOR_DATE2 = datetime.strptime("2013-1-22", "%Y-%m-%d")
CTSIMS_ID = 'ff6c662bfc2a448dadc9084056a4abdf'
class dotsSubmissionTests(TestCase):
    def setUp(self):
        # Purge leftover DOT submissions so form counts start from a known state.
        for doc in get_all_forms_in_all_domains():
            # purge all xforms prior to start
            if doc.xmlns in [XMLNS_DOTS_FORM, XMLNS_PATIENT_UPDATE_DOT]:
                doc.delete()
        two_weeks = timedelta(days=14)
        self.domain = Domain()
        self.domain.name = PACT_DOMAIN
        self.domain.is_active = True
        self.domain.date_created = datetime.utcnow() - two_weeks
        self.domain.save()
        self.submit_url = '/a/%s/receiver' % self.domain.name
        self.user = CommCareUser.create(self.domain.name, 'ctsims', 'mockmock', uuid=CTSIMS_ID)
        # Case regimen properties: 3 non-ART doses and 2 ART doses per day.
        nonart_case_regimens = regimen_dict_from_choice(DOT_NONART, "morning,evening,bedtime")
        art_case_regimens = regimen_dict_from_choice(DOT_ART, "morning,noon")
        props= {'_id': CASE_ID, 'dot_status': 'DOT1', 'domain': self.domain.name}
        props.update(nonart_case_regimens)
        props.update(art_case_regimens)
        case = CommCareCase(**props)
        case.save()
        #generate CaseDoc
        # Load the canned XForm submissions used by the tests below.
        self.pillbox_form = ""
        with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'dots_data',
                               '01_pillbox.xml')) as fin:
            self.pillbox_form = fin.read()
        self.no_pillbox_form = ""
        with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'dots_data',
                               '02_no_pillbox.xml')) as fin:
            self.no_pillbox_form = fin.read()
        self.no_pillbox_form2 = ""
        with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'dots_data',
                               '03_no_pillbox.xml')) as fin:
            self.no_pillbox_form2 = fin.read()
    def tearDown(self):
        # Remove the case and user created in setUp directly from the db.
        CommCareCase.get_db().delete_doc(CASE_ID)
        CommCareUser.get_db().delete_doc(CTSIMS_ID)
        self.user = None
    def testSignal(self):
        """
        Test to ensure that with a DOT submission the signal works
        """
        # Baseline counts of DOT and patient-update forms before submitting.
        start_dot = len(XFormInstance.view(
            'reports_forms/all_forms',
            startkey=['submission xmlns', self.domain.name, XMLNS_DOTS_FORM],
            endkey=['submission xmlns', self.domain.name, XMLNS_DOTS_FORM, {}],
            reduce=False
        ).all())
        start_update = len(XFormInstance.view(
            'reports_forms/all_forms',
            startkey=['submission xmlns', self.domain.name, XMLNS_PATIENT_UPDATE_DOT],
            endkey=['submission xmlns', self.domain.name, XMLNS_PATIENT_UPDATE_DOT, {}],
            reduce=False
        ).all())
        submit_xform(self.submit_url, self.domain.name, self.pillbox_form)
        submitted = XFormInstance.get(PILLBOX_ID)
        # The signal should have annotated the submission with pact DOTS data.
        self.assertTrue(hasattr(submitted, PACT_DOTS_DATA_PROPERTY))
        dot_count = XFormInstance.view(
            'reports_forms/all_forms',
            startkey=['submission xmlns', self.domain.name, XMLNS_DOTS_FORM],
            endkey=['submission xmlns', self.domain.name, XMLNS_DOTS_FORM, {}],
        ).all()[0]['value']
        update_count = XFormInstance.view(
            'reports_forms/all_forms',
            startkey=['submission xmlns', self.domain.name, XMLNS_PATIENT_UPDATE_DOT],
            endkey=['submission xmlns', self.domain.name, XMLNS_PATIENT_UPDATE_DOT, {}],
        ).all()[0]['value']
        # One DOT submission should produce exactly one patient update, so
        # both counts grow by one (total +2).
        self.assertEquals(dot_count, update_count)
        self.assertEquals(start_dot+start_update + 2, dot_count + update_count)
        casedoc = CommCareCase.get(CASE_ID)
        self.assertEqual(casedoc.xform_ids[-2], PILLBOX_ID)
        # The last form on the case is the generated patient-update form.
        computed_submit = XFormInstance.get(casedoc.xform_ids[-1])
        self.assertEqual(computed_submit.xmlns, XMLNS_PATIENT_UPDATE_DOT)
    def testNoPillboxCheckFirst(self):
        """
        Test the dot map function that the no-pillbox checker is faithfully returning DOT data in the calendar thanks to the view
        """
        bundle = {"xml": self.no_pillbox_form, "start_date": START_DATE, "end_date": END_DATE, "xform_id": NO_PILLBOX_ID, "anchor_date": END_DATE}
        self._doTestNoPillbox(bundle)
    def testNoPillboxCheckSecond(self):
        # Same scenario as testNoPillboxCheckFirst with the second canned form
        # and its own date window.
        bundle = {"xml": self.no_pillbox_form2, "start_date": START_DATE2, "end_date": END_DATE2, "xform_id": NO_PILLBOX_ID2, "anchor_date": END_DATE2}
        self._doTestNoPillbox(bundle)
    def _doTestNoPillbox(self, bundle):
        # Submit the bundled form, then verify the generated observations
        # match the case regimens (3 non-ART + 2 ART).
        submit_xform(self.submit_url, self.domain.name, bundle['xml'])
        submitted = XFormInstance.get(bundle['xform_id'])
        self.assertTrue(hasattr(submitted, PACT_DOTS_DATA_PROPERTY))
        observations = query_observations(CASE_ID, bundle['start_date'], bundle['end_date'])
        observed_dates = set()
        #assume to be five - 3,2 same as the regimen count, we are refilling empties
        self.assertEqual(5, len(observations), msg="Observations do not match regimen count: %d != %d" % ( 5, len(observations)))
        art_nonart = set()
        for obs in observations:
            observed_dates.add(obs.observed_date)
            self.assertEquals(obs.day_note, "No check, from form") #magic string from the view to indicate a generated DOT observation from form data.
            art_nonart.add(obs.is_art)
            self.assertEquals(obs.doc_id, bundle['xform_id'])
        # Exactly one answered observation per regimen type; the rest are
        # "unchecked" fillers.
        art = filter(lambda x: x.is_art, observations)
        self.assertEquals(2, len(art))
        art_answered = filter(lambda x: x.adherence != "unchecked", art)
        self.assertEquals(1, len(art_answered))
        nonart = filter(lambda x: not x.is_art, observations)
        self.assertEquals(3, len(nonart))
        nonart_answered = filter(lambda x: x.adherence != "unchecked", nonart)
        self.assertEquals(1, len(nonart_answered))
        #this only does SINGLE observations for art and non art
        self.assertEquals(len(observed_dates), 1)
        self.assertEquals(len(art_nonart), 2)
        # inspect the regenerated submission and ensure the built xml block is correctly filled.
        case_json = get_dots_case_json(PactPatientCase.get(CASE_ID), anchor_date=bundle['anchor_date'])
        for day_delta in range(DOT_DAYS_INTERVAL):
            ret_index = DOT_DAYS_INTERVAL - day_delta - 1
            day_arr = case_json['days'][ret_index]
            nonart_day_data = day_arr[0]
            art_day_data = day_arr[1]
            self.assertEquals(len(nonart_day_data), 3)
            self.assertEquals(len(art_day_data), 2)
    def testDOTFormatConversion(self):
        """
        When a DOT submission comes in, it gets sliced into the CObservations
        and put into the DOTDay format.
        On resubmit/recompute, it's transmitted back into the packed json format and sent back to the phone and resubmitted with new data.
        This test confirms that the conversion process works.
        """
        self.testSignal()
        submitted = XFormInstance.get(PILLBOX_ID)
        orig_data = getattr(submitted, PACT_DOTS_DATA_PROPERTY)['dots']
        del orig_data['anchor']  # can't reproduce gmt offset
        #hack, bootstrap the labels manually
        casedoc = PactPatientCase.get(CASE_ID)
        casedoc.nonartregimen = 3
        casedoc.dot_n_one = 0
        casedoc.dot_n_two = 2
        casedoc.dot_n_three = 3
        casedoc.dot_n_four = None
        casedoc.artregimen = 2
        casedoc.dot_a_one = 0
        casedoc.dot_a_two = 1
        casedoc.dot_a_three = ''
        casedoc.dot_a_four = None
        # Round-trip through json to normalize types before comparing.
        computed_json = json.loads(
            json.dumps(get_dots_case_json(casedoc, anchor_date=ANCHOR_DATE)))
        del computed_json['anchor']
        for k in orig_data.keys():
            if k != 'days':
                self.assertEquals(orig_data[k], computed_json[k])
        # Finally the full serialized payloads must match byte-for-byte.
        self.assertEquals(json.dumps(orig_data), json.dumps(computed_json))
    def testPillboxCheck(self):
        """
        This test tries to accomplish a few things
        0: ensure the content of the signal is correctly set
        1: verify that the dots_observations view is working correctly in reporting dates correctly back in order
        2: Ensure that if you fill out the questions on the form, it won't redundantly fill out the pillbox cells again as well
        3: ensure that proper ART/NonART sequences are put in the correct buckets when combined into a "DOTS Day" cell
        todo: get label day_slot to work correctly
        """
        #check to make sure that 0th and nth elements are where they ought to be
        #hit the VIEW to make sure it's there
        #make sure the pact_dots_data signal is working
        #check no pillbox check entries that entries show up, and NOTHING more.
        #ensure signal works
        #todo: labeling checks
        self.testSignal()
        observations = query_observations(CASE_ID, START_DATE, END_DATE)
        td = END_DATE - START_DATE
        def check_obs_props(obs, props):
            # Helper: assert that every (attribute, expected value) pair in
            # `props` matches the corresponding attribute on observation `obs`.
            for k, v in props.items():
                if k.endswith("_date"):
                    # datetime check: compare dates only, not exact times
                    obs_datetime = getattr(obs, k)
                    val_datetime = dateutil.parser.parse(v)
                    if k in ('completed_date', 'created_date'):
                        # completed/created are server-time; shift to the PACT
                        # timezone before taking the date.
                        obs_datetime = ServerTime(obs_datetime).user_time(PACT_TIMEZONE).done()
                    obs_date = obs_datetime.date()
                    val_date = val_datetime.date()
                    self.assertEquals(obs_date, val_date)
                else:
                    self.assertEquals(getattr(obs, k), v,
                                      msg="Error, observation %s\n\t%s didn't match: %s != %s" % (
                                          json.dumps(obs.to_json(), indent=4), k, getattr(obs, k),
                                          v))
        # Walk each day in the observation window and spot-check the first
        # day, the second-to-last day, and the anchor day.
        for d in range(td.days):
            this_day = START_DATE + timedelta(days=d)
            day_submissions = filter_obs_for_day(this_day.date(), observations)
            day_data = DOTDay.merge_from_observations(day_submissions)
            if this_day.date() == START_DATE.date():
                art_first = day_data.art.dose_dict[1][0]
                art_first_check_props = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 2,
                    "day_note": "art first noon",
                    "day_index": 20,
                    "note": "",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "pillbox",
                    "observed_date": "2012-11-17T05:00:00Z",
                    #"day_slot": 1,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "partial",
                    "dose_number": 1, #zero indexed
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": True,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(art_first, art_first_check_props)
                non_art_first_1 = day_data.nonart.dose_dict[1][0]
                non_art_first_1_props = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 3,
                    "day_note": "non art first evening",
                    "day_index": 20,
                    "note": "",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "pillbox",
                    "observed_date": "2012-11-17T05:00:00Z",
                    # "day_slot": 2,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "partial",
                    "dose_number": 1,
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": False,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(non_art_first_1, non_art_first_1_props)
                non_art_first_2 = day_data.nonart.dose_dict[2][0]
                non_art_first_2_props = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 3,
                    "day_note": "non art bedtime first",
                    "day_index": 20,
                    "note": "",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "pillbox",
                    "observed_date": "2012-11-17T05:00:00Z",
                    # "day_slot": 3,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "partial",
                    "dose_number": 2,
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": False,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(non_art_first_2, non_art_first_2_props)
            if this_day.date() == (ANCHOR_DATE - timedelta(days=1)).date():
                self.assertEquals(len(day_data.art.dose_dict.keys()),
                                  2) # two doses, one for the answered, another for unchecked
                art_slast = day_data.art.dose_dict[0][0]
                art_slast_props = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 2,
                    "day_note": "2nd to last last filled by questions",
                    "day_index": 1,
                    "note": "",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "pillbox",
                    "observed_date": "2012-12-06T05:00:00Z",
                    "day_slot": -1,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "partial",
                    "dose_number": 0,
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": True,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(art_slast, art_slast_props)
                nonart_slast0 = day_data.nonart.dose_dict[0][0]
                non_art0 = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 3,
                    "day_note": "",
                    "day_index": 1,
                    "note": "",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "direct",
                    "observed_date": "2012-12-06T05:00:00Z",
                    # "day_slot": -1,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "empty",
                    "dose_number": 0,
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": False,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(nonart_slast0, non_art0)
                nonart_slast1 = day_data.nonart.dose_dict[1][0]
                non_art1 = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 3,
                    "day_note": "non art noon second to last",
                    "day_index": 1,
                    "note": "",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "pillbox",
                    "observed_date": "2012-12-06T05:00:00Z",
                    # "day_slot": -1,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "partial",
                    "dose_number": 1,
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": False,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(nonart_slast1, non_art1)
                nonart_slast2 = day_data.nonart.dose_dict[2][0]
                non_art2 = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 3,
                    "day_note": "art evening second to last",
                    "day_index": 1,
                    "note": "",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "pillbox",
                    "observed_date": "2012-12-06T05:00:00Z",
                    # "day_slot": -1,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "partial",
                    "dose_number": 2,
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": False,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(nonart_slast2, non_art2)
            if this_day.date() == ANCHOR_DATE.date():
                self.assertEqual(len(day_data.nonart.dose_dict[0]), 1)
                non_art_last = day_data.nonart.dose_dict[0][0]
                non_art_last_props = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 3,
                    "day_note": "",
                    "day_index": 0,
                    "note": "Anchor same",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "direct",
                    "observed_date": "2012-12-07T05:00:00Z",
                    #"day_slot": -1,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "partial",
                    "dose_number": 0,
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": False,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(non_art_last, non_art_last_props)
                non_art_last_noon = day_data.nonart.dose_dict[1][0]
                non_art_last_noon_props = {
                    "encounter_date": "2012-12-07T05:00:00Z",
                    "total_doses": 3,
                    "day_note": "non art noon last",
                    "day_index": 0,
                    "note": "Anchor same",
                    "pact_id": "999999",
                    "provider": "ctsims",
                    "method": "pillbox",
                    "observed_date": "2012-12-07T05:00:00Z",
                    # "day_slot": -1,
                    "completed_date": "2012-12-16T22:00:28.847000Z",
                    "adherence": "partial",
                    "dose_number": 1,
                    # "doc_type": "CObservation",
                    "is_reconciliation": False,
                    "anchor_date": "2012-12-07T05:00:00Z",
                    "created_date": "2012-12-16T21:37:52.771000Z",
                    "is_art": False,
                    "_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759",
                    "doc_id": "a1811d7e-c968-4b63-aea5-6195ce0d8759"
                }
                check_obs_props(non_art_last_noon, non_art_last_noon_props)
        #todo: check reconciliation?
        pass
| puttarajubr/commcare-hq | custom/_legacy/pact/tests/dot_submission.py | Python | bsd-3-clause | 22,865 |
from __future__ import unicode_literals
from django.db import models
from .indian_states import STATE_CHOICES
from django.contrib.auth.models import User
class Market(models.Model):
    """
    Market details, it consists of just Indian states.

    A market is a named trading location pinned to a geographic coordinate
    and administered by a moderator (a Django auth User).
    """
    class Meta:
        # A market is uniquely identified by its geographic location.
        unique_together = ('latitude', 'longitude')
    def __str__(self):
        return '%s' % (self.market_name)
    market_id = models.AutoField(primary_key=True)
    # NOTE(review): max_length on a TextField is not enforced at the DB level
    # (Django only uses it for form widgets) — a CharField may be intended.
    market_name = models.TextField(max_length=80, unique=True)
    region = models.TextField(max_length=60)
    # Two-letter state code; choices come from indian_states.STATE_CHOICES.
    state = models.CharField(max_length=2, choices=STATE_CHOICES)
    latitude = models.DecimalField(max_digits=11, decimal_places=8)
    longitude = models.DecimalField(max_digits=11, decimal_places=8)
    # NOTE(review): no on_delete given (pre-Django-2.0 style); defaults to CASCADE.
    moderator = models.ForeignKey(User)
class Item(models.Model):
    """
    A single produce item (fruit or vegetable) that can be priced in markets.
    """
    item_name = models.CharField(max_length=30, unique=True)
    item_id = models.AutoField(primary_key=True)
    item_image_url = models.URLField(default='http://img.webmd.com/dtmcms/live/webmd/consumer_assets/site_images/articles/health_tools/12_powerhouse_vegetables_slideshow/intro_cream_of_crop.jpg')

    def __str__(self):
        # Display the item by its human-readable name.
        return '{}'.format(self.item_name)
class LocalMarketData(models.Model):
    """
    Contains price for every item available in a given market.
    """
    class Meta:
        # Each (item, market) pair may have at most one price entry.
        unique_together = ('item_id', 'market_id')
    def __str__(self):
        return '%s at %s' % (self.item_id, self.market_id)
    # NOTE(review): these fields hold model instances, so the conventional
    # names would be `item` / `market`; Django appends `_id` for the columns.
    item_id = models.ForeignKey(Item, on_delete=models.CASCADE)
    market_id = models.ForeignKey(Market, on_delete=models.CASCADE)
    # Price in whole currency units (non-negative integer).
    price = models.PositiveIntegerField()
| sachinkumar123/approprate | cloudApp/interface/models.py | Python | mit | 1,688 |
#-------------------------------------------------------------------------------
#
# This file is part of pygimplib.
#
# Copyright (C) 2014, 2015 khalim19 <khalim19@gmail.com>
#
# pygimplib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygimplib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygimplib. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
"""
This module defines a class to filter objects according to specified filter
rules.
"""
#===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
str = unicode  # Python 2 shim: make bare `str` refer to unicode throughout this module.
#===============================================================================
import inspect
from contextlib import contextmanager
#===============================================================================
class ObjectFilter(object):
  
  """
  This class is a filter containing a set of rules that determines whether
  a given object matches the rules or not (using the `is_match()` method).
  
  Attributes:
  
  * `match_type` (read-only) - Match type. Possible match types:
    
    * MATCH_ALL - For `is_match()` to return True, the object must match
      all rules.
    
    * MATCH_ANY - For `is_match()` to return True, the object must match
      at least one rule.
  
  For greater flexibility, the filter can also contain nested `ObjectFilter`
  objects, called "subfilters", each with their own set of rules and match type.
  """
  
  __MATCH_TYPES = MATCH_ALL, MATCH_ANY = (0, 1)
  
  def __init__(self, match_type):
    """
    Initialize the filter with the specified match type.
    
    Raises:
    
    * `ValueError` - `match_type` is not one of MATCH_ALL, MATCH_ANY.
      (Previously an invalid match type made `is_match()` silently
      return None; now it is rejected up front.)
    """
    
    if match_type not in self.__MATCH_TYPES:
      raise ValueError("invalid match type: " + str(match_type))
    
    self._match_type = match_type
    
    # Key: function (rule_func) or subfilter name
    # Value: tuple (rule_func_args) or ObjectFilter instance (a subfilter)
    self._filter_items = {}
  
  @property
  def match_type(self):
    return self._match_type
  
  def has_rule(self, rule_func):
    # Note: rules and subfilters share one dict, so this also returns True
    # if `rule_func` happens to equal a subfilter name.
    return rule_func in self._filter_items
  
  def add_rule(self, rule_func, *rule_func_args):
    """
    Add the specified rule as a function to the filter.
    
    If `rule_func` already exists in the filter, nothing happens.
    
    If you need to later remove the rule from the filter (using the
    `remove_rule()` method), pass a named function rather than an inline lambda
    expression. Alternatively, you can use `add_rule_temp()` for temporary
    filters.
    
    Parameters:
    
    * `rule_func` - Function to filter objects by. The function must always have
      at least one argument - the object to match (used by the `is_match()`
      method).
    
    * `*rule_func_args` - Arguments for the `rule_func` function.
    
    Raises:
    
    * `TypeError` - `rule_func` is not callable, or it does not have at least
      one argument.
    """
    
    if self.has_rule(rule_func):
      return
    
    if not callable(rule_func):
      raise TypeError("not a function")
    
    # `inspect.getargspec` was removed in Python 3.11; prefer
    # `getfullargspec` when available and fall back to `getargspec`
    # (Python 2). Both return the positional argument names at index 0.
    getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    if len(getargspec(rule_func)[0]) < 1:
      raise TypeError("function must have at least one argument (the object to match)")
    
    self._filter_items[rule_func] = rule_func_args
  
  def remove_rule(self, rule_func, raise_if_not_found=True):
    """
    Remove the rule (`rule_func` function) from the filter.
    
    Parameters:
    
    * `rule_func` - Function to remove from the filter.
    
    * `raise_if_not_found` - If True, raise `ValueError` if `rule_func` is not
      found in the filter.
    
    Raises:
    
    * `ValueError` - `rule_func` is not found in the filter and
      `raise_if_not_found` is True.
    """
    
    if self.has_rule(rule_func):
      del self._filter_items[rule_func]
    else:
      if raise_if_not_found:
        raise ValueError("\"" + str(rule_func) + "\" not found in filter")
  
  @contextmanager
  def add_rule_temp(self, rule_func, *rule_func_args):
    """
    Temporarily add a rule. Use as a context manager:
    
      with filter.add_rule_temp(rule_func):
        # do stuff
    
    If `rule_func` already exists in the filter, the existing rule will not be
    overridden and will not be removed.
    
    Parameters:
    
    * `rule_func` - Function to filter objects by. The function must always have
      at least one argument - the object to match (used by the `is_match()`
      method).
    
    * `*rule_func_args` - Arguments for the `rule_func` function.
    
    Raises:
    
    * `TypeError` - `rule_func` is not callable, or it does not have at least
      one argument.
    """
    
    has_rule_already = self.has_rule(rule_func)
    if not has_rule_already:
      self.add_rule(rule_func, *rule_func_args)
    try:
      yield
    finally:
      if not has_rule_already:
        self.remove_rule(rule_func)
  
  @contextmanager
  def remove_rule_temp(self, rule_func, raise_if_not_found=True):
    """
    Temporarily remove a rule. Use as a context manager:
    
      with filter.remove_rule_temp(rule_func):
        # do stuff
    
    The rule (with its original arguments) is restored on exit.
    
    Parameters:
    
    * `rule_func` - Function to remove from the filter.
    
    * `raise_if_not_found` - If True, raise `ValueError` if `rule_func` is not
      in the filter.
    
    Raises:
    
    * `ValueError` - `rule_func` is not found in the filter and
      `raise_if_not_found` is True.
    """
    
    has_rule = self.has_rule(rule_func)
    if not has_rule:
      if raise_if_not_found:
        raise ValueError("\"" + str(rule_func) + "\" not found in filter")
    else:
      # Remember the rule's arguments so it can be re-added on exit.
      rule_func_args = self._filter_items[rule_func]
      self.remove_rule(rule_func)
    try:
      yield
    finally:
      if has_rule:
        self.add_rule(rule_func, *rule_func_args)
  
  def has_subfilter(self, subfilter_name):
    return subfilter_name in self._filter_items
  
  def add_subfilter(self, subfilter_name, subfilter):
    """
    Add the specified subfilter (`ObjectFilter` instance) to the filter.
    
    The subfilter can be later accessed by the `get_subfilter` method.
    
    Raises:
    
    * `ValueError` - `subfilter_name` already exists in the filter, or
      `subfilter` is not an `ObjectFilter` instance.
    """
    
    if self.has_subfilter(subfilter_name):
      raise ValueError("subfilter named \"" + str(subfilter_name) + "\" already exists in the filter")
    
    if not isinstance(subfilter, ObjectFilter):
      raise ValueError("subfilter named \"" + str(subfilter_name) + "\" is not a subfilter")
    
    self._filter_items[subfilter_name] = subfilter
  
  def get_subfilter(self, subfilter_name):
    """
    Get the subfilter specified by its name.
    
    Raises:
    
    * `ValueError` - `subfilter_name` does not exist in the filter.
    """
    
    if not self.has_subfilter(subfilter_name):
      raise ValueError("subfilter named \"" + str(subfilter_name) + "\" not found in filter")
    
    item = self._filter_items[subfilter_name]
    
    return item
  
  # Provide alias to `get_subfilter` for easier access.
  __getitem__ = get_subfilter
  
  def remove_subfilter(self, subfilter_name, raise_if_not_found=True):
    """
    Remove the subfilter with the corresponding subfilter name.
    
    Parameters:
    
    * `subfilter name` - Subfilter name.
    
    * `raise_if_not_found` - If True, raise `ValueError` if `subfilter_name`
      is not found in the filter.
    
    Raises:
    
    * `ValueError` - `subfilter_name` is not found in the filter and
      `raise_if_not_found` is True.
    """
    
    if self.has_subfilter(subfilter_name):
      del self._filter_items[subfilter_name]
    else:
      if raise_if_not_found:
        raise ValueError("subfilter named \"" + str(subfilter_name) + "\" not found in filter")
  
  @contextmanager
  def add_subfilter_temp(self, subfilter_name, subfilter):
    """
    Temporarily add a subfilter. Use as a context manager:
    
      with filter.add_subfilter_temp(subfilter_name, subfilter):
        # do stuff
    
    Raises:
    
    * `ValueError` - `subfilter_name` already exists in the filter, or
      `subfilter` is not an `ObjectFilter` instance.
    """
    
    self.add_subfilter(subfilter_name, subfilter)
    try:
      yield
    finally:
      self.remove_subfilter(subfilter_name)
  
  @contextmanager
  def remove_subfilter_temp(self, subfilter_name, raise_if_not_found=True):
    """
    Temporarily remove a subfilter. Use as a context manager:
    
      with filter.remove_subfilter_temp(subfilter_name):
        # do stuff
    
    The subfilter is restored on exit.
    
    Parameters:
    
    * `subfilter name` - Subfilter name.
    
    * `raise_if_not_found` - If True, raise `ValueError` if `subfilter_name`
      is not found in the filter.
    
    Raises:
    
    * `ValueError` - `subfilter_name` is not found in the filter and
      `raise_if_not_found` is True.
    """
    
    has_subfilter = self.has_subfilter(subfilter_name)
    if not has_subfilter:
      if raise_if_not_found:
        raise ValueError("subfilter named \"" + str(subfilter_name) + "\" not found in filter")
    else:
      subfilter = self._filter_items[subfilter_name]
      self.remove_subfilter(subfilter_name)
    try:
      yield
    finally:
      if has_subfilter:
        self.add_subfilter(subfilter_name, subfilter)
  
  def is_match(self, object_to_match):
    """
    If `match_type` attribute is `MATCH_ALL`, return True if `object_to_match`
    matches all specified filter rules and all top-level subfilters return True.
    Otherwise return False.
    
    If `match_type` attribute is `MATCH_ANY`, return True if `object_to_match`
    matches at least one specified filter rule or at least one top-level
    subfilter returns True. Otherwise return False.
    
    If no filter rules are specified, return True.
    """
    
    if not self._filter_items:
      return True
    
    # `match_type` is validated in `__init__`, so exactly one branch runs.
    if self._match_type == self.MATCH_ALL:
      return self._is_match_all(object_to_match)
    elif self._match_type == self.MATCH_ANY:
      return self._is_match_any(object_to_match)
  
  def _is_match_all(self, object_to_match):
    # AND together every rule and subfilter; short-circuit on first failure.
    is_match = True
    
    for key, value in self._filter_items.items():
      if isinstance(value, ObjectFilter):
        is_match = is_match and value.is_match(object_to_match)
      else:
        # key = rule_func, value = rule_func_args
        is_match = is_match and key(object_to_match, *value)
      if not is_match:
        break
    
    return is_match
  
  def _is_match_any(self, object_to_match):
    # OR together every rule and subfilter; short-circuit on first success.
    is_match = False
    
    for key, value in self._filter_items.items():
      if isinstance(value, ObjectFilter):
        is_match = is_match or value.is_match(object_to_match)
      else:
        # key = rule_func, value = rule_func_args
        is_match = is_match or key(object_to_match, *value)
      if is_match:
        break
    
    return is_match
| Buggaboo/gimp-plugin-export-layers | export_layers/pygimplib/objectfilter.py | Python | gpl-3.0 | 11,296 |
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from dgw.evaluation.resampling import extend_point, shrink_to_a_single_point
class TestExtending(unittest.TestCase):
    """Unit tests for extend_point() and shrink_to_a_single_point()."""

    def test_extend_point(self):
        # Repeating index 4 three times in a 1-D array.
        original = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        expected = np.array([0, 1, 2, 3, 4, 4, 4, 5, 6, 7, 8, 9, 10])
        assert_array_equal(expected, extend_point(original, 4, 3))
        # Multi-dimensional input: the whole row is duplicated.
        original = np.array([[1, 2], [2, 3], [3, 4]])
        expected = np.array([[1, 2], [2, 3], [2, 3], [3, 4]])
        assert_array_equal(expected, extend_point(original, 1, 2))

    def test_extend_point_left_boundary(self):
        # Extending the first element pads on the left edge.
        original = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
        expected = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8])
        assert_array_equal(expected, extend_point(original, 0, 4))

    def test_extend_point_right_boundary(self):
        # Extending the last element pads on the right edge.
        original = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
        expected = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8])
        assert_array_equal(expected, extend_point(original, 8, 4))

    def test_shrink_to_single_point(self):
        # A run of identical values collapses to one value.
        run_of_equal = np.array([0, 1, 2, 3, 3, 3, 3, 4, 5, 6])
        expected = np.array([0, 1, 2, 3, 4, 5, 6])
        assert_array_equal(expected, shrink_to_a_single_point(run_of_equal, 3, 4))
        # Differing values in the window are averaged together.
        run_of_mixed = np.array([0, 1, 2, 3, 8, 9, 10, 4, 5, 6])
        expected = np.array([0, 1, 2, np.mean([3, 8, 9, 10]), 4, 5, 6])
        assert_array_equal(expected, shrink_to_a_single_point(run_of_mixed, 3, 4))
        # Multi-dimensional input: duplicated rows collapse to one row.
        original = np.array([[1, 2], [2, 3], [2, 3], [3, 4]])
        expected = np.array([[1, 2], [2, 3], [3, 4]])
        assert_array_equal(expected, shrink_to_a_single_point(original, 1, 2))

    def test_shrink_to_single_point_boundary(self):
        # Points beyond the array's end are ignored when shrinking.
        original = np.array([0, 1, 2, 3, 4, 5, 6, 6])
        expected = np.array([0, 1, 2, 3, 4, 5, 6])
        assert_array_equal(expected, shrink_to_a_single_point(original, 6, 4))
| lukauskas/dgw | dgw/evaluation/tests/test_resampling.py | Python | gpl-3.0 | 1,877 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Brian McClure
#
# django-media-helper is free software under terms of the MIT License.
#
# Fork of django-cleanup
from setuptools import setup, find_packages
description = """# **django-media-helper** #
When dealing with content from unacquainted sources(e.g., clients or designers)
one often gets images with absurd dimensions and/or filesizes: A 3000px-wide
play-button, a 10MB logo, etc. Media-helper attempts to mitigate this problem
by automating image-resizing, delivering the most appropriately sized image to
the browser.
It is also designed to be dropped into existing projects with minimal effort.
It's still in the alpha stage, but if you're careful it might make your life a
little bit easier while also speeding up your load times and reducing data
transfer."""

# NOTE: the old distutils-style `requires` keyword used here previously is
# metadata only and is ignored by pip; real runtime dependencies must be
# declared with `install_requires`, and the interpreter constraint with
# `python_requires`.
setup(
    name = 'django-media-helper',
    version = '0.3.2',
    packages = find_packages(),
    include_package_data=True,
    python_requires = '>=2.7',
    install_requires = ['Django>=1.8', 'Pillow>=2.1.0'],
    description = 'An image resizing and management app for Django',
    long_description = description,
    author = 'Brian McClure',
    author_email = 'django@jetbrains.com',
    url = 'https://github.com/brmc/django-media-helper',
    download_url = 'https://github.com/brmc/django-media-helper.git',
    license = 'MIT License',
    keywords = 'django, imaging, ajax',
    classifiers = [
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
| brmc/django-media-helper | setup.py | Python | mit | 1,794 |
#============================================================================
# Name : circ-pic.py
# Author : Luke Mondy
# ============================================================================
#
# Copyright (C) 2012 Mondy Luke
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ============================================================================
from __future__ import division
import sys
import Image, ImageDraw
import argparse
import numpy as np
from math import sqrt
HAVE_PYPRIND = True
try:
import pyprind
except ImportError:
HAVE_PYPRIND = False
LOGGING = False
def log(message):
    """Print `message` and flush stdout immediately, but only when the
    module-level LOGGING flag is enabled."""
    # `global` is only needed for assignment; reading LOGGING works without it.
    if LOGGING:
        # Parenthesized single-argument print is valid on both Python 2
        # (parenthesized expression) and Python 3 (function call), unlike
        # the Python-2-only `print message` statement used before.
        print(message)
        sys.stdout.flush()
def getImage(image, scale=1.0, grey=True):
    """Open `image`, optionally rescaling it by `scale` and converting it to
    8-bit greyscale, and return the resulting PIL image."""
    try:
        log("Opening image: %s" % image)
        img = Image.open(image)
    except Exception as e:
        error_msg = ("Image file you provided:\n{image}\ndoes not exist! Here's what the computer"
                     "says:\n{exception}".format(image=image, exception=e))
        sys.exit(error_msg)
    if scale != 1.0:
        # Scale both dimensions, truncating to whole pixels.
        scaled_size = tuple(int(dim * scale) for dim in img.size)
        img = img.resize(scaled_size)
    if grey:
        img = img.convert('L')
    return img
def overlapping(c1, c2):
    """Return True if circles `c1` and `c2` overlap (tangency not included).

    Circles are (x, y, radius) tuples. The comparison is done on squared
    distances, removing the sqrt() call the original flagged as a hot spot;
    since radii are non-negative, comparing squares preserves the ordering.
    """
    dx = c2[0] - c1[0]
    dy = c2[1] - c1[1]
    radii = c1[2] + c2[2]
    return radii * radii > dx * dx + dy * dy
def render(circles, path, params, imsize):
    """Draw the circle field `circles` (a 2-D array of radii indexed [x, y])
    onto a new RGB image of size `imsize` and save it to params['outimg'].

    `path` is unused here.  If params['bgimg'] is set, each circle is filled
    with the colour of the corresponding pixel of that (resized) image;
    otherwise circles are filled white.
    """
    log("Rendering...")
    if params['bgimg']:
        # Resize the colour-source image to match so pixel lookups line up.
        bg = getImage(params['bgimg'], grey=False)
        bgim = bg.resize(imsize)
        bgpix = bgim.load()
    # Clamp the background grey level into [0, 255] and expand to RGB.
    col = params['bgcolour']
    col = 255 if col > 255 else col
    col = 0 if col < 0 else col
    bgcolour = (col, col, col)
    outline = (0, 0, 0)
    if params['nooutline']:
        outline = None
    final = Image.new('RGB', imsize, bgcolour)
    draw = ImageDraw.Draw(final)
    im_x, im_y = imsize
    for y in range(im_y):
        for x in range(im_x):
            circle_radius = circles[x,y]
            if circle_radius != 0:
                # Bounding box of the circle centred at (x, y).
                bb = (x - circle_radius, y - circle_radius,
                      x + circle_radius, y + circle_radius)
                fill = bgpix[x, y] if params['bgimg'] else (255, 255, 255)
                draw.ellipse(bb, fill=fill, outline=outline)
    del draw
    final.save(params['outimg'])
def circlerise(params):
    """Convert params['circimg'] into a field of non-overlapping circles whose
    radii follow pixel brightness, then hand the result to render()."""
    global LOGGING
    global HAVE_PYPRIND
    interval = params['interval']
    maxrad = params['maxrad']
    scale = params['scale']
    im = getImage(params['circimg'], scale)
    pixels = im.load()
    # circles[x, y] holds the radius of a circle centred at (x, y); 0 = none.
    circles = np.zeros(im.size, int)
    """
    === Algorithm ===
    For each pixel in the original image, determine its
    "grey" brightness, and determine an appropriate radius
    for that.
    Now look in the local region for other circles (local
    is determined by the max_radius of other circles + the
    radius of the current potential circle).
    If there is some circles nearby, check to see if the
    new circle will overlap with it or not. If all nearby
    circles won't overlap, then record the radius in a 2D
    array that corresponds to the image.
    """
    im_x, im_y = im.size
    skips = 0
    if LOGGING and HAVE_PYPRIND :
        progress = pyprind.ProgBar(im_y, stream=1)
    for y in range(0, im_y, interval):
        # closeness counts columns visited since the last circle was placed
        # on this row, so near-misses inside the previous circle can be
        # skipped without any distance computation.
        prev_rad = 0
        closeness = 0
        for x in range(0, im_x, interval):
            closeness += 1
            # Determine radius: brighter pixel -> bigger circle, minimum 1.
            greyval = pixels[x, y]
            radius = int(maxrad * (greyval/255))
            if radius == 0:
                radius = 1
            # If we are still going to be inside the last circle
            # placed on the same X row, save time and skip.
            if prev_rad + radius >= closeness:
                skips += 1
                continue
            bb = [x - radius - maxrad, # Define bounding box.
                  y - radius - maxrad,
                  x + radius + maxrad,
                  y + radius + maxrad]
            if bb[0] < 0: # Ensure the bounding box is OK with
                bb[0] = 0 # edges. We don't need to check the
            if bb[1] < 0: # outer edges because it's OK for the
                bb[1] = 0 # centre to be right on the edge.
            if bb[2] >= im_x:
                bb[2] = im_x - 1
            if bb[3] >= im_y:
                bb[3] = im_y - 1
            c1 = (x, y, radius)
            # Use bounding box and numpy to extract the local area around the
            # circle. Then use numpy to do a boolean operating to give a
            # true/false matrix of whether circles are nearby.
            local_area = circles[bb[0]:bb[2], bb[1]:bb[3]]
            circle_nearby = local_area != 0
            coords_of_local_circles = np.where(circle_nearby)
            radii_of_local_cirles = np.expand_dims(local_area[circle_nearby], axis=0) # Need the extra dim for next step
            # Stack into rows of (local_x, local_y, radius) for each neighbour.
            nrby_cirles = np.vstack([coords_of_local_circles, radii_of_local_cirles])
            nrby_cirles = nrby_cirles.transpose()
            any_overlaps_here = False
            if nrby_cirles.shape[0] == 0:
                circles[x,y] = radius
                prev_rad = radius
                closeness = 0
            else:
                for n in nrby_cirles:
                    # Translate neighbour coords back to image space (they are
                    # relative to the bounding-box slice).
                    c2 = (n[0]+bb[0], n[1]+bb[1], n[2])
                    overlap = overlapping(c1, c2)
                    if overlap:
                        any_overlaps_here = True
                        break
                # Look if any nearby circles overlap. If any do, don't make
                # a circle here.
                if not any_overlaps_here:
                    circles[x, y] = radius
                    prev_rad = radius
                    closeness = 0
        if LOGGING is True and HAVE_PYPRIND is True:
            progress.update()
    log("Avoided {skips} calculations".format(skips=skips))
    render(circles, "", params, im.size)
def main(argv=None):
    """Build the command-line parser, collect the options into a dict, and
    run the circleriser. Returns None (exit status 0)."""
    parser = argparse.ArgumentParser(description="Using imgcirc!")
    arg = parser.add_argument  # shorthand for readability
    arg("--circimg", type=str, required=True,
        help="The image that will make up the circles.", )
    arg("--interval", type=int, default=1,
        help="Interval between pixels to look at in the circimg. 1 means all pixels.")
    arg("--bgimg", type=str,
        help="An image to colour the circles with. Will be resized as needed.")
    arg("--outimg", type=str, required=True,
        help="Filename for the outputted image.")
    arg("--maxrad", type=int, default=10,
        help="Max radius of a circle (corresponds to a white pixel)")
    arg("--scale", type=float, default=1,
        help="Percent to scale up the circimg (sometimes makes it look better).")
    arg("--bgcolour", type=int, default=255,
        help="Grey-scale val from 0 to 255")
    arg("--nooutline", action='store_true', default=False,
        help="When specified, no outline will be drawn on circles.")
    arg("--log", action='store_true', default=False,
        help="Write progress to stdout.")
    options = parser.parse_args()
    params = dict(vars(options))
    global LOGGING
    if params["log"] is True:
        LOGGING = True
    log("Begin circlerising...")
    circlerise(params)
if __name__ == "__main__":
    sys.exit(main())
| OlympusMonds/PyCircleriser | PyCircleriser.py | Python | gpl-3.0 | 8,228 |
# Copyright (C) 2013 Che-Liang Chiou.
'''Compatibility layer of libclang cindex module.'''
from ctypes import c_uint
import clang.cindex as _cindex
from clang.cindex import (Index,
Cursor,
CursorKind,
Diagnostic,
RefQualifierKind,
Type,
TypeKind)
__all__ = ['Index', 'Cursor', 'CursorKind', 'Diagnostic',
'Type', 'TypeKind', 'LinkageKind', 'RefQualifierKind']
# Register clang_getCursorLinkage() with the loaded libclang library so it
# can be called through _cindex.conf.lib (takes a Cursor, returns c_uint).
_cindex.register_function(_cindex.conf.lib,
                          ('clang_getCursorLinkage', [Cursor], c_uint), False)
class LinkageKind:  # pylint: disable=R0903
    '''Class represents linkage kind (mirrors clang's CXLinkageKind enum).'''

    # Tag names indexed by the integer kind value.
    _linkage_kind_tags = ('INVALID',
                          'NO_LINKAGE',
                          'INTERNAL',
                          'UNIQUE_EXTERNAL',
                          'EXTERNAL')

    def __init__(self, kind):
        # Integer enum value as returned by clang_getCursorLinkage().
        self.kind = kind

    def __eq__(self, other):
        # Return NotImplemented (rather than raising AttributeError) when
        # compared against a non-LinkageKind object, per the data model;
        # Python then falls back to identity comparison.
        if not isinstance(other, LinkageKind):
            return NotImplemented
        return self.kind == other.kind

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Defining __eq__ disables the default hash on Python 3; keep
        # instances hashable and consistent with equality.
        return hash(self.kind)

    def __str__(self):
        return 'LinkageKind.%s' % self._linkage_kind_tags[self.kind]
# Module-level singleton instances mirroring clang's CXLinkageKind values,
# attached to the class for enum-style access (LinkageKind.EXTERNAL etc.).
LinkageKind.INVALID = LinkageKind(0)
LinkageKind.NO_LINKAGE = LinkageKind(1)
LinkageKind.INTERNAL = LinkageKind(2)
LinkageKind.UNIQUE_EXTERNAL = LinkageKind(3)
LinkageKind.EXTERNAL = LinkageKind(4)
# Extra Cursor accessors wrapping libclang calls not exposed by the
# bundled cindex version.
def _cursor_get_num_arguments(self):
    '''Call clang_Cursor_getNumArguments().'''
    return _cindex.conf.lib.clang_Cursor_getNumArguments(self)
def _cursor_linkage_kind(self):
    '''Call clang_getCursorLinkage() and wrap the result in LinkageKind.'''
    return LinkageKind(_cindex.conf.lib.clang_getCursorLinkage(self))
# Monkey-patch the upstream Cursor class with the extra accessors.
Cursor.get_num_arguments = _cursor_get_num_arguments
Cursor.linkage_kind = property(_cursor_linkage_kind)
| anthrotype/ctypes-binding-generator | cbind/clang_cindex.py | Python | gpl-3.0 | 1,859 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gevent import monkey
monkey.patch_all()
import json
import gc
from bottle import Bottle, request, response
from bottle.ext.mongo import MongoPlugin
from pandas import DataFrame
from mining.settings import PROJECT_PATH
from mining.utils import conf
from mining.utils._pandas import df_generate, DataFrameSearchColumn
from mining.db.datawarehouse import DataWarehouse
# Bottle sub-application serving the export endpoints, with the MongoDB
# plugin injected into route handlers as the `mongodb` argument.
export_app = Bottle()
mongo = MongoPlugin(
    uri=conf("mongodb")["uri"],
    db=conf("mongodb")["db"],
    json_mongo=True)
export_app.install(mongo)
@export_app.route('/data/<slug>.<ext>')
def data(mongodb, slug, ext='xls'):
    """Export the data of the cube behind *slug* as a CSV or Excel download.

    Optional query-string parameters: ``fields``, ``filter__<field>__<op>``,
    ``groupby``, ``orderby``, ``orderby__order`` and ``limit`` refine the
    exported DataFrame before it is written to disk and streamed back.
    """
    DW = DataWarehouse()
    element = mongodb['element'].find_one({'slug': slug})
    # Default page size.
    element['page_limit'] = 50
    # NOTE(review): query-string values arrive as strings, so this `is False`
    # comparison looks like it can never be true -- confirm intended behavior.
    if request.GET.get('limit', True) is False:
        element['page_limit'] = 9999999999
    data = DW.get(element.get('cube'))
    columns = data.get('columns') or []
    # Restrict exported columns when ?fields=a,b,c was supplied.
    fields = columns
    if request.GET.get('fields', None):
        fields = request.GET.get('fields').split(',')
    # Collect filter__<field>__<operator> query parameter names.
    # NOTE(review): iteritems() is Python 2 only.
    filters = [i[0] for i in request.GET.iteritems()
               if len(i[0].split('filter__')) > 1]
    df = DataFrame(data.get('data') or {}, columns=fields)
    # Apply each filter: 'like' -> substring match, 'regex' -> helper search,
    # anything else -> pandas query built by df_generate().
    if len(filters) >= 1:
        for f in filters:
            s = f.split('__')
            field = s[1]
            operator = s[2]
            value = request.GET.get(f)
            if operator == 'like':
                df = df[df[field].str.contains(value)]
            elif operator == 'regex':
                df = DataFrameSearchColumn(df, field, value, operator)
            else:
                df = df.query(df_generate(df, value, f))
    # Optional ?groupby=a,b aggregation.
    groupby = []
    if request.GET.get('groupby', None):
        groupby = request.GET.get('groupby', "").split(',')
    if len(groupby) >= 1:
        df = DataFrame(df.groupby(groupby).grouper.get_group_levels())
    # Optional ordering; the order-by column must be one of the export fields.
    if request.GET.get('orderby',
                       element.get('orderby', None)) and request.GET.get(
            'orderby', element.get('orderby', None)) in fields:
        orderby = request.GET.get('orderby', element.get('orderby', ''))
        if type(orderby) == str:
            orderby = orderby.split(',')
        orderby__order = request.GET.get('orderby__order',
                                         element.get('orderby__order', ''))
        if type(orderby__order) == str:
            orderby__order = orderby__order.split(',')
        # Translate '0'/'1' flags into booleans for the ascending= argument.
        ind = 0
        for orde in orderby__order:
            if orde == '0':
                orderby__order[ind] = False
            else:
                orderby__order[ind] = True
            ind += 1
        # NOTE(review): DataFrame.sort() was removed in modern pandas
        # (sort_values replaces it); this code targets an old pandas release.
        df = df.sort(orderby, ascending=orderby__order)
    # CLEAN MEMORY
    del filters, fields, columns
    gc.collect()
    file_name = '{}/assets/exports/openmining-{}.{}'.format(
        PROJECT_PATH, element.get('cube'), ext)
    if ext == 'csv':
        df.to_csv(file_name, sep=";")
        contenttype = 'text/csv'
    else:
        df.to_excel(file_name)
        contenttype = 'application/vnd.ms-excel'
    response.set_header('charset', 'utf-8')
    response.set_header('Content-disposition', 'attachment; '
                        'filename={}.{}'.format(element.get('cube'), ext))
    response.content_type = contenttype
    # NOTE(review): the export is read back in text mode ("r"); binary mode
    # ("rb") would be safer for the Excel branch -- verify under Python 3.
    ifile = open(file_name, "r")
    o = ifile.read()
    ifile.close()
    return o
| mlgruby/mining | mining/controllers/export.py | Python | mit | 3,368 |
#!/usr/bin/env python
"""Read a count from stdin and print that many Fibonacci numbers."""


def fib_sequence(n):
    """Return a list of the first *n* Fibonacci numbers: 1, 1, 2, 3, 5, ...

    Returns an empty list for n <= 0 (the original script printed a stray
    "1" even when asked for zero numbers).
    """
    seq = []
    a, b = 1, 0
    for _ in range(n):
        seq.append(a)
        # Advance the pair: next value is the sum of the previous two.
        a, b = a + b, a
    return seq


def main():
    """Prompt for a count and print the sequence on one line."""
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input      # Python 3
    n = int(read_line("Enter the value\n"))
    print(" ".join(str(v) for v in fib_sequence(n)))


if __name__ == "__main__":
    main()
| parichitran/py-hw | Fibonacci.py | Python | apache-2.0 | 109 |
from __future__ import absolute_import
import jinja2
from flask import request, session, current_app
from wtforms.ext.csrf.session import SessionSecureForm
from wtforms.fields import HiddenField
class Form(SessionSecureForm):
    """SessionSecureForm wired to Flask: CSRF key and token come from the app
    config (CSRF_SESSION_KEY / CSRF_ENABLED) and the flask.session object.

    NOTE: uses Python 2 names (``basestring``, ``unicode``) in hidden_fields.
    """
    def __init__(self, formdata=None, obj=None, prefix='', csrf_enabled=None, **kwargs):
        # csrf_enabled=None means "defer to the app config" (default True).
        self.csrf_enabled = csrf_enabled
        if csrf_enabled is None:
            self.csrf_enabled = current_app.config.get('CSRF_ENABLED', True)
        # SessionSecureForm derives the CSRF token from SECRET_KEY.
        self.SECRET_KEY = current_app.config.get('CSRF_SESSION_KEY', '_csrf_token')
        # flask.session is passed as the csrf_context for token storage.
        super(Form, self).__init__(formdata, obj, prefix, session, **kwargs)
    def is_submitted(self):
        "Check if request method is either PUT or POST"
        return request and request.method in ("PUT", "POST")
    def validate_on_submit(self):
        "Call `form.validate()` if request method was either PUT or POST"
        return self.is_submitted() and self.validate()
    def validate_csrf_token(self, field):
        # Skip CSRF validation entirely when disabled (e.g. in tests).
        if not self.csrf_enabled:
            return True
        return super(Form, self).validate_csrf_token(field)
    def hidden_fields(self, *fields):
        "hidden fields in a hidden DIV tag, in order to keep XHTML compliance."
        # With no arguments, render every HiddenField on the form; string
        # arguments are resolved to the field attribute of the same name.
        if not fields:
            fields = [f for f in self if isinstance(f, HiddenField)]
        rv = [u'<div style="display:none;">']
        for field in fields:
            if isinstance(field, basestring):
                field = getattr(self, field)
            rv.append(unicode(field))
        rv.append(u"</div>")
        # Markup marks the result as safe for Jinja auto-escaping.
        return jinja2.Markup(u"".join(rv))
| simonz05/flask-wtf | flask_wtf/forms.py | Python | bsd-3-clause | 1,675 |
# -*- coding: utf-8 -*-
'''
Provides the :class:`Arrow <arrow.arrow.Arrow>` class, an enhanced ``datetime``
replacement.
'''
from __future__ import absolute_import
from datetime import datetime, timedelta, tzinfo
from dateutil import tz as dateutil_tz
from dateutil.relativedelta import relativedelta
import calendar
import sys
from arrow import util, locales, parser, formatter
class Arrow(object):
    '''An :class:`Arrow <arrow.arrow.Arrow>` object.

    Implements the ``datetime`` interface, behaving as an aware ``datetime`` while implementing
    additional functionality.

    :param year: the calendar year.
    :param month: the calendar month.
    :param day: the calendar day.
    :param hour: (optional) the hour. Defaults to 0.
    :param minute: (optional) the minute, Defaults to 0.
    :param second: (optional) the second, Defaults to 0.
    :param microsecond: (optional) the microsecond. Defaults 0.
    :param tzinfo: (optional) the ``tzinfo`` object. Defaults to ``None``.

    If tzinfo is None, it is assumed to be UTC on creation.

    Usage::

        >>> import arrow
        >>> arrow.Arrow(2013, 5, 5, 12, 30, 45)
        <Arrow [2013-05-05T12:30:45+00:00]>
    '''

    resolution = datetime.resolution

    _ATTRS = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
    _ATTRS_PLURAL = ['{0}s'.format(a) for a in _ATTRS]

    def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0,
                 tzinfo=None):
        # Accept a timezone name / offset string in place of a tzinfo object.
        if util.isstr(tzinfo):
            tzinfo = parser.TzinfoParser.parse(tzinfo)
        tzinfo = tzinfo or dateutil_tz.tzutc()
        self._datetime = datetime(year, month, day, hour, minute, second,
                                  microsecond, tzinfo)

    # factories: single object, both original and from datetime.

    @classmethod
    def now(cls, tzinfo=None):
        '''Constructs an :class:`Arrow <arrow.arrow.Arrow>` object, representing "now".

        :param tzinfo: (optional) a ``tzinfo`` object.  Defaults to local time.
        '''
        utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc())
        dt = utc.astimezone(dateutil_tz.tzlocal() if tzinfo is None else tzinfo)
        return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                   dt.microsecond, dt.tzinfo)

    @classmethod
    def utcnow(cls):
        ''' Constructs an :class:`Arrow <arrow.arrow.Arrow>` object, representing "now" in UTC
        time.
        '''
        dt = datetime.utcnow()
        return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                   dt.microsecond, dateutil_tz.tzutc())

    @classmethod
    def fromtimestamp(cls, timestamp, tzinfo=None):
        ''' Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp.

        :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
        :param tzinfo: (optional) a ``tzinfo`` object.  Defaults to local time.
        '''
        tzinfo = tzinfo or dateutil_tz.tzlocal()
        timestamp = cls._get_timestamp_from_input(timestamp)
        dt = datetime.fromtimestamp(timestamp, tzinfo)
        return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                   dt.microsecond, tzinfo)

    @classmethod
    def utcfromtimestamp(cls, timestamp):
        '''Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, in UTC time.

        :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
        '''
        timestamp = cls._get_timestamp_from_input(timestamp)
        dt = datetime.utcfromtimestamp(timestamp)
        return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                   dt.microsecond, dateutil_tz.tzutc())

    @classmethod
    def fromdatetime(cls, dt, tzinfo=None):
        ''' Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a ``datetime`` and optional
        ``tzinfo`` object.

        :param dt: the ``datetime``
        :param tzinfo: (optional) a ``tzinfo`` object.  Defaults to UTC.
        '''
        tzinfo = tzinfo or dt.tzinfo or dateutil_tz.tzutc()
        return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                   dt.microsecond, tzinfo)

    @classmethod
    def fromdate(cls, date, tzinfo=None):
        ''' Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a ``date`` and optional
        ``tzinfo`` object.  Time values are set to 0.

        :param date: the ``date``
        :param tzinfo: (optional) a ``tzinfo`` object.  Defaults to UTC.
        '''
        tzinfo = tzinfo or dateutil_tz.tzutc()
        return cls(date.year, date.month, date.day, tzinfo=tzinfo)

    @classmethod
    def strptime(cls, date_str, fmt, tzinfo=None):
        ''' Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a date string and format,
        in the style of ``datetime.strptime``.

        :param date_str: the date string.
        :param fmt: the format string.
        :param tzinfo: (optional) an optional ``tzinfo``
        '''
        dt = datetime.strptime(date_str, fmt)
        tzinfo = tzinfo or dt.tzinfo
        return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                   dt.microsecond, tzinfo)

    # factories: ranges and spans

    @classmethod
    def range(cls, frame, start, end=None, tz=None, limit=None):
        ''' Returns an array of :class:`Arrow <arrow.arrow.Arrow>` objects, representing
        an iteration of time between two inputs.

        :param frame: the timeframe.  Can be any ``datetime`` property (day, hour, minute...).
        :param start: A datetime expression, the start of the range.
        :param end: (optional) A datetime expression, the end of the range.
        :param tz: (optional) A timezone expression.  Defaults to UTC.
        :param limit: (optional) A maximum number of tuples to return.

        **NOTE**: the **end** or **limit** must be provided.  Call with **end** alone to
        return the entire range, with **limit** alone to return a maximum # of results from the
        start, and with both to cap a range at a maximum # of results.

        Recognized datetime expressions:

            - An :class:`Arrow <arrow.arrow.Arrow>` object.
            - A ``datetime`` object.

        Recognized timezone expressions:

            - A ``tzinfo`` object.
            - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
            - A ``str`` in ISO-8601 style, as in '+07:00'.
            - A ``str``, one of the following:  'local', 'utc', 'UTC'.

        Usage:

            >>> start = datetime(2013, 5, 5, 12, 30)
            >>> end = datetime(2013, 5, 5, 17, 15)
            >>> for r in arrow.Arrow.range('hour', start, end):
            ...     print repr(r)
            ...
            <Arrow [2013-05-05T12:30:00+00:00]>
            <Arrow [2013-05-05T13:30:00+00:00]>
            <Arrow [2013-05-05T14:30:00+00:00]>
            <Arrow [2013-05-05T15:30:00+00:00]>
            <Arrow [2013-05-05T16:30:00+00:00]>
        '''
        frame_relative = cls._get_frames(frame)[1]
        tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz)

        start = cls._get_datetime(start).replace(tzinfo=tzinfo)
        end, limit = cls._get_iteration_params(end, limit)
        end = cls._get_datetime(end).replace(tzinfo=tzinfo)

        current = cls.fromdatetime(start)
        results = []

        while current <= end and len(results) < limit:
            results.append(current)
            # Step by one unit of the requested frame (days=1, hours=1, ...).
            values = [getattr(current, f) for f in cls._ATTRS]
            current = cls(*values, tzinfo=tzinfo) + relativedelta(**{frame_relative: 1})

        return results

    @classmethod
    def span_range(cls, frame, start, end, tz=None, limit=None):
        ''' Returns an array of tuples, each :class:`Arrow <arrow.arrow.Arrow>` objects,
        representing a series of timespans between two inputs.

        :param frame: the timeframe.  Can be any ``datetime`` property (day, hour, minute...).
        :param start: A datetime expression, the start of the range.
        :param end: (optional) A datetime expression, the end of the range.
        :param tz: (optional) A timezone expression.  Defaults to UTC.
        :param limit: (optional) A maximum number of tuples to return.

        **NOTE**: the **end** or **limit** must be provided.  Call with **end** alone to
        return the entire range, with **limit** alone to return a maximum # of results from the
        start, and with both to cap a range at a maximum # of results.

        Recognized datetime expressions:

            - An :class:`Arrow <arrow.arrow.Arrow>` object.
            - A ``datetime`` object.

        Recognized timezone expressions:

            - A ``tzinfo`` object.
            - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
            - A ``str`` in ISO-8601 style, as in '+07:00'.
            - A ``str``, one of the following:  'local', 'utc', 'UTC'.

        Usage:

            >>> start = datetime(2013, 5, 5, 12, 30)
            >>> end = datetime(2013, 5, 5, 17, 15)
            >>> for r in arrow.Arrow.span_range('hour', start, end):
            ...     print r
            ...
            (<Arrow [2013-05-05T12:00:00+00:00]>, <Arrow [2013-05-05T12:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T13:00:00+00:00]>, <Arrow [2013-05-05T13:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T14:00:00+00:00]>, <Arrow [2013-05-05T14:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T15:00:00+00:00]>, <Arrow [2013-05-05T15:59:59.999999+00:00]>)
            (<Arrow [2013-05-05T16:00:00+00:00]>, <Arrow [2013-05-05T16:59:59.999999+00:00]>)
        '''
        _range = cls.range(frame, start, end, tz, limit)
        return [r.span(frame) for r in _range]

    # representations

    def __repr__(self):
        # Fixed: a previous version computed an ``attrs`` string here that
        # was never used (dead code).
        return '<{0} [{1}]>'.format(self.__class__.__name__, self.__str__())

    def __str__(self):
        return self._datetime.isoformat()

    def __format__(self, formatstr):
        if len(formatstr) > 0:
            return self.format(formatstr)
        return str(self)

    def __hash__(self):
        return self._datetime.__hash__()

    # attributes & properties

    def __getattr__(self, name):
        # 'week' is an Arrow extension: the ISO calendar week number.
        if name == 'week':
            return self.isocalendar()[1]
        # Delegate everything else to the wrapped datetime.
        if not name.startswith('_'):
            value = getattr(self._datetime, name, None)
            if value is not None:
                return value
        return object.__getattribute__(self, name)

    @property
    def tzinfo(self):
        ''' Gets the ``tzinfo`` of the :class:`Arrow <arrow.arrow.Arrow>` object. '''
        return self._datetime.tzinfo

    @tzinfo.setter
    def tzinfo(self, tzinfo):
        ''' Sets the ``tzinfo`` of the :class:`Arrow <arrow.arrow.Arrow>` object. '''
        self._datetime = self._datetime.replace(tzinfo=tzinfo)

    @property
    def datetime(self):
        ''' Returns a datetime representation of the :class:`Arrow <arrow.arrow.Arrow>` object. '''
        return self._datetime

    @property
    def naive(self):
        ''' Returns a naive datetime representation of the :class:`Arrow <arrow.arrow.Arrow>` object. '''
        return self._datetime.replace(tzinfo=None)

    @property
    def timestamp(self):
        ''' Returns a timestamp representation of the :class:`Arrow <arrow.arrow.Arrow>` object. '''
        return calendar.timegm(self._datetime.utctimetuple())

    @property
    def float_timestamp(self):
        ''' Returns a floating-point representation of the :class:`Arrow <arrow.arrow.Arrow>` object. '''
        return self.timestamp + float(self.microsecond) / 1000000

    # mutation and duplication.

    def clone(self):
        ''' Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, cloned from the current one.

        Usage:

            >>> arw = arrow.utcnow()
            >>> cloned = arw.clone()
        '''
        return self.fromdatetime(self._datetime)

    def replace(self, **kwargs):
        ''' Returns a new :class:`Arrow <arrow.arrow.Arrow>` object with attributes updated
        according to inputs.

        Use single property names to set their value absolutely:

            >>> import arrow
            >>> arw = arrow.utcnow()
            >>> arw
            <Arrow [2013-05-11T22:27:34.787885+00:00]>
            >>> arw.replace(year=2014, month=6)
            <Arrow [2014-06-11T22:27:34.787885+00:00]>

        Use plural property names to shift their current value relatively:

            >>> arw.replace(years=1, months=-1)
            <Arrow [2014-04-11T22:27:34.787885+00:00]>

        You can also provide a timezone expression can also be replaced:

            >>> arw.replace(tzinfo=tz.tzlocal())
            <Arrow [2013-05-11T22:27:34.787885-07:00]>

        Recognized timezone expressions:

            - A ``tzinfo`` object.
            - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
            - A ``str`` in ISO-8601 style, as in '+07:00'.
            - A ``str``, one of the following:  'local', 'utc', 'UTC'.
        '''
        absolute_kwargs = {}
        relative_kwargs = {}

        # Split inputs: singular names set values, plural names shift them.
        for key, value in kwargs.items():
            if key in self._ATTRS:
                absolute_kwargs[key] = value
            elif key in self._ATTRS_PLURAL or key == 'weeks':
                relative_kwargs[key] = value
            elif key == 'week':
                raise AttributeError('setting absolute week is not supported')
            elif key != 'tzinfo':
                raise AttributeError()

        current = self._datetime.replace(**absolute_kwargs)
        current += relativedelta(**relative_kwargs)

        tzinfo = kwargs.get('tzinfo')
        if tzinfo is not None:
            tzinfo = self._get_tzinfo(tzinfo)
            current = current.replace(tzinfo=tzinfo)

        return self.fromdatetime(current)

    def to(self, tz):
        ''' Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, converted to the target
        timezone.

        :param tz: an expression representing a timezone.

        Recognized timezone expressions:

            - A ``tzinfo`` object.
            - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'.
            - A ``str`` in ISO-8601 style, as in '+07:00'.
            - A ``str``, one of the following:  'local', 'utc', 'UTC'.

        Usage::

            >>> utc = arrow.utcnow()
            >>> utc
            <Arrow [2013-05-09T03:49:12.311072+00:00]>

            >>> utc.to('US/Pacific')
            <Arrow [2013-05-08T20:49:12.311072-07:00]>

            >>> utc.to(tz.tzlocal())
            <Arrow [2013-05-08T20:49:12.311072-07:00]>

            >>> utc.to('-07:00')
            <Arrow [2013-05-08T20:49:12.311072-07:00]>

            >>> utc.to('local')
            <Arrow [2013-05-08T20:49:12.311072-07:00]>

            >>> utc.to('local').to('utc')
            <Arrow [2013-05-09T03:49:12.311072+00:00]>
        '''
        if not isinstance(tz, tzinfo):
            tz = parser.TzinfoParser.parse(tz)

        dt = self._datetime.astimezone(tz)
        return self.__class__(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                              dt.microsecond, tz)

    def span(self, frame):
        ''' Returns two new :class:`Arrow <arrow.arrow.Arrow>` objects, representing the timespan
        of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.

        :param frame: the timeframe.  Can be any ``datetime`` property (day, hour, minute...).

        Usage::

            >>> arrow.utcnow()
            <Arrow [2013-05-09T03:32:36.186203+00:00]>

            >>> arrow.utcnow().span('hour')
            (<Arrow [2013-05-09T03:00:00+00:00]>, <Arrow [2013-05-09T03:59:59.999999+00:00]>)

            >>> arrow.utcnow().span('day')
            (<Arrow [2013-05-09T00:00:00+00:00]>, <Arrow [2013-05-09T23:59:59.999999+00:00]>)
        '''
        frame_absolute, frame_relative = self._get_frames(frame)

        # Keep all attributes down to (and including) the requested frame,
        # then zero-pad up to a full (year, month, day) triple minimum.
        index = self._ATTRS.index('day' if frame_absolute == 'week' else frame_absolute)
        frames = self._ATTRS[:index + 1]

        values = [getattr(self, f) for f in frames]
        for i in range(3 - len(values)):
            values.append(1)

        floor = self.__class__(*values, tzinfo=self.tzinfo)

        if frame_absolute == 'week':
            # Roll back to Monday, the ISO start of the week.
            floor = floor + relativedelta(days=-(self.isoweekday() - 1))

        ceil = floor + relativedelta(**{frame_relative: 1}) + relativedelta(microseconds=-1)

        return floor, ceil

    def floor(self, frame):
        ''' Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, representing the "floor"
        of the timespan of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.
        Equivalent to the first element in the 2-tuple returned by
        :func:`span <arrow.arrow.Arrow.span>`.

        :param frame: the timeframe.  Can be any ``datetime`` property (day, hour, minute...).

        Usage::

            >>> arrow.utcnow().floor('hour')
            <Arrow [2013-05-09T03:00:00+00:00]>
        '''
        return self.span(frame)[0]

    def ceil(self, frame):
        ''' Returns a new :class:`Arrow <arrow.arrow.Arrow>` object, representing the "ceiling"
        of the timespan of the :class:`Arrow <arrow.arrow.Arrow>` object in a given timeframe.
        Equivalent to the second element in the 2-tuple returned by
        :func:`span <arrow.arrow.Arrow.span>`.

        :param frame: the timeframe.  Can be any ``datetime`` property (day, hour, minute...).

        Usage::

            >>> arrow.utcnow().ceil('hour')
            <Arrow [2013-05-09T03:59:59.999999+00:00]>
        '''
        return self.span(frame)[1]

    # string output and formatting.

    def format(self, fmt, locale='en_us'):
        ''' Returns a string representation of the :class:`Arrow <arrow.arrow.Arrow>` object,
        formatted according to a format string.

        :param fmt: the format string.

        Usage::

            >>> arrow.utcnow().format('YYYY-MM-DD HH:mm:ss ZZ')
            '2013-05-09 03:56:47 -00:00'

            >>> arrow.utcnow().format('X')
            '1368071882'

            >>> arrow.utcnow().format('MMMM DD, YYYY')
            'May 09, 2013'
        '''
        return formatter.DateTimeFormatter(locale).format(self._datetime, fmt)

    def humanize(self, other=None, locale='en_us'):
        ''' Returns a localized, humanized representation of a relative difference in time.

        :param other: (optional) an :class:`Arrow <arrow.arrow.Arrow>` or ``datetime`` object.
            Defaults to now in the current :class:`Arrow <arrow.arrow.Arrow>` object's timezone.
        :param locale: (optional) a ``str`` specifying a locale.  Defaults to 'en_us'.

        Usage::

            >>> earlier = arrow.utcnow().replace(hours=-2)
            >>> earlier.humanize()
            '2 hours ago'

            >>> later = earlier.replace(hours=4)
            >>> later.humanize(earlier)
            'in 4 hours'
        '''
        locale = locales.get_locale(locale)

        # Normalize the comparison target to an aware datetime in our zone.
        if other is None:
            utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc())
            dt = utc.astimezone(self._datetime.tzinfo)
        elif isinstance(other, Arrow):
            dt = other._datetime
        elif isinstance(other, datetime):
            if other.tzinfo is None:
                dt = other.replace(tzinfo=self._datetime.tzinfo)
            else:
                dt = other.astimezone(self._datetime.tzinfo)
        else:
            raise TypeError()

        delta = int(util.total_seconds(self._datetime - dt))
        sign = -1 if delta < 0 else 1
        diff = abs(delta)

        # Threshold ladder, from "now" up to "years".
        if diff < 10:
            return locale.describe('now')

        if diff < 45:
            return locale.describe('seconds', sign)

        elif diff < 90:
            return locale.describe('minute', sign)
        elif diff < 2700:
            minutes = sign * int(max(diff / 60, 2))
            return locale.describe('minutes', minutes)

        elif diff < 5400:
            return locale.describe('hour', sign)
        elif diff < 79200:
            hours = sign * int(max(diff / 3600, 2))
            return locale.describe('hours', hours)

        elif diff < 129600:
            return locale.describe('day', sign)
        elif diff < 2160000:
            days = sign * int(max(diff / 86400, 2))
            return locale.describe('days', days)

        elif diff < 3888000:
            return locale.describe('month', sign)
        elif diff < 29808000:
            self_months = self._datetime.year * 12 + self._datetime.month
            other_months = dt.year * 12 + dt.month
            months = sign * abs(other_months - self_months)

            return locale.describe('months', months)

        elif diff < 47260800:
            return locale.describe('year', sign)
        else:
            years = sign * int(max(diff / 31536000, 2))
            return locale.describe('years', years)

    # math

    def __add__(self, other):
        if isinstance(other, (timedelta, relativedelta)):
            return self.fromdatetime(self._datetime + other, self._datetime.tzinfo)

        raise NotImplementedError()

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        if isinstance(other, timedelta):
            return self.fromdatetime(self._datetime - other, self._datetime.tzinfo)

        elif isinstance(other, datetime):
            return self._datetime - other

        elif isinstance(other, Arrow):
            return self._datetime - other._datetime

        raise NotImplementedError()

    def __rsub__(self, other):
        # Fixed: reflected subtraction is ``other - self``; the previous
        # implementation returned ``self - other``, negating the result of
        # ``datetime - Arrow``.
        if isinstance(other, datetime):
            return other - self._datetime

        raise NotImplementedError()

    # comparisons

    def __eq__(self, other):

        if not isinstance(other, (Arrow, datetime)):
            return False

        return self._datetime == self._get_datetime(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):

        if not isinstance(other, (Arrow, datetime)):
            return False

        return self._datetime > self._get_datetime(other)

    def __ge__(self, other):

        if not isinstance(other, (Arrow, datetime)):
            return False

        return self._datetime >= self._get_datetime(other)

    def __lt__(self, other):

        if not isinstance(other, (Arrow, datetime)):
            return False

        return self._datetime < self._get_datetime(other)

    def __le__(self, other):

        if not isinstance(other, (Arrow, datetime)):
            return False

        return self._datetime <= self._get_datetime(other)

    # datetime methods

    def date(self):
        ''' Returns a ``date`` object with the same year, month and day. '''
        return self._datetime.date()

    def time(self):
        ''' Returns a ``time`` object with the same hour, minute, second, microsecond. '''
        return self._datetime.time()

    def timetz(self):
        ''' Returns a ``time`` object with the same hour, minute, second, microsecond and tzinfo. '''
        return self._datetime.timetz()

    def astimezone(self, tz):
        ''' Returns a ``datetime`` object, adjusted to the specified tzinfo.

        :param tz: a ``tzinfo`` object.
        '''
        return self._datetime.astimezone(tz)

    def utcoffset(self):
        ''' Returns a ``timedelta`` object representing the whole number of minutes difference from UTC time. '''
        return self._datetime.utcoffset()

    def dst(self):
        ''' Returns the daylight savings time adjustment. '''
        return self._datetime.dst()

    def timetuple(self):
        ''' Returns a ``time.struct_time``, in the current timezone. '''
        return self._datetime.timetuple()

    def utctimetuple(self):
        ''' Returns a ``time.struct_time``, in UTC time. '''
        return self._datetime.utctimetuple()

    def toordinal(self):
        ''' Returns the proleptic Gregorian ordinal of the date. '''
        return self._datetime.toordinal()

    def weekday(self):
        ''' Returns the day of the week as an integer (0-6). '''
        return self._datetime.weekday()

    def isoweekday(self):
        ''' Returns the ISO day of the week as an integer (1-7). '''
        return self._datetime.isoweekday()

    def isocalendar(self):
        ''' Returns a 3-tuple, (ISO year, ISO week number, ISO weekday). '''
        return self._datetime.isocalendar()

    def isoformat(self, sep='T'):
        '''Returns an ISO 8601 formatted representation of the date and time. '''
        return self._datetime.isoformat(sep)

    def ctime(self):
        ''' Returns a ctime formatted representation of the date and time. '''
        return self._datetime.ctime()

    def strftime(self, format):
        ''' Formats in the style of ``datetime.strftime``.

        :param format: the format string.
        '''
        return self._datetime.strftime(format)

    # internal tools.

    @classmethod
    def _get_tzinfo(cls, tz_expr):
        # Resolve a timezone expression (None / tzinfo / str) to a tzinfo.
        if tz_expr is None:
            return dateutil_tz.tzutc()
        if isinstance(tz_expr, tzinfo):
            return tz_expr
        else:
            try:
                return parser.TzinfoParser.parse(tz_expr)
            except parser.ParserError:
                # Fixed: the placeholder was never interpolated before.
                raise ValueError('\'{0}\' not recognized as a timezone'.format(tz_expr))

    @classmethod
    def _get_datetime(cls, expr):
        # Resolve an Arrow / datetime / timestamp expression to a datetime.
        if isinstance(expr, Arrow):
            return expr.datetime

        if isinstance(expr, datetime):
            return expr

        try:
            expr = float(expr)
            return cls.utcfromtimestamp(expr).datetime
        except (TypeError, ValueError):
            # Fixed: narrowed the bare ``except`` and interpolated the message.
            raise ValueError('\'{0}\' not recognized as a timestamp or datetime'.format(expr))

    @classmethod
    def _get_frames(cls, name):
        # Map a frame name to its (singular, plural) attribute pair.
        if name in cls._ATTRS:
            return name, '{0}s'.format(name)
        elif name in ['week', 'weeks']:
            return 'week', 'weeks'

        raise AttributeError()

    @classmethod
    def _get_iteration_params(cls, end, limit):
        # One of end / limit must be given; fill in the missing bound.
        if end is None:
            if limit is None:
                raise Exception('one of \'end\' or \'limit\' is required')
            return cls.max, limit
        else:
            return end, sys.maxsize

    @classmethod
    def _get_timestamp_from_input(cls, timestamp):
        try:
            return float(timestamp)
        except (TypeError, ValueError):
            raise ValueError('cannot parse \'{0}\' as a timestamp'.format(timestamp))
# Class-level sentinels mirroring datetime.min / datetime.max (UTC-aware).
Arrow.min = Arrow.fromdatetime(datetime.min)
Arrow.max = Arrow.fromdatetime(datetime.max)
| wri/gfw-api | lib/arrow/arrow.py | Python | gpl-2.0 | 26,941 |
#!/usr/bin/env python
'''
log2cef
Main module
Copyright 2013 J.R. Murray (jr.murray@gmail.com)
This program is distributed under the terms of the GNU General Public License version 3.0.
Tested with Python 2.7 on Windows 8
Configuration files:
1. User input
Log file sources
Parser agent to use
2. Parser configuration(s)
Future setup:
1 file - standard (built in) agents
2 - user-defined agents
Future updates
Filtering (boolean)
Remote configuration / updates
Auto-update setting
Update log (log directory)
Email error notifications
Input types:
Regex
'''
import sys
import re
import socket
import os
import time
#import datetime
import ConfigParser
import logging
import cef
import cefagent
# Configuration - move to config file later
# Follow CEF specification 1:1
def is_valid_ip(ip):
    """Return True when *ip* is a dotted-quad IPv4 address, each octet 0-255."""
    octet = r"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
    ipv4_re = re.compile(r"^(?:%s\.){3}%s$" % (octet, octet),
                         re.VERBOSE | re.IGNORECASE)
    return ipv4_re.match(ip) is not None
def ipv4_int(ip):
    """Return the 32-bit integer value of a dotted-quad IPv4 address.

    Storing the integer form in a database allows < and > range comparison.
    Invalid addresses map to 0.
    """
    if not is_valid_ip(ip):
        return 0
    total = 0
    # Fold the four octets big-endian: total = a*256^3 + b*256^2 + c*256 + d.
    for octet in ip.split('.'):
        total = total * 256 + int(octet)
    return total
def getFileSize(f):
    """Return the size of file *f* in bytes (st_size from os.stat)."""
    return os.stat(f).st_size
def xml_escape(str):
    """Replace the five XML special characters in *str* with entities.

    '&' must be translated first so entities inserted by the later
    replacements are not themselves escaped again.
    """
    for ch, entity in (("&", "&amp;"),
                       ("'", "&apos;"),
                       ('"', "&quot;"),
                       ("<", "&lt;"),
                       (">", "&gt;")):
        str = str.replace(ch, entity)
    return str
def sanitize(str):
    """Strip every character outside printable ASCII (0x20-0x7E) from *str*.

    Uses re.sub with an inline pattern: the re module caches compiled
    patterns, so the previous explicit re.compile() on every call was
    redundant work.
    """
    return re.sub(r'[^\x20-\x7E]', '', str)
def truncate(str, max_length):
    """Shorten *str* to at most *max_length* characters, ending in '...' when cut."""
    if len(str) > max_length:
        return str[:max_length - 3] + '...'
    return str
def email(sender, recipients, subject, message, smtp_server):
    """Send *message* to *recipients* (comma-separated string) via SMTP.

    :param sender: envelope/From address.
    :param recipients: comma-separated list of destination addresses.
    :param subject: message subject line.
    :param message: plain-text body.
    :param smtp_server: hostname of the SMTP relay.
    Raises BaseException with a descriptive message on any failure
    (preserved contract: callers catch BaseException).
    """
    # Imported locally: these names were referenced but never imported
    # anywhere in the module, so every call raised NameError before.
    from email.mime.text import MIMEText
    from smtplib import SMTP
    try:
        email_msg = MIMEText(message)
        email_msg['Subject'] = subject
        email_msg['From'] = sender
        email_msg['To'] = recipients
        smtp = SMTP(smtp_server)
        try:
            smtp.sendmail(sender, recipients.split(","), email_msg.as_string())
        finally:
            smtp.quit()  # always close the SMTP session (was leaked before)
    except BaseException as e:
        raise BaseException('Error sending mail: ' + str(e))
def syslog(message, host, port=514, utf8=True):
    """Send *message* as a UDP syslog packet to *host*:*port*, best effort.

    Errors are deliberately swallowed: logging must never crash the caller.
    NOTE: the old timestamp/hostname prefix depended on a commented-out
    ``datetime`` import, so a NameError silently disabled this function;
    the dead prefix code has been removed and sending now works.
    """
    try:
        # Encode up front; sendto() requires bytes on Python 3.
        if utf8:
            message = message.encode('utf-8')
        data = message if isinstance(message, bytes) else str(message)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            sock.sendto(data, (host, port))
        finally:
            sock.close()  # release the socket even if sendto fails
    except Exception:
        # Fire-and-forget by design (no longer hides KeyboardInterrupt).
        pass
def xor_decryption(key, ciphertext):
    """Decrypt a hex-encoded, XOR-obfuscated string with *key*.

    Each ciphertext byte (a pair of hex digits) is XORed with the key
    character at position (pair_index % len(key)), pair indices starting
    at 1. Returns the recovered plaintext string.
    """
    plaintext = ''
    # Floor division: the original used true-division ``len(...)/2`` which,
    # under Python 3, produced a float and broke range(); ``//`` behaves
    # identically on Python 2 and correctly on Python 3.
    for ptr in range(1, len(ciphertext) // 2 + 1):
        # The value to be XORed comes from two hex digits of the ciphertext.
        cipher_byte = int(ciphertext[ptr * 2 - 2:ptr * 2], 16)
        # The second operand comes from the repeating key.
        key_byte = ord(key[ptr % len(key)])
        plaintext = plaintext + chr(cipher_byte ^ key_byte)
    return plaintext
def we_are_frozen():
    """True when running as a frozen (py2exe) executable.

    Affects how the program's own directory is located (sys.executable
    versus the module __file__).
    """
    return hasattr(sys, "frozen")
def module_path():
    # Directory containing this program, valid both for a plain script and
    # for a py2exe-frozen executable (where __file__ is unreliable).
    # NOTE(review): ``unicode`` is Python 2 only; this module targets Python 2.
    if we_are_frozen():
        return os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding( )))
    return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))
class InputFile(object):
    """Mutable record describing one monitored log file and how to parse it.

    All fields start empty/default; the main loop populates them from the
    INPUT<n> sections of the configuration file.
    """

    def __init__(self):
        # Source file location, open handle and read progress.
        self.path = None
        self.handle = None
        self.size = None
        self.cursor = 0          # byte offset of the next read
        # Parsing / routing configuration.
        self.agent = None
        self.filter = None
        self.destinations = None
        # CEF device identification fields.
        self.vendor = None
        self.product = None
        self.version = None
        # Behaviour flags.
        self.whole_file = False  # process from the start instead of tailing
        self.date_format = None
        self.log_timezone = 'UTC'
        self.tz_dst = False
if __name__ == "__main__":
# API Keys
APIKeys = {}
# APIKeys['example.com'] = '4d38604a1586370cf2f13c87d201b378f42b290b3f5067db4d659f162d883de0'
month = {}
month["01"] = "Jan"
month["02"] = "Feb"
month["03"] = "Mar"
month["04"] = "Apr"
month["05"] = "May"
month["06"] = "Jun"
month["07"] = "Jul"
month["08"] = "Aug"
month["09"] = "Sep"
month["10"] = "Oct"
month["11"] = "Nov"
month["12"] = "Dec"
logging_levels = {}
logging_levels["DEBUG"] = 10
logging_levels["INFO"] = 20
logging_levels["WARNING"] = 30
logging_levels["ERROR"] = 40
logging_levels["CRITICAL"] = 50
log_file = 'log2cef.log'
log_dir = 'log'
conf_dir = 'config'
user_config = conf_dir + '\\config.ini'
agents_config = conf_dir + '\\agents.ini'
# Change the working directory to the same location as the exe/script
cwd = module_path()
os.chdir(cwd)
# Determine the name of myself
script_base = os.path.basename(sys.argv[0])
# Remove the file extension
script_name = os.path.splitext(script_base)[0]
# Set default values
config = ConfigParser.SafeConfigParser({
# Set parameters for script
'log_filename' : sys.argv[0].split('.')[0] + '.log',
'log_dir' : 'logs',
'data_dir' : 'data',
'smtp_server' : '127.0.0.1',
'email_sender' : 'log2cef@yourdomain.com',
'email_recipients' : 'security@yourdomain.com',
'syslog_hosts' : '127.0.0.1',
'debug' : 'False',
'whole_file' : 'False',
'logging_level' : 'INFO'})
'''
# Choose which interface to use for outbound socket-based communication
local_ips = [ i[4][0] for i in socket.getaddrinfo(socket.gethostname(), None) if i[0] == 2 ]
for i in local_ips:
if i.startswith('10.'):
local_ip = i
local_host = socket.gethostname()
'''
files = []
try:
global_section = 'DEFAULT'
#log_section = 'LOG'
# Test if the file exists
if os.path.exists(user_config):
config.read(user_config)
else:
print 'Config file not found. Exiting.'
sys.exit(1)
# Required settings
debug = config.getboolean(global_section, 'debug')
# User input settings
section = 'OUTPUT'
output_syslog = config.getboolean(section, 'output_syslog')
output_file = config.getboolean(section, 'output_file')
log_dir = config.get(section, 'log_dir')
data_dir = config.get(section, 'data_dir')
log_filename = config.get(section, 'log_filename')
output_filename = config.get(section, 'output_filename')
section = 'EMAIL'
smtp_server = config.get(section, 'smtp_server')
email_sender = config.get(section, 'email_sender')
email_recipients = config.get(section, 'email_recipients')
section = 'DEFAULT'
logging_level = config.get(section, 'logging_level')
logging_level = logging_levels[logging_level]
# Set the first file number / config section to read
file_num = 1
section = 'INPUT' + str(file_num)
# Loop through each input section
while(config.has_section('INPUT' + str(file_num))):
try:
n = InputFile()
n.path = config.get(section, 'log_path')
n.agent = config.get(section, 'log_agent')
n.filter = config.get(section, 'filter')
n.destinations = config.get(section, 'syslog_hosts')
n.vendor = config.get(section, 'vendor')
n.product = config.get(section, 'product')
n.version = config.get(section, 'version')
n.whole_file = config.getboolean(section, 'whole_file')
n.date_format = config.get(section, 'date_format')
n.log_timezone = config.get(section, 'log_timezone')
n.tz_dst = config.getboolean(section, 'tz_dst')
# If we are not processing the whole file, read it from the end.
if not n.whole_file:
n.cursor = n.handle.seek(n.size)
# Open the file and read the size of it
n.handle = open(n.path)
n.size = getFileSize(n.path)
# Append the new object to the array of input files
files.append(n)
except BaseException, e:
print "Error handling configuration section: " + section + ": " + str(e)
file_num += 1
section = 'INPUT' + str(file_num)
except BaseException, e:
print "Error reading config (using defaults): " + str(e)
# Concatenate the log dir and log file vars
log_file = log_dir + '\\' + log_filename
# Create the log folder if it does not exist
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Logging levels = debug / info / warn / error / critical
logger = logging.getLogger('log2cef')
logger.setLevel(logging_level)
# Create file handler which logs even debug messages
log_fh = logging.FileHandler(log_file)
log_fh.setLevel(logging_level)
# Create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
# Create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#console_handler.setFormatter(formatter)
log_fh.setFormatter(formatter)
#Add the handlers to logger
logger.addHandler(console_handler)
logger.addHandler(log_fh)
# Open output file
print data_dir
output_filename = data_dir + '\\' + output_filename
print output_filename
of = open(output_filename, 'w+')
# Create an instance of the log processor object
lp = cefagent.LogProcessor(agents_config)
while 1:
for f in files:
# Repeat for this file until we reach the end
while f.cursor != getFileSize(f.path):
message = None
logger.debug("Current file: " + f.path)
f.cursor = f.handle.tell()
#logger.debug("cursor = " + str(f.cursor) + " & size = " + str(getFileSize(f.path)))
line = f.handle.readline()
if not line:
#logger.debug("No line detected in file: " + f.path + "(" + str(f.cursor) + ")")
# If the file is smaller than the cursor value then it has been truncated; reset.
if getFileSize(f.path) < f.cursor:
file1.seek(0)
else:
f.handle.seek(f.cursor)
else:
# Parse the line of text into a CEF message
#logger.debug("Processing line from file " + f.path + " (" + str(f.cursor) + ") using agent " + f.agent + "\n" + line)
cef_data = lp.parse(f.agent, line, f.date_format, f.log_timezone, f.tz_dst)
cef_data.deviceVendor = f.vendor
cef_data.deviceProduct = f.product
cef_data.deviceVersion = f.version
# Placeholder for filtering logic
# Convert to plain text
message = cef_data.write()
print message
# Send the message to each destination
if output_syslog:
for d in f.destinations.split(','):
syslog(message, d)
# Write the message to the output file
if output_file:
of.write(message + '\n')
# Sleep if all files are fully read
time.sleep(0.5)
| jrzmurray/log2cef | log2cef.py | Python | gpl-3.0 | 11,270 |
"""
kombu.transport.beanstalk
=========================
Beanstalk transport.
:copyright: (c) 2010 - 2012 by David Ziegler.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import beanstalkc
import socket
from anyjson import loads, dumps
from Queue import Empty
from kombu.exceptions import StdConnectionError, StdChannelError
from . import virtual
DEFAULT_PORT = 11300
__author__ = 'David Ziegler <david.ziegler@gmail.com>'
class Channel(virtual.Channel):
    """Virtual channel backed by a beanstalkd server.

    Each kombu queue maps onto a beanstalk "tube"; messages are stored as
    JSON-serialized job bodies.
    """
    _client = None

    def _parse_job(self, job):
        """Decode a reserved job into ``(payload, tube_name)``.

        Jobs that cannot be decoded are buried for later inspection; a
        missing job (reserve timed out) raises :class:`Empty`.
        """
        item, dest = None, None
        if job:
            try:
                item = loads(job.body)
                dest = job.stats()['tube']
            except Exception:
                # Body is not valid JSON (or stats failed): bury the job so
                # it is kept server-side instead of being lost.
                job.bury()
            else:
                job.delete()
        else:
            raise Empty()
        return item, dest

    def _put(self, queue, message, **kwargs):
        """Publish ``message`` onto the tube named ``queue``."""
        extra = {}
        priority = message['properties']['delivery_info']['priority']
        ttr = message['properties'].get('ttr')
        if ttr is not None:
            extra['ttr'] = ttr

        self.client.use(queue)
        self.client.put(dumps(message), priority=priority, **extra)

    def _get(self, queue):
        """Reserve a single job, watching only the tube for ``queue``."""
        if queue not in self.client.watching():
            self.client.watch(queue)

        [self.client.ignore(active) for active in self.client.watching()
            if active != queue]

        job = self.client.reserve(timeout=1)
        item, dest = self._parse_job(job)
        return item

    def _get_many(self, queues, timeout=1):
        """Reserve one job from any of ``queues``."""
        # timeout of None will cause beanstalk to timeout waiting
        # for a new request
        if timeout is None:
            timeout = 1

        watching = self.client.watching()

        [self.client.watch(active) for active in queues
            if active not in watching]

        [self.client.ignore(active) for active in watching
            if active not in queues]

        job = self.client.reserve(timeout=timeout)
        return self._parse_job(job)

    def _purge(self, queue):
        """Drain and delete every job in ``queue``; return the count."""
        if queue not in self.client.watching():
            self.client.watch(queue)

        [self.client.ignore(active)
            for active in self.client.watching()
            if active != queue]
        count = 0
        while 1:
            job = self.client.reserve(timeout=1)
            if job:
                job.delete()
                count += 1
            else:
                break
        return count

    def _size(self, queue):
        # Size reporting is not implemented for this transport; callers
        # always see 0.
        return 0

    def _open(self):
        """Create and connect a beanstalkc client from the connection info."""
        conninfo = self.connection.client
        host = conninfo.hostname or 'localhost'
        port = conninfo.port or DEFAULT_PORT
        conn = beanstalkc.Connection(host=host, port=port)
        conn.connect()
        return conn

    def close(self):
        # BUG FIX: this method previously *returned* the result of
        # self._client.close(), so super().close() was skipped whenever a
        # client existed, leaving the virtual channel state uncleaned.
        if self._client is not None:
            self._client.close()
        super(Channel, self).close()

    @property
    def client(self):
        """Lazily-connected beanstalkc client."""
        if self._client is None:
            self._client = self._open()
        return self._client
class Transport(virtual.Transport):
    # Beanstalk transport: wires the Channel above into kombu's virtual
    # transport machinery and declares the error taxonomy.
    Channel = Channel

    # Interval used by the virtual transport when polling for messages.
    polling_interval = 1
    default_port = DEFAULT_PORT
    # Errors that indicate the whole connection is unusable...
    connection_errors = (StdConnectionError,
                         socket.error,
                         beanstalkc.SocketError,
                         IOError)
    # ...versus errors scoped to a single channel.
    channel_errors = (StdChannelError,
                      socket.error,
                      IOError,
                      beanstalkc.SocketError,
                      beanstalkc.BeanstalkcException)
    driver_type = 'beanstalk'
    driver_name = 'beanstalkc'

    def driver_version(self):
        # Report the version of the underlying beanstalkc library.
        return beanstalkc.__version__
| depop/kombu | kombu/transport/beanstalk.py | Python | bsd-3-clause | 3,697 |
from gettext import gettext as _
import sys
import pulp.common.tags as tag_utils
from pulp.bindings.exceptions import PulpServerException
from pulp.client import parsers
from pulp.client.extensions.extensions import PulpCliSection, PulpCliFlag, PulpCliOption
# Guidance for render_document_list on how to display task info
# Field order when rendering a single task's full details (includes 'result')
TASK_DETAILS_DOC_ORDER = ['operations', 'resources', 'state', 'start_time', 'finish_time',
                          'result', 'task_id']
# Shorter field order used for rows in the task list (no 'result')
TASK_LIST_DOC_ORDER = ['operations', 'resources', 'state', 'start_time', 'finish_time', 'task_id']
def initialize(context):
    """Register the task CLI sections: one at the root covering all server
    tasks and one beneath 'repo' scoped to a single repository.
    """
    # Root-level section covering every task on the server.
    context.cli.add_section(
        AllTasksSection(context, 'tasks', _('list and cancel server-side tasks')))

    # Repository-scoped section, attached under the existing 'repo' section.
    repo_scoped_section = RepoTasksSection(
        context, 'tasks',
        _('list and cancel tasks related to a specific repository'))
    context.cli.find_section('repo').add_subsection(repo_scoped_section)
class BaseTasksSection(PulpCliSection):
"""
Base class for handling tasks in the Pulp server. This should be subclassed
to provide consistent functionality for a subset of tasks.
"""
all_flag = PulpCliFlag('--all', _('if specified, all tasks in all states are shown'),
aliases=['-a'])
state_option = PulpCliOption('--state',
_('comma-separated list of tasks states desired to be '
'shown. Example: "running,waiting,canceled,successful,failed". '
'Do not include spaces'), aliases=['-s'], required=False,
parse_func=parsers.csv)
def __init__(self, context, name, description):
PulpCliSection.__init__(self, name, description)
self.context = context
# Store the command instances as instance variables so the subclasses
# can manipulate them if necessary
self.list_command = self.create_command(
'list', _('lists tasks queued (waiting) or running on the server'), self.list
)
self.cancel_command = self.create_command('cancel', _('cancel one or more tasks'),
self.cancel)
self.cancel_command.create_option('--task-id', _('identifies the task to cancel'),
required=True)
self.details_command = self.create_command('details', _(
'displays more detailed information about a specific task'), self.details
)
self.details_command.create_option('--task-id', _('identifies the task'), required=True)
def list(self, **kwargs):
"""
Displays a list of tasks. The list of tasks is driven by the
retrieve_tasks method which should be overridden to provide the
correct behavior.
"""
self.context.prompt.render_title('Tasks')
if kwargs.get(self.all_flag.keyword) and kwargs.get(self.state_option.keyword):
msg = _('These arguments cannot be used together')
self.context.prompt.render_failure_message(msg)
return
task_objects = self.retrieve_tasks(**kwargs)
# Easy out clause
if len(task_objects) is 0:
self.context.prompt.render_paragraph('No tasks found')
return
# Parse each task object into a document to be displayed using the
# prompt utilities
task_documents = []
for task in task_objects:
# Interpret task values
state, start_time, finish_time, result = self.parse_state(task)
actions, resources = self.parse_tags(task)
task_doc = {
'operations': ', '.join(actions),
'resources': ', '.join(resources),
'task_id': task.task_id,
'state': state,
'start_time': start_time,
'finish_time': finish_time,
}
task_documents.append(task_doc)
self.context.prompt.render_document_list(task_documents, order=TASK_LIST_DOC_ORDER)
def details(self, **kwargs):
"""
Displays detailed information about a single task. The task ID must
be in kwargs under "task-id".
"""
self.context.prompt.render_title('Task Details')
task_id = kwargs['task-id']
response = self.context.server.tasks.get_task(task_id)
task = response.response_body
# Interpret task values
state, start_time, finish_time, result = self.parse_state(task)
actions, resources = self.parse_tags(task)
# Assemble document to be displayed
task_doc = {
'operations': ', '.join(actions),
'resources': ', '.join(resources),
'task_id': task.task_id,
'state': state,
'start_time': start_time,
'finish_time': finish_time,
'result': result,
'progress_report': task.progress_report,
}
if task.exception:
task_doc['exception'] = task.exception
if task.traceback:
task_doc['traceback'] = task.traceback
self.context.prompt.render_document(task_doc, order=TASK_DETAILS_DOC_ORDER)
def cancel(self, **kwargs):
"""
Attempts to cancel a task. Only unstarted tasks and those that support
cancellation (sync, publish) can be canceled. If a task does not support
cancelling, a not implemented error (501) will be raised from the server.
We should handle that gracefully to explain to the user what happend.
Otherwise, all other errors should bubble up to the exception middleware
as usual.
"""
task_id = kwargs['task-id']
try:
self.context.server.tasks.cancel_task(task_id)
self.context.prompt.render_success_message(_('Task cancel is successfully initiated.'))
except PulpServerException, e:
# A 501 has a bit of a special meaning here that's not used in the
# exception middleware, so handle it here.
if e.http_status == 501:
msg = _('The requested task does not support cancellation.')
self.context.prompt.render_failure_message(msg)
return
else:
raise e, None, sys.exc_info()[2]
@staticmethod
def parse_state(task):
"""
Uses the state of the task to return user-friendly descriptions of the
state and task timing values.
@param task: object representation of the task
@type task: Task
@return: tuple of state, start time, finish time, and result
@rtype: (str, str, str, str)
"""
state = _('Unknown')
result = _('Unknown')
start_time = task.start_time or _('Unstarted')
finish_time = task.finish_time or _('Incomplete')
if task.is_waiting():
state = _('Waiting')
result = _('Incomplete')
elif task.is_running():
state = _('Running')
result = _('Incomplete')
elif task.is_completed():
if task.was_successful():
state = _('Successful')
# Use the result value or pretty text if there was none
result = task.result or _('N/A')
elif task.was_failure():
state = _('Failed')
result = task.result or _('N/A')
elif task.was_skipped():
state = _('Skipped')
start_time = _('N/A')
finish_time = _('N/A')
result = _('N/A')
elif task.was_cancelled():
state = _('Canceled')
result = _('N/A')
return state, start_time, finish_time, result
@staticmethod
def parse_tags(task):
"""
Uses the tags entry in the task to render a user-friendly display of
the actions and resources involved in the task.
@param task: object representation of the task
@type task: Task
@return: tuple of list of actions and list of resources involved
@rtype: ([], [])
"""
actions = []
resources = []
for t in task.tags:
if tag_utils.is_resource_tag(t):
resource_type, resource_id = tag_utils.parse_resource_tag(t)
resources.append('%s (%s)' % (resource_id, resource_type))
else:
tag_value = tag_utils.parse_value(t)
actions.append(tag_value)
return actions, resources
def retrieve_tasks(self):
"""
Override this with the specific call to the server to retrieve just
the desired tasks.
@return: response from the server
"""
raise NotImplementedError()
class AllTasksSection(BaseTasksSection):
    """Task section that operates on every task known to the server."""

    # Minimal set of task attributes needed to render the list/details views.
    FIELDS = ('tags', 'task_id', 'state', 'start_time', 'finish_time')

    def __init__(self, context, name, description):
        BaseTasksSection.__init__(self, context, name, description)

        # This section supports both the --all flag and --state filtering.
        self.list_command.add_option(self.all_flag)
        self.list_command.add_option(self.state_option)

    def retrieve_tasks(self, **kwargs):
        """
        :return: list of pulp.bindings.responses.Task instances
        :rtype:  list
        """
        search = self.context.server.tasks_search.search
        if kwargs.get(self.all_flag.keyword):
            # --all: no state filter at all.
            return search(fields=self.FIELDS)
        states = kwargs.get(self.state_option.keyword)
        if not states:
            # Default view: only tasks that are still active.
            states = ['running', 'waiting']
        return search(filters={'state': {'$in': states}}, fields=self.FIELDS)
class RepoTasksSection(BaseTasksSection):
    """Task section limited to the tasks of one repository."""

    def __init__(self, context, name, description):
        BaseTasksSection.__init__(self, context, name, description)
        # Unlike the all-tasks section, a repository must be named explicitly.
        self.list_command.create_option('--repo-id', _('identifies the repository to display'),
                                        required=True)

    def retrieve_tasks(self, **kwargs):
        """
        :return: list of pulp.bindings.responses.Task instances
        :rtype:  list
        """
        repo_tasks = self.context.server.tasks.get_repo_tasks(kwargs['repo-id'])
        return repo_tasks.response_body
| credativ/pulp | client_admin/pulp/client/admin/tasks.py | Python | gpl-2.0 | 10,673 |
# -*- coding: utf-8 -*-
# Generated by Anselmo on 2019-06-25 12:42
from __future__ import unicode_literals
from django.db import migrations, models
def load_mod_passado_from_fixture(apps, schema_editor):
    """Forward migration step: load ModeloPassado data from the named fixture."""
    # Imported locally, as is conventional for migration RunPython callables.
    from django.core.management import call_command
    call_command("loaddata", "0002_fo2_mod_passado_loaddata")
def delete_mod_passado(apps, schema_editor):
    """Reverse migration step: remove every ModeloPassado row loaded by the fixture."""
    # Use the historical model, not a direct import, per Django migration rules.
    ModeloPassado = apps.get_model("comercial", "ModeloPassado")
    ModeloPassado.objects.all().delete()
class Migration(migrations.Migration):
    # Data migration: loads the ModeloPassado fixture forward and deletes the
    # loaded rows on reverse.
    dependencies = [
        ('comercial', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(
            load_mod_passado_from_fixture, delete_mod_passado),
    ]
| anselmobd/fo2 | src/comercial/migrations/0002_fo2_mod_passado_loaddata.py | Python | mit | 702 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
API and utilities for nova-network interactions.
"""
import copy
import functools
import inspect
import time
import typing as ty
from keystoneauth1 import loading as ks_loading
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from nova.accelerator import cyborg
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network import constants
from nova.network import model as network_model
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
from nova.policies import servers as servers_policies
from nova import profiler
from nova import service_auth
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# Lazily-initialized module-level singletons for the keystone session and the
# admin auth plugin; cleared via reset_state().
_SESSION = None
_ADMIN_AUTH = None
def reset_state():
    """Discard the cached keystone session and admin auth plugin so they are
    rebuilt on next use.
    """
    global _SESSION, _ADMIN_AUTH
    _ADMIN_AUTH = None
    _SESSION = None
def _load_auth_plugin(conf):
    """Load the [neutron] auth plugin from *conf*.

    Raises Unauthorized (with operator guidance logged) when authentication
    is not configured.
    """
    plugin = ks_loading.load_auth_from_conf_options(
        conf, nova.conf.neutron.NEUTRON_GROUP)
    if plugin:
        return plugin

    if conf.neutron.auth_type is None:
        # An end user hitting this path via the REST API only sees a 500,
        # which is accurate for a mis-configured system, so leave a
        # breadcrumb for the operator checking the logs.
        LOG.error('The [neutron] section of your nova configuration file '
                  'must be configured for authentication with the networking '
                  'service endpoint. See the networking service install guide '
                  'for details: '
                  'https://docs.openstack.org/neutron/latest/install/')

    err_msg = _('Unknown auth type: %s') % conf.neutron.auth_type
    raise neutron_client_exc.Unauthorized(message=err_msg)
def get_binding_profile(port):
    """Convenience method to get the binding:profile from the port

    The binding:profile in the port is undefined in the networking service
    API and is dependent on backend configuration. This means it could be
    an empty dict, None, or have some values.

    :param port: dict port response body from the networking service API
    :returns: The port binding:profile dict; empty if not set on the port
    """
    profile = port.get(constants.BINDING_PROFILE, {})
    # Normalize None (explicitly unset) to an empty dict for callers.
    return profile if profile else {}
def update_instance_cache_with_nw_info(impl, context, instance, nw_info=None):
    # Refresh the instance's network info cache, fetching fresh network info
    # from `impl` when a valid NetworkInfo object was not supplied.
    if instance.deleted:
        LOG.debug('Instance is deleted, no further info cache update',
                  instance=instance)
        return

    try:
        # Anything that is not a NetworkInfo object is discarded and refetched.
        if not isinstance(nw_info, network_model.NetworkInfo):
            nw_info = None
        if nw_info is None:
            nw_info = impl._get_instance_nw_info(context, instance)

        LOG.debug('Updating instance_info_cache with network_info: %s',
                  nw_info, instance=instance)

        # NOTE(comstud): The save() method actually handles updating or
        # creating the instance.  We don't need to retrieve the object
        # from the DB first.
        ic = objects.InstanceInfoCache.new(context, instance.uuid)
        ic.network_info = nw_info
        ic.save()
        instance.info_cache = ic
    except exception.InstanceNotFound as e:
        # The instance could have moved during a cross-cell migration when we
        # receive an external event from neutron. Avoid logging a traceback
        # when it happens.
        msg = str(e)
        if e.__class__.__name__.endswith('_Remote'):
            # If this exception was raised remotely over RPC, the traceback(s)
            # will be appended to the message. Truncate it in that case.
            msg = utils.safe_truncate(msg.split('\n', 1)[0], 255)
        LOG.info('Failed storing info cache due to: %s. '
                 'The instance may have moved to another cell during a '
                 'cross-cell migration', msg, instance=instance)
        raise exception.InstanceNotFound(message=msg)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Failed storing info cache', instance=instance)
def refresh_cache(f):
    """Decorator to update the instance_info_cache

    Requires context and instance as function args
    """
    argspec = inspect.getfullargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                # Subtract 2 because argspec.args includes 'self' and
                # 'context', which are bound explicitly above and therefore
                # not present in *args.
                instance = args[argspec.args.index('instance') - 2]
        except ValueError:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)

        with lockutils.lock('refresh_cache-%s' % instance.uuid):
            # We need to call the wrapped function with the lock held to ensure
            # that it can call _get_instance_nw_info safely.
            res = f(self, context, *args, **kwargs)
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=res)
        # return the original function's return value
        return res
    return wrapper
@profiler.trace_cls("neutron_api")
class ClientWrapper(clientv20.Client):
    """A Neutron client wrapper class.

    Wraps the callable methods, catches Unauthorized,Forbidden from Neutron and
    convert it to a 401,403 for Nova clients.
    """

    def __init__(self, base_client, admin):
        # Expose all attributes from the base_client instance
        self.__dict__ = base_client.__dict__
        self.base_client = base_client
        self.admin = admin

    def __getattribute__(self, name):
        # Intercept *every* attribute lookup so that any callable (i.e. any
        # API method inherited from the neutron client) is returned wrapped
        # by proxy(), which performs the exception translation.
        obj = object.__getattribute__(self, name)
        if callable(obj):
            obj = object.__getattribute__(self, 'proxy')(obj)
        return obj

    def proxy(self, obj):
        # Returns a closure that invokes the wrapped callable and maps
        # neutron auth errors onto nova exceptions.
        def wrapper(*args, **kwargs):
            try:
                ret = obj(*args, **kwargs)
            except neutron_client_exc.Unauthorized:
                if not self.admin:
                    # Token is expired so Neutron is raising a
                    # unauthorized exception, we should convert it to
                    # raise a 401 to make client to handle a retry by
                    # regenerating a valid token and trying a new
                    # attempt.
                    raise exception.Unauthorized()
                # In admin context if token is invalid Neutron client
                # should be able to regenerate a valid by using the
                # Neutron admin credential configuration located in
                # nova.conf.
                LOG.error("Neutron client was not able to generate a "
                          "valid admin token, please verify Neutron "
                          "admin credential located in nova.conf")
                raise exception.NeutronAdminCredentialConfigurationInvalid()
            except neutron_client_exc.Forbidden as e:
                raise exception.Forbidden(str(e))
            return ret
        return wrapper
def _get_auth_plugin(context, admin=False):
    """Return the auth plugin to use for *context*.

    Admin contexts without a token fall back to the cached admin
    credentials loaded from nova.conf.
    """
    # NOTE(dprince): In the case where no auth_token is present we allow use of
    # neutron admin tenant credentials if it is an admin context. This is to
    # support some services (metadata API) where an admin context is used
    # without an auth token.
    global _ADMIN_AUTH
    use_admin_creds = admin or (context.is_admin and not context.auth_token)
    if use_admin_creds:
        if not _ADMIN_AUTH:
            _ADMIN_AUTH = _load_auth_plugin(CONF)
        return _ADMIN_AUTH

    if context.auth_token:
        return service_auth.get_auth_plugin(context)

    # We did not get a user token and we should not be using
    # an admin token so log an error
    raise exception.Unauthorized()
def _get_session():
    """Return the module-wide keystone session, creating it on first use."""
    global _SESSION
    if not _SESSION:
        _SESSION = ks_loading.load_session_from_conf_options(
            CONF, nova.conf.neutron.NEUTRON_GROUP)
    return _SESSION
def get_client(context, admin=False):
    """Return a wrapped neutron client authenticated for *context*.

    The ClientWrapper converts neutron Unauthorized/Forbidden errors into
    the corresponding nova exceptions.
    """
    auth_plugin = _get_auth_plugin(context, admin=admin)
    session = _get_session()

    # NOTE(efried): We build an adapter to pull conf options to pass to
    # neutronclient, which uses them to build an Adapter. This should be
    # unwound at some point.
    adapter = utils.get_ksa_adapter(
        'network', ksa_auth=auth_plugin, ksa_session=session)

    client_args = {
        'session': session,
        'auth': auth_plugin,
        'global_request_id': context.global_id,
        'connect_retries': CONF.neutron.http_retries,
        'service_type': adapter.service_type,
        'service_name': adapter.service_name,
        'interface': adapter.interface,
        'region_name': adapter.region_name,
        'endpoint_override': adapter.endpoint_override,
    }

    return ClientWrapper(clientv20.Client(**client_args),
                         admin=admin or context.is_admin)
def _is_not_duplicate(item, items, items_list_name, instance):
present = item in items
# The expectation from this function's perspective is that the
# item is not part of the items list so if it is part of it
# we should at least log it as a warning
if present:
LOG.warning("%(item)s already exists in list: %(list_name)s "
"containing: %(items)s. ignoring it",
{'item': item,
'list_name': items_list_name,
'items': items},
instance=instance)
return not present
def _ensure_no_port_binding_failure(port):
    """Raise PortBindingFailed if neutron reported a failed binding for *port*."""
    if port.get('binding:vif_type') == network_model.VIF_TYPE_BINDING_FAILED:
        raise exception.PortBindingFailed(port_id=port['id'])
class API:
"""API for interacting with the neutron 2.x API."""
    def __init__(self):
        # Timestamp of the most recent neutron extension sync; None forces a
        # refresh on first use (sync logic lives outside this chunk).
        self.last_neutron_extension_sync = None
        # Cache of neutron extension info — NOTE(review): populated elsewhere
        # in this class; confirm the key scheme against the sync code.
        self.extensions = {}
        # PCI device whitelist built from nova's passthrough configuration.
        self.pci_whitelist = pci_whitelist.Whitelist(
            CONF.pci.passthrough_whitelist)
def _update_port_with_migration_profile(
self, instance, port_id, port_profile, admin_client):
try:
updated_port = admin_client.update_port(
port_id, {'port': {constants.BINDING_PROFILE: port_profile}})
return updated_port
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error("Unable to update binding profile "
"for port: %(port)s due to failure: %(error)s",
{'port': port_id, 'error': ex},
instance=instance)
def _clear_migration_port_profile(
self, context, instance, admin_client, ports):
for p in ports:
# If the port already has a migration profile and if
# it is to be torn down, then we need to clean up
# the migration profile.
port_profile = get_binding_profile(p)
if not port_profile:
continue
if constants.MIGRATING_ATTR in port_profile:
del port_profile[constants.MIGRATING_ATTR]
LOG.debug("Removing port %s migration profile", p['id'],
instance=instance)
self._update_port_with_migration_profile(
instance, p['id'], port_profile, admin_client)
def _setup_migration_port_profile(
self, context, instance, host, admin_client, ports):
# Migrating to a new host
for p in ports:
# If the host hasn't changed, there is nothing to do.
# But if the destination host is different than the
# current one, please update the port_profile with
# the 'migrating_to'(constants.MIGRATING_ATTR) key pointing to
# the given 'host'.
host_id = p.get(constants.BINDING_HOST_ID)
if host_id != host:
port_profile = get_binding_profile(p)
# If the "migrating_to" attribute already points at the given
# host, then skip the port update call since we're not changing
# anything.
if host != port_profile.get(constants.MIGRATING_ATTR):
port_profile[constants.MIGRATING_ATTR] = host
self._update_port_with_migration_profile(
instance, p['id'], port_profile, admin_client)
LOG.debug("Port %(port_id)s updated with migration "
"profile %(profile_data)s successfully",
{'port_id': p['id'],
'profile_data': port_profile},
instance=instance)
    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures.

        :param context: The user request context.
        :param instance: The instance with attached ports.
        :param host: Optional host used to control the setup. If provided and
            is not the same as the current instance.host, this method assumes
            the instance is being migrated and sets the "migrating_to"
            attribute in the binding profile for the attached ports.
        :param teardown: Whether or not network information for the ports
            should be cleaned up. If True, at a minimum the "migrating_to"
            attribute is cleared in the binding profile for the ports. If a
            host is also provided, then port bindings for that host are
            deleted when teardown is True as long as the host does not match
            the current instance.host.
        :raises: nova.exception.PortBindingDeletionFailed if host is not None,
            teardown is True, and port binding deletion fails.
        """
        # Check if the instance is migrating to a new host.
        port_migrating = host and (instance.host != host)
        # If the port is migrating to a new host or if it is a
        # teardown on the original host, then proceed.
        if port_migrating or teardown:
            # Only ports currently bound to the instance's host are relevant.
            search_opts = {'device_id': instance.uuid,
                           'tenant_id': instance.project_id,
                           constants.BINDING_HOST_ID: instance.host}
            # Now get the port details to process the ports
            # binding profile info.
            data = self.list_ports(context, **search_opts)
            ports = data['ports']
            # Profile updates require admin credentials.
            admin_client = get_client(context, admin=True)
            if teardown:
                # Reset the port profile
                self._clear_migration_port_profile(
                    context, instance, admin_client, ports)
                # If a host was provided, delete any bindings between that
                # host and the ports as long as the host isn't the same as
                # the current instance.host.
                has_binding_ext = self.has_port_binding_extension(
                    client=admin_client)
                if port_migrating and has_binding_ext:
                    self._delete_port_bindings(context, ports, host)
            elif port_migrating:
                # Setup the port profile
                self._setup_migration_port_profile(
                    context, instance, host, admin_client, ports)
def _delete_port_bindings(self, context, ports, host):
"""Attempt to delete all port bindings on the host.
:param context: The user request context.
:param ports: list of port dicts to cleanup; the 'id' field is required
per port dict in the list
:param host: host from which to delete port bindings
:raises: PortBindingDeletionFailed if port binding deletion fails.
"""
client = get_client(context, admin=True)
failed_port_ids = []
for port in ports:
# This call is safe in that 404s for non-existing
# bindings are ignored.
try:
client.delete_port_binding(port['id'], host)
except neutron_client_exc.NeutronClientException as exc:
# We can safely ignore 404s since we're trying to delete
# the thing that wasn't found anyway, but for everything else
# we should log an error
if exc.status_code == 404:
continue
failed_port_ids.append(port['id'])
LOG.exception(
"Failed to delete binding for port %(port_id)s on host "
"%(host)s", {'port_id': port['id'], 'host': host})
if failed_port_ids:
raise exception.PortBindingDeletionFailed(
port_id=','.join(failed_port_ids), host=host)
    def _get_available_networks(self, context, project_id,
                                net_ids=None, neutron=None,
                                auto_allocate=False):
        """Return a network list available for the tenant.
        The list contains networks owned by the tenant and public networks.
        If net_ids specified, it searches networks with requested IDs only.
        """
        if not neutron:
            neutron = get_client(context)

        if net_ids:
            # If user has specified to attach instance only to specific
            # networks then only add these to **search_opts. This search will
            # also include 'shared' networks.
            search_opts = {'id': net_ids}
            nets = neutron.list_networks(**search_opts).get('networks', [])
        else:
            # (1) Retrieve non-public network list owned by the tenant.
            search_opts = {'tenant_id': project_id, 'shared': False}
            if auto_allocate:
                # The auto-allocated-topology extension may create complex
                # network topologies and it does so in a non-transactional
                # fashion. Therefore API users may be exposed to resources that
                # are transient or partially built. A client should use
                # resources that are meant to be ready and this can be done by
                # checking their admin_state_up flag.
                search_opts['admin_state_up'] = True
            nets = neutron.list_networks(**search_opts).get('networks', [])
            # (2) Retrieve public network list.
            # NOTE: search_opts is deliberately rebuilt here; the shared query
            # must not inherit the tenant filter from step (1).
            search_opts = {'shared': True}
            nets += neutron.list_networks(**search_opts).get('networks', [])

        # Preserve the caller's requested ordering of network IDs.
        _ensure_requested_network_ordering(
            lambda x: x['id'],
            nets,
            net_ids)

        return nets
    def _cleanup_created_port(self, port_client, port_id, instance):
        """Best-effort delete of a port created earlier in a failed flow.

        Failures to delete are logged but never raised: this runs while
        another error is being handled and must not mask it.
        """
        try:
            port_client.delete_port(port_id)
        except neutron_client_exc.NeutronClientException:
            LOG.exception(
                'Failed to delete port %(port_id)s while cleaning up after an '
                'error.', {'port_id': port_id},
                instance=instance)
    def _create_port_minimal(self, context, port_client, instance, network_id,
                             fixed_ip=None, security_group_ids=None):
        """Attempts to create a port for the instance on the given network.

        :param context: The request context.
        :param port_client: The client to use to create the port.
        :param instance: Create the port for the given instance.
        :param network_id: Create the port on the given network.
        :param fixed_ip: Optional fixed IP to use from the given network.
        :param security_group_ids: Optional list of security group IDs to
            apply to the port.
        :returns: The created port.
        :raises PortLimitExceeded: If neutron fails with an OverQuota error.
        :raises NoMoreFixedIps: If neutron fails with
            IpAddressGenerationFailure error.
        :raises: PortBindingFailed: If port binding failed.
        :raises NetworksWithQoSPolicyNotSupported: if the created port has
            resource request.
        """
        # Set the device_id so it's clear who this port was created for,
        # and to stop other instances trying to use it
        port_req_body = {'port': {'device_id': instance.uuid}}
        try:
            if fixed_ip:
                port_req_body['port']['fixed_ips'] = [
                    {'ip_address': str(fixed_ip)}]
            port_req_body['port']['network_id'] = network_id
            port_req_body['port']['admin_state_up'] = True
            port_req_body['port']['tenant_id'] = instance.project_id
            if security_group_ids:
                port_req_body['port']['security_groups'] = security_group_ids
            port_response = port_client.create_port(port_req_body)
            port = port_response['port']
            port_id = port['id']
            # NOTE(gibi): Checking if the created port has resource request as
            # such ports are currently not supported as they would at least
            # need resource allocation manipulation in placement but might also
            # need a new scheduling if resource on this host is not available.
            if self._has_resource_request(context, port, port_client):
                msg = (
                    "The auto-created port %(port_id)s is being deleted due "
                    "to its network having QoS policy.")
                LOG.info(msg, {'port_id': port_id})
                self._cleanup_created_port(port_client, port_id, instance)
                # NOTE(gibi): This limitation regarding server create can be
                # removed when the port creation is moved to the conductor. But
                # this code also limits attaching a network that has QoS
                # minimum bandwidth rule.
                raise exception.NetworksWithQoSPolicyNotSupported(
                    instance_uuid=instance.uuid, network_id=network_id)
            try:
                _ensure_no_port_binding_failure(port)
            except exception.PortBindingFailed:
                # The port exists but neutron reported a failed binding:
                # roll back by deleting it before re-raising.
                with excutils.save_and_reraise_exception():
                    port_client.delete_port(port_id)
            LOG.debug('Successfully created port: %s', port_id,
                      instance=instance)
            return port
        except neutron_client_exc.InvalidIpForNetworkClient:
            # The requested fixed IP is not valid on this network; surface
            # it to the API user as an InvalidInput error.
            LOG.warning('Neutron error: %(ip)s is not a valid IP address '
                        'for network %(network_id)s.',
                        {'ip': fixed_ip, 'network_id': network_id},
                        instance=instance)
            msg = (_('Fixed IP %(ip)s is not a valid ip address for '
                     'network %(network_id)s.') %
                   {'ip': fixed_ip, 'network_id': network_id})
            raise exception.InvalidInput(reason=msg)
        except (neutron_client_exc.IpAddressInUseClient,
                neutron_client_exc.IpAddressAlreadyAllocatedClient):
            LOG.warning('Neutron error: Fixed IP %s is '
                        'already in use.', fixed_ip, instance=instance)
            msg = _("Fixed IP %s is already in use.") % fixed_ip
            raise exception.FixedIpAlreadyInUse(message=msg)
        except neutron_client_exc.OverQuotaClient:
            # The tenant ran out of port quota in neutron.
            LOG.warning(
                'Neutron error: Port quota exceeded in tenant: %s',
                port_req_body['port']['tenant_id'], instance=instance)
            raise exception.PortLimitExceeded()
        except neutron_client_exc.IpAddressGenerationFailureClient:
            # The network has no free fixed IPs left.
            LOG.warning('Neutron error: No more fixed IPs in network: %s',
                        network_id, instance=instance)
            raise exception.NoMoreFixedIps(net=network_id)
        except neutron_client_exc.NeutronClientException:
            # Anything else from neutron is unexpected: log and re-raise.
            with excutils.save_and_reraise_exception():
                LOG.exception('Neutron error creating port on network %s',
                              network_id, instance=instance)
def _update_port(self, port_client, instance, port_id,
port_req_body):
try:
port_response = port_client.update_port(port_id, port_req_body)
port = port_response['port']
_ensure_no_port_binding_failure(port)
LOG.debug('Successfully updated port: %s', port_id,
instance=instance)
return port
except neutron_client_exc.MacAddressInUseClient:
mac_address = port_req_body['port'].get('mac_address')
network_id = port_req_body['port'].get('network_id')
LOG.warning('Neutron error: MAC address %(mac)s is already '
'in use on network %(network)s.',
{'mac': mac_address, 'network': network_id},
instance=instance)
raise exception.PortInUse(port_id=mac_address)
except neutron_client_exc.HostNotCompatibleWithFixedIpsClient:
network_id = port_req_body['port'].get('network_id')
LOG.warning('Neutron error: Tried to bind a port with '
'fixed_ips to a host in the wrong segment on '
'network %(network)s.',
{'network': network_id}, instance=instance)
raise exception.FixedIpInvalidOnHost(port_id=port_id)
def _check_external_network_attach(self, context, nets):
"""Check if attaching to external network is permitted."""
if not context.can(servers_policies.NETWORK_ATTACH_EXTERNAL,
fatal=False):
for net in nets:
# Perform this check here rather than in validate_networks to
# ensure the check is performed every time
# allocate_for_instance is invoked
if net.get('router:external') and not net.get('shared'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
def _unbind_ports(self, context, ports,
neutron, port_client=None):
"""Unbind the given ports by clearing their device_id,
device_owner and dns_name.
:param context: The request context.
:param ports: list of port IDs.
:param neutron: neutron client for the current context.
:param port_client: The client with appropriate karma for
updating the ports.
"""
if port_client is None:
# Requires admin creds to set port bindings
port_client = get_client(context, admin=True)
# it is a dict of network dicts as returned by the neutron client keyed
# by network UUID
networks: ty.Dict[str, ty.Dict] = {}
for port_id in ports:
# A port_id is optional in the NetworkRequest object so check here
# in case the caller forgot to filter the list.
if port_id is None:
continue
port_req_body: ty.Dict[str, ty.Any] = {
'port': {
'device_id': '',
'device_owner': '',
constants.BINDING_HOST_ID: None,
}
}
try:
port = self._show_port(
context, port_id, neutron_client=neutron,
fields=[constants.BINDING_PROFILE, 'network_id'])
except exception.PortNotFound:
LOG.debug('Unable to show port %s as it no longer '
'exists.', port_id)
return
except Exception:
# NOTE: In case we can't retrieve the binding:profile or
# network info assume that they are empty
LOG.exception("Unable to get binding:profile for port '%s'",
port_id)
port_profile = {}
network: dict = {}
else:
port_profile = get_binding_profile(port)
net_id = port.get('network_id')
if net_id in networks:
network = networks[net_id]
else:
network = neutron.show_network(net_id,
fields=['dns_domain']
).get('network')
networks[net_id] = network
# Unbind Port device
if port_profile.get('arq_uuid'):
"""Delete device profile by arq uuid."""
cyclient = cyborg.get_client(context)
cyclient.delete_arqs_by_uuid([port_profile['arq_uuid']])
LOG.debug('Delete ARQs %s for port %s',
port_profile['arq_uuid'], port_id)
# NOTE: We're doing this to remove the binding information
# for the physical device but don't want to overwrite the other
# information in the binding profile.
for profile_key in ('pci_vendor_info', 'pci_slot',
constants.ALLOCATION, 'arq_uuid',
'physical_network', 'card_serial_number',
'vf_num', 'pf_mac_address'):
if profile_key in port_profile:
del port_profile[profile_key]
port_req_body['port'][constants.BINDING_PROFILE] = port_profile
# NOTE: For internal DNS integration (network does not have a
# dns_domain), or if we cannot retrieve network info, we use the
# admin client to reset dns_name.
if (
self.has_dns_extension(client=port_client) and
not network.get('dns_domain')
):
port_req_body['port']['dns_name'] = ''
try:
port_client.update_port(port_id, port_req_body)
except neutron_client_exc.PortNotFoundClient:
LOG.debug('Unable to unbind port %s as it no longer '
'exists.', port_id)
except Exception:
LOG.exception("Unable to clear device ID for port '%s'",
port_id)
# NOTE: For external DNS integration, we use the neutron client
# with user's context to reset the dns_name since the recordset is
# under user's zone.
self._reset_port_dns_name(network, port_id, neutron)
    def _validate_requested_port_ids(self, context, instance, neutron,
                                     requested_networks):
        """Processes and validates requested networks for allocation.

        Iterates over the list of NetworkRequest objects, validating the
        request and building sets of ports and networks to
        use for allocating ports for the instance.

        :param context: The user request context.
        :type context: nova.context.RequestContext
        :param instance: allocate networks on this instance
        :type instance: nova.objects.Instance
        :param neutron: neutron client session
        :type neutron: neutronclient.v2_0.client.Client
        :param requested_networks: List of user-requested networks and/or ports
        :type requested_networks: nova.objects.NetworkRequestList
        :returns: tuple of:
            - ports: dict mapping of port id to port dict
            - ordered_networks: list of nova.objects.NetworkRequest objects
              for requested networks (either via explicit network request
              or the network for an explicit port request)
        :raises nova.exception.PortNotFound: If a requested port is not found
            in Neutron.
        :raises nova.exception.PortNotUsable: If a requested port is not owned
            by the same tenant that the instance is created under.
        :raises nova.exception.PortInUse: If a requested port is already
            attached to another instance.
        :raises nova.exception.PortNotUsableDNS: If a requested port has a
            value assigned to its dns_name attribute.
        """
        ports = {}
        ordered_networks = []
        # If we're asked to auto-allocate the network then there won't be any
        # ports or real neutron networks to lookup, so just return empty
        # results.
        if requested_networks and not requested_networks.auto_allocate:
            for request in requested_networks:

                # Process a request to use a pre-existing neutron port.
                if request.port_id:
                    # Make sure the port exists.
                    port = self._show_port(context, request.port_id,
                                           neutron_client=neutron)
                    # Make sure the instance has access to the port.
                    if port['tenant_id'] != instance.project_id:
                        raise exception.PortNotUsable(port_id=request.port_id,
                                                      instance=instance.uuid)

                    # Make sure the port isn't already attached to another
                    # instance.
                    if port.get('device_id'):
                        raise exception.PortInUse(port_id=request.port_id)

                    # Make sure that if the user assigned a value to the port's
                    # dns_name attribute, it is equal to the instance's
                    # hostname
                    if port.get('dns_name'):
                        if port['dns_name'] != instance.hostname:
                            raise exception.PortNotUsableDNS(
                                port_id=request.port_id,
                                instance=instance.uuid, value=port['dns_name'],
                                hostname=instance.hostname)

                    # Make sure the port is usable
                    _ensure_no_port_binding_failure(port)

                    # If requesting a specific port, automatically process
                    # the network for that port as if it were explicitly
                    # requested.
                    request.network_id = port['network_id']
                    ports[request.port_id] = port

                # Process a request to use a specific neutron network. Note
                # this is deliberately not an elif: the port branch above
                # just filled in request.network_id, so its network is
                # appended here too.
                if request.network_id:
                    ordered_networks.append(request)

        return ports, ordered_networks
def _clean_security_groups(self, security_groups):
"""Cleans security groups requested from Nova API
Neutron already passes a 'default' security group when
creating ports so it's not necessary to specify it to the
request.
"""
if not security_groups:
security_groups = []
elif security_groups == [constants.DEFAULT_SECGROUP]:
security_groups = []
return security_groups
def _process_security_groups(self, instance, neutron, security_groups):
"""Processes and validates requested security groups for allocation.
Iterates over the list of requested security groups, validating the
request and filtering out the list of security group IDs to use for
port allocation.
:param instance: allocate networks on this instance
:type instance: nova.objects.Instance
:param neutron: neutron client session
:type neutron: neutronclient.v2_0.client.Client
:param security_groups: list of requested security group name or IDs
to use when allocating new ports for the instance
:return: list of security group IDs to use when allocating new ports
:raises nova.exception.NoUniqueMatch: If multiple security groups
are requested with the same name.
:raises nova.exception.SecurityGroupNotFound: If a requested security
group is not in the tenant-filtered list of available security
groups in Neutron.
"""
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
# NOTE(slaweq): fields other than name and id aren't really needed
# so asking only about those fields will allow Neutron to not
# prepare list of rules for each found security group. That may
# speed processing of this request a lot in case when tenant has
# got many security groups
sg_fields = ['id', 'name']
search_opts = {'tenant_id': instance.project_id}
user_security_groups = neutron.list_security_groups(
fields=sg_fields, **search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
# If there was a name match in a previous iteration
# of the loop, we have a conflict.
if name_match:
raise exception.NoUniqueMatch(
_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific.") %
security_group)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another's security groups uuid, the name takes priority.
if name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
else:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
return security_group_ids
    def _validate_requested_network_ids(self, context, instance, neutron,
                                        requested_networks, ordered_networks):
        """Check requested networks using the Neutron API.

        Check the user has access to the network they requested, and that
        it is a suitable network to connect to. This includes getting the
        network details for any ports that have been passed in, because the
        request will have been updated with the network_id in
        _validate_requested_port_ids.

        If the user has not requested any ports or any networks, we get back
        a full list of networks the user has access to, and if there is only
        one network, we update ordered_networks so we will connect the
        instance to that network.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param neutron: neutron client
        :param requested_networks: nova.objects.NetworkRequestList, list of
            user-requested networks and/or ports; may be empty
        :param ordered_networks: output from _validate_requested_port_ids
            that will be used to create and update ports
        :returns: dict, keyed by network ID, of networks to use
        :raises InterfaceAttachFailedNoNetwork: If no specific networks were
            requested and none are available.
        :raises NetworkAmbiguous: If no specific networks were requested but
            more than one is available.
        :raises ExternalNetworkAttachForbidden: If the policy rules forbid
            the request context from using an external non-shared network but
            one was requested (or available).
        """

        # Get networks from Neutron
        # If net_ids is empty, this actually returns all available nets
        auto_allocate = requested_networks and requested_networks.auto_allocate
        net_ids = [request.network_id for request in ordered_networks]
        nets = self._get_available_networks(context, instance.project_id,
                                            net_ids, neutron=neutron,
                                            auto_allocate=auto_allocate)
        if not nets:

            if requested_networks:
                # There are no networks available for the project to use and
                # none specifically requested, so check to see if we're asked
                # to auto-allocate the network.
                if auto_allocate:
                    # During validate_networks we checked to see if
                    # auto-allocation is available so we don't need to do that
                    # again here.
                    nets = [self._auto_allocate_network(instance, neutron)]
                else:
                    # NOTE(chaochin): If user specifies a network id and the
                    # network can not be found, raise NetworkNotFound error.
                    for request in requested_networks:
                        if not request.port_id and request.network_id:
                            raise exception.NetworkNotFound(
                                network_id=request.network_id)
            else:
                # no requested nets and user has no available nets
                return {}

        # if this function is directly called without a requested_network param
        if (not requested_networks or
            requested_networks.is_single_unspecified or
            requested_networks.auto_allocate):
            # If no networks were requested and none are available, consider
            # it a bad request.
            if not nets:
                raise exception.InterfaceAttachFailedNoNetwork(
                    project_id=instance.project_id)
            # bug/1267723 - if no network is requested and more
            # than one is available then raise NetworkAmbiguous Exception
            if len(nets) > 1:
                msg = _("Multiple possible networks found, use a Network "
                        "ID to be more specific.")
                raise exception.NetworkAmbiguous(msg)
            ordered_networks.append(
                objects.NetworkRequest(network_id=nets[0]['id']))

        # NOTE(melwitt): check external net attach permission after the
        # check for ambiguity; there could be another available net which
        # is permitted. bug/1364344
        self._check_external_network_attach(context, nets)

        return {net['id']: net for net in nets}
    def _create_ports_for_instance(self, context, instance, ordered_networks,
                                   nets, neutron, security_group_ids):
        """Create port for network_requests that don't have a port_id

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param ordered_networks: objects.NetworkRequestList in requested order
        :param nets: a dict of network_id to networks returned from neutron
        :param neutron: neutronclient built from users request context
        :param security_group_ids: a list of security group IDs to be applied
            to any ports created
        :returns a list of pairs (NetworkRequest, created_port_uuid); note
            that created_port_uuid will be None for the pair where a
            pre-existing port was part of the user request
        """
        created_port_ids = []
        requests_and_created_ports = []
        for request in ordered_networks:
            network = nets.get(request.network_id)

            # if network_id did not pass validate_networks() and not available
            # here then skip it safely not continuing with a None Network
            if not network:
                continue

            try:
                port_security_enabled = network.get(
                    'port_security_enabled', True)
                if port_security_enabled:
                    if not network.get('subnets'):
                        # Neutron can't apply security groups to a port
                        # for a network without L3 assignments.
                        LOG.debug('Network with port security enabled does '
                                  'not have subnets so security groups '
                                  'cannot be applied: %s',
                                  network, instance=instance)
                        raise exception.SecurityGroupCannotBeApplied()
                else:
                    if security_group_ids:
                        # We don't want to apply security groups on port
                        # for a network defined with
                        # 'port_security_enabled=False'.
                        LOG.debug('Network has port security disabled so '
                                  'security groups cannot be applied: %s',
                                  network, instance=instance)
                        raise exception.SecurityGroupCannotBeApplied()

                created_port_id = None
                if not request.port_id:
                    # create minimal port, if port not already created by user
                    created_port = self._create_port_minimal(
                        context, neutron, instance, request.network_id,
                        request.address, security_group_ids)
                    created_port_id = created_port['id']
                    created_port_ids.append(created_port_id)

                requests_and_created_ports.append((
                    request, created_port_id))
            except Exception:
                # Roll back every port created so far in this loop before
                # re-raising to the caller.
                with excutils.save_and_reraise_exception():
                    if created_port_ids:
                        self._delete_ports(
                            neutron, instance, created_port_ids)

        return requests_and_created_ports
def _has_resource_request(self, context, port, neutron):
resource_request = port.get(constants.RESOURCE_REQUEST) or {}
if self.has_extended_resource_request_extension(context, neutron):
return bool(resource_request.get(constants.REQUEST_GROUPS, []))
else:
return bool(resource_request)
def instance_has_extended_resource_request(self, instance_uuid):
# NOTE(gibi): We need to use an admin context to query neutron ports as
# neutron does not fill the resource_request field in the port response
# if we query with a non admin context.
admin_context = nova_context.get_admin_context()
if not self.has_extended_resource_request_extension(admin_context):
# Short circuit if the extended resource request API extension is
# not available
return False
# So neutron supports the extended resource request but does the
# instance has a port with such request
search_opts = {'device_id': instance_uuid,
'fields': [constants.RESOURCE_REQUEST]}
ports = self.list_ports(
admin_context, **search_opts).get('ports', [])
for port in ports:
resource_request = port.get(constants.RESOURCE_REQUEST) or {}
if resource_request.get(constants.REQUEST_GROUPS, []):
return True
return False
def get_binding_profile_allocation(
self,
context: nova_context.RequestContext,
port_id: str,
resource_provider_mapping: ty.Dict[str, ty.List[str]],
) -> ty.Union[None, str, ty.Dict[str, str]]:
"""Calculate the value of the allocation key of the binding:profile
based on the allocated resources.
:param context: the request context
:param port_id: the uuid of the neutron port
:param resource_provider_mapping: the mapping returned by the placement
defining which request group get allocated from which resource
providers
:returns: None if the port has no resource request. Returns a single
RP UUID if the port has a legacy resource request. Returns a dict
of request group id: resource provider UUID mapping if the port has
an extended resource request.
"""
# We need to use an admin client as the port.resource_request is admin
# only
neutron_admin = get_client(context, admin=True)
neutron = get_client(context)
port = self._show_port(context, port_id, neutron_client=neutron_admin)
if self._has_resource_request(context, port, neutron):
return self._get_binding_profile_allocation(
context, port, neutron, resource_provider_mapping)
else:
return None
def _get_binding_profile_allocation(
self, context, port, neutron, resource_provider_mapping
):
# TODO(gibi): remove this condition and the else branch once Nova does
# not need to support old Neutron sending the legacy resource request
# extension
if self.has_extended_resource_request_extension(
context, neutron
):
# The extended resource request format also means that a
# port has more than a one request groups
request_groups = port.get(
constants.RESOURCE_REQUEST, {}).get(
constants.REQUEST_GROUPS, [])
# Each request group id from the port needs to be mapped to
# a single provider id from the provider mappings. Each
# group from the port is mapped to a numbered request group
# in placement so we can assume that they are mapped to
# a single provider and therefore the provider mapping list
# has a single provider id.
allocation = {
group['id']: resource_provider_mapping[group['id']][0]
for group in request_groups
}
else:
# This is the legacy resource request format where a port
# is mapped to a single request group
# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But resource
# requests of a Neutron port is always mapped to a
# numbered request group that is always fulfilled by one
# resource provider. So we only pass that single RP UUID
# here.
allocation = resource_provider_mapping[
port['id']][0]
return allocation
    def allocate_for_instance(self, context, instance,
                              requested_networks,
                              security_groups=None, bind_host_id=None,
                              resource_provider_mapping=None,
                              network_arqs=None):
        """Allocate network resources for the instance.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param requested_networks: objects.NetworkRequestList object.
        :param security_groups: None or security groups to allocate for
            instance.
        :param bind_host_id: the host ID to attach to the ports being created.
        :param resource_provider_mapping: a dict keyed by ids of the entities
            (for example Neutron port) requesting resources for this instance
            mapped to a list of resource provider UUIDs that are fulfilling
            such a resource request.
        :param network_arqs: dict keyed by arq uuid, of ARQs allocated to
            ports.
        :returns: network info as from get_instance_nw_info()
        """
        LOG.debug('allocate_for_instance()', instance=instance)
        if not instance.project_id:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(
                reason=msg % instance.uuid)

        # We do not want to create a new neutron session for each call
        neutron = get_client(context)

        # We always need admin_client to build nw_info,
        # we sometimes need it when updating ports
        admin_client = get_client(context, admin=True)

        #
        # Validate ports and networks with neutron. The requested_ports_dict
        # variable is a dict, keyed by port ID, of ports that were on the user
        # request and may be empty. The ordered_networks variable is a list of
        # NetworkRequest objects for any networks or ports specifically
        # requested by the user, which again may be empty.
        #
        # NOTE(gibi): we use the admin_client here to ensure that the returned
        # ports has the resource_request attribute filled as later we use this
        # information to decide when to add allocation key to the port binding.
        # See bug 1849657.
        requested_ports_dict, ordered_networks = (
            self._validate_requested_port_ids(
                context, instance, admin_client, requested_networks))

        nets = self._validate_requested_network_ids(
            context, instance, neutron, requested_networks, ordered_networks)
        if not nets:
            LOG.debug("No network configured", instance=instance)
            return network_model.NetworkInfo([])

        # Validate requested security groups
        security_groups = self._clean_security_groups(security_groups)
        security_group_ids = self._process_security_groups(
            instance, neutron, security_groups)

        # Tell Neutron which resource provider fulfills the ports' resource
        # request.
        # We only consider pre-created ports here as ports created
        # below based on requested networks are not scheduled to have their
        # resource request fulfilled.
        for port in requested_ports_dict.values():
            # only communicate the allocations if the port has resource
            # requests
            if self._has_resource_request(context, port, neutron):
                profile = get_binding_profile(port)
                profile[constants.ALLOCATION] = (
                    self._get_binding_profile_allocation(
                        context, port, neutron, resource_provider_mapping))
                port[constants.BINDING_PROFILE] = profile

        # Create ports from the list of ordered_networks. The returned
        # requests_and_created_ports variable is a list of 2-item tuples of
        # the form (NetworkRequest, created_port_id). Note that a tuple pair
        # will have None for the created_port_id if the NetworkRequest already
        # contains a port_id, meaning the user requested a specific
        # pre-existing port so one wasn't created here. The ports will be
        # updated later in _update_ports_for_instance to be bound to the
        # instance and compute host.
        requests_and_created_ports = self._create_ports_for_instance(
            context, instance, ordered_networks, nets, neutron,
            security_group_ids)

        #
        # Update existing and newly created ports
        #
        ordered_nets, ordered_port_ids, preexisting_port_ids, \
            created_port_ids = self._update_ports_for_instance(
                context, instance,
                neutron, admin_client, requests_and_created_ports, nets,
                bind_host_id, requested_ports_dict, network_arqs)

        #
        # Perform a full update of the network_info_cache,
        # including re-fetching lots of the required data from neutron
        #
        nw_info = self.get_instance_nw_info(
            context, instance, networks=ordered_nets,
            port_ids=ordered_port_ids,
            admin_client=admin_client,
            preexisting_port_ids=preexisting_port_ids)
        # Only return info about ports we processed in this run, which might
        # have been pre-existing neutron ports or ones that nova created. In
        # the initial allocation case (server create), this will be everything
        # we processed, and in later runs will only be what was processed that
        # time. For example, if the instance was created with port A and
        # then port B was attached in this call, only port B would be returned.
        # Thus, this filtering only affects the attach case.
        return network_model.NetworkInfo([vif for vif in nw_info
                                          if vif['id'] in created_port_ids +
                                          preexisting_port_ids])
    def _update_ports_for_instance(self, context, instance, neutron,
            admin_client, requests_and_created_ports, nets,
            bind_host_id, requested_ports_dict, network_arqs):
        """Update ports from network_requests.

        Updates the pre-existing ports and the ones created in
        ``_create_ports_for_instance`` with ``device_id``, ``device_owner``,
        optionally ``mac_address`` and, depending on the
        loaded extensions, ``rxtx_factor``, ``binding:host_id``, ``dns_name``.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param neutron: client using user context
        :param admin_client: client using admin context
        :param requests_and_created_ports: [(NetworkRequest, created_port_id)];
            Note that created_port_id will be None for any user-requested
            pre-existing port.
        :param nets: a dict of network_id to networks returned from neutron
        :param bind_host_id: a string for port['binding:host_id']
        :param requested_ports_dict: dict, keyed by port ID, of ports requested
            by the user
        :param network_arqs: dict keyed by arq uuid, of ARQs allocated to
            ports.
        :returns: tuple with the following::

            * list of network dicts in their requested order
            * list of port IDs in their requested order - note that does not
              mean the port was requested by the user, it could be a port
              created on a network requested by the user
            * list of pre-existing port IDs requested by the user
            * list of created port IDs
        """

        # We currently require admin creds to set port bindings.
        port_client = admin_client

        preexisting_port_ids = []
        created_port_ids = []
        ports_in_requested_order = []
        nets_in_requested_order = []
        created_vifs = []   # this list is for cleanups if we fail
        for request, created_port_id in requests_and_created_ports:
            vifobj = objects.VirtualInterface(context)
            vifobj.instance_uuid = instance.uuid
            vifobj.tag = request.tag if 'tag' in request else None

            network = nets.get(request.network_id)
            # if network_id did not pass validate_networks() and not available
            # here then skip it safely not continuing with a None Network
            if not network:
                continue

            nets_in_requested_order.append(network)

            zone = 'compute:%s' % instance.availability_zone
            port_req_body = {'port': {'device_id': instance.uuid,
                                      'device_owner': zone}}
            # Carry over the binding profile of a user-requested port so the
            # update below does not wipe it.
            if (requested_ports_dict and
                request.port_id in requested_ports_dict and
                get_binding_profile(requested_ports_dict[request.port_id])):
                port_req_body['port'][constants.BINDING_PROFILE] = \
                    get_binding_profile(requested_ports_dict[request.port_id])
            try:
                port_arq = None
                if network_arqs:
                    port_arq = network_arqs.get(request.arq_uuid, None)
                self._populate_neutron_extension_values(
                    context, instance, request.pci_request_id, port_req_body,
                    network=network, neutron=neutron,
                    bind_host_id=bind_host_id,
                    port_arq=port_arq)
                self._populate_pci_mac_address(instance,
                    request.pci_request_id, port_req_body)

                if created_port_id:
                    port_id = created_port_id
                    created_port_ids.append(port_id)
                else:
                    port_id = request.port_id
                ports_in_requested_order.append(port_id)

                # After port is created, update other bits
                updated_port = self._update_port(
                    port_client, instance, port_id, port_req_body)

                # NOTE(danms): The virtual_interfaces table enforces global
                # uniqueness on MAC addresses, which clearly does not match
                # with neutron's view of the world. Since address is a 255-char
                # string we can namespace it with our port id. Using '/' should
                # be safely excluded from MAC address notations as well as
                # UUIDs. We can stop doing this now that we've removed
                # nova-network, but we need to leave the read translation in
                # for longer than that of course.
                vifobj.address = '%s/%s' % (updated_port['mac_address'],
                                            updated_port['id'])
                vifobj.uuid = port_id
                vifobj.create()
                created_vifs.append(vifobj)

                if not created_port_id:
                    # only add if update worked and port create not called
                    preexisting_port_ids.append(port_id)
                self._update_port_dns_name(context, instance, network,
                                           ports_in_requested_order[-1],
                                           neutron)
            except Exception:
                # Roll back: unbind already-updated pre-existing ports,
                # delete ports we created and destroy the VIF records.
                with excutils.save_and_reraise_exception():
                    self._unbind_ports(context,
                                       preexisting_port_ids,
                                       neutron, port_client)
                    self._delete_ports(neutron, instance, created_port_ids)
                    for vif in created_vifs:
                        vif.destroy()

        return (nets_in_requested_order, ports_in_requested_order,
                preexisting_port_ids, created_port_ids)
def _refresh_neutron_extensions_cache(self, client):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync) >=
CONF.neutron.extension_sync_interval)):
extensions_list = client.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = {ext['alias']: ext for ext in extensions_list}
def _has_extension(self, extension, context=None, client=None):
"""Check if the provided neutron extension is enabled.
:param extension: The alias of the extension to check
:param client: keystoneauth1.adapter.Adapter
:param context: nova.context.RequestContext
:returns: True if the neutron extension is available, else False
"""
if client is None:
client = get_client(context)
self._refresh_neutron_extensions_cache(client)
return extension in self.extensions
def has_multi_provider_extension(self, context=None, client=None):
"""Check if the 'multi-provider' extension is enabled.
This extension allows administrative users to define multiple physical
bindings for a logical network.
"""
return self._has_extension(constants.MULTI_PROVIDER, context, client)
def has_dns_extension(self, context=None, client=None):
"""Check if the 'dns-integration' extension is enabled.
This extension adds the 'dns_name' and 'dns_assignment' attributes to
port resources.
"""
return self._has_extension(constants.DNS_INTEGRATION, context, client)
# TODO(gibi): Remove all branches where this is False after Neutron made
# the this extension mandatory. In Xena this extension will be optional to
# support the scenario where Neutron upgraded first. So Neutron can mark
# this mandatory earliest in Yoga.
def has_extended_resource_request_extension(
self, context=None, client=None,
):
return self._has_extension(
constants.RESOURCE_REQUEST_GROUPS, context, client,
)
def has_vnic_index_extension(self, context=None, client=None):
"""Check if the 'vnic-index' extension is enabled.
This extension is provided by the VMWare NSX neutron plugin.
"""
return self._has_extension(constants.VNIC_INDEX, context, client)
def has_fip_port_details_extension(self, context=None, client=None):
"""Check if the 'fip-port-details' extension is enabled.
This extension adds the 'port_details' attribute to floating IPs.
"""
return self._has_extension(constants.FIP_PORT_DETAILS, context, client)
def has_substr_port_filtering_extension(self, context=None, client=None):
"""Check if the 'ip-substring-filtering' extension is enabled.
This extension adds support for filtering ports by using part of an IP
address.
"""
return self._has_extension(
constants.SUBSTR_PORT_FILTERING, context, client
)
def has_segment_extension(self, context=None, client=None):
"""Check if the neutron 'segment' extension is enabled.
This extension exposes information about L2 segments of a network.
"""
return self._has_extension(
constants.SEGMENT, context, client,
)
def has_port_binding_extension(self, context=None, client=None):
"""Check if the neutron 'binding-extended' extension is enabled.
This extensions exposes port bindings of a virtual port to external
application.
This extension allows nova to bind a port to multiple hosts at the same
time, like during live migration.
"""
return self._has_extension(
constants.PORT_BINDING_EXTENDED, context, client
)
    def bind_ports_to_host(self, context, instance, host,
                           vnic_types=None, port_profiles=None):
        """Attempts to bind the ports from the instance on the given host

        If the ports are already actively bound to another host, like the
        source host during live migration, then the new port bindings will
        be inactive, assuming $host is the destination host for the live
        migration.

        In the event of an error, any ports which were successfully bound to
        the host should have those host bindings removed from the ports.

        This method should not be used if "has_port_binding_extension"
        returns False.

        :param context: the user request context
        :type context: nova.context.RequestContext
        :param instance: the instance with a set of ports
        :type instance: nova.objects.Instance
        :param host: the host on which to bind the ports which
                     are attached to the instance
        :type host: str
        :param vnic_types: optional dict for the host port binding
        :type vnic_types: dict of <port_id> : <vnic_type>
        :param port_profiles: optional dict per port ID for the host port
                        binding profile.
                        note that the port binding profile is mutable
                        via the networking "Port Binding" API so callers that
                        pass in a profile should ensure they have the latest
                        version from neutron with their changes merged,
                        which can be determined using the "revision_number"
                        attribute of the port.
        :type port_profiles: dict of <port_id> : <port_profile>
        :raises: PortBindingFailed if any of the ports failed to be bound to
                 the destination host
        :returns: dict, keyed by port ID, of a new host port
                  binding dict per port that was bound
        """
        # Get the current ports off the instance. This assumes the cache is
        # current.
        network_info = instance.get_network_info()
        if not network_info:
            # The instance doesn't have any ports so there is nothing to do.
            LOG.debug('Instance does not have any ports.', instance=instance)
            return {}
        client = get_client(context, admin=True)
        bindings_by_port_id: ty.Dict[str, ty.Any] = {}
        for vif in network_info:
            # Now bind each port to the destination host and keep track of each
            # port that is bound to the resulting binding so we can rollback in
            # the event of a failure, or return the results if everything is OK
            port_id = vif['id']
            binding = dict(host=host)
            # Prefer the caller-supplied vnic_type/profile for this port;
            # otherwise reuse what the cached VIF already carries.
            if vnic_types is None or port_id not in vnic_types:
                binding['vnic_type'] = vif['vnic_type']
            else:
                binding['vnic_type'] = vnic_types[port_id]
            if port_profiles is None or port_id not in port_profiles:
                binding['profile'] = vif['profile']
            else:
                binding['profile'] = port_profiles[port_id]
            data = {'binding': binding}
            try:
                binding = client.create_port_binding(port_id, data)['binding']
            except neutron_client_exc.NeutronClientException:
                # Something failed, so log the error and rollback any
                # successful bindings.
                LOG.error('Binding failed for port %s and host %s.',
                          port_id, host, instance=instance, exc_info=True)
                for rollback_port_id in bindings_by_port_id:
                    try:
                        client.delete_port_binding(rollback_port_id, host)
                    except neutron_client_exc.NeutronClientException as exc:
                        # 404 means the binding is already gone; any other
                        # error is logged but does not stop the rollback.
                        if exc.status_code != 404:
                            LOG.warning('Failed to remove binding for port %s '
                                        'on host %s.', rollback_port_id, host,
                                        instance=instance)
                raise exception.PortBindingFailed(port_id=port_id)
            bindings_by_port_id[port_id] = binding
        return bindings_by_port_id
def delete_port_binding(self, context, port_id, host):
"""Delete the port binding for the given port ID and host
This method should not be used if "has_port_binding_extension"
returns False.
:param context: The request context for the operation.
:param port_id: The ID of the port with a binding to the host.
:param host: The host from which port bindings should be deleted.
:raises: nova.exception.PortBindingDeletionFailed if a non-404 error
response is received from neutron.
"""
client = get_client(context, admin=True)
try:
client.delete_port_binding(port_id, host)
except neutron_client_exc.NeutronClientException as exc:
# We can safely ignore 404s since we're trying to delete
# the thing that wasn't found anyway.
if exc.status_code != 404:
LOG.error(
'Unexpected error trying to delete binding for port %s '
'and host %s.', port_id, host, exc_info=True)
raise exception.PortBindingDeletionFailed(
port_id=port_id, host=host)
    def _get_vf_pci_device_profile(self, pci_dev):
        """Get VF-specific fields to add to the PCI device profile.

        This data can be useful, e.g. for off-path networking backends that
        need to do the necessary plumbing in order to set a VF up for packet
        forwarding.

        :param pci_dev: the claimed VF PCI device; reads its ``address``,
            ``parent_addr`` and ``card_serial_number`` attributes.
        :returns: dict which, on success, contains 'pf_mac_address' and
            'vf_num' (plus 'card_serial_number' when the device has one);
            empty when the PF MAC or the VF number cannot be determined.
        """
        vf_profile: ty.Dict[str, ty.Union[str, int]] = {}
        try:
            pf_mac = pci_utils.get_mac_by_pci_address(pci_dev.parent_addr)
        except (exception.PciDeviceNotFoundById) as e:
            LOG.debug(
                "Could not determine PF MAC address for a VF with"
                " addr %(addr)s, error: %(e)s",
                {"addr": pci_dev.address, "e": e})
            # NOTE(dmitriis): we do not raise here since not all PFs will
            # have netdevs even when VFs are netdevs (see LP: #1915255). The
            # rest of the fields (VF number and card serial) are not enough
            # to fully identify the VF so they are not populated either.
            return vf_profile
        try:
            vf_num = pci_utils.get_vf_num_by_pci_address(
                pci_dev.address)
        except exception.PciDeviceNotFoundById as e:
            # This is unlikely to happen because the kernel has a common SR-IOV
            # code that creates physfn symlinks, however, it would be better
            # to avoid raising an exception here and simply warn an operator
            # that things did not go as planned.
            LOG.warning(
                "Could not determine a VF logical number for a VF"
                " with addr %(addr)s, error: %(e)s", {
                    "addr": pci_dev.address, "e": e})
            return vf_profile
        # The card serial number is optional; only include it when present.
        card_serial_number = pci_dev.card_serial_number
        if card_serial_number:
            vf_profile.update({
                'card_serial_number': card_serial_number
            })
        vf_profile.update({
            'pf_mac_address': pf_mac,
            'vf_num': vf_num,
        })
        return vf_profile
def _get_pci_device_profile(self, pci_dev):
dev_spec = self.pci_whitelist.get_devspec(pci_dev)
if dev_spec:
dev_profile = {
'pci_vendor_info': "%s:%s"
% (pci_dev.vendor_id, pci_dev.product_id),
'pci_slot': pci_dev.address,
'physical_network': dev_spec.get_tags().get(
'physical_network'
),
}
if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_VF:
dev_profile.update(
self._get_vf_pci_device_profile(pci_dev))
return dev_profile
raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_id,
address=pci_dev.address)
    def _populate_neutron_binding_profile(self, instance, pci_request_id,
                                          port_req_body,
                                          port_arq):
        """Populate neutron binding:profile.

        Populate it with SR-IOV related information.

        Mutates ``port_req_body`` in place: the existing binding profile
        is deep-copied, merged with the device (and/or ARQ) info, and
        written back.

        :raises PciDeviceNotFound: If a claimed PCI device for the given
            pci_request_id cannot be found on the instance.
        """
        if pci_request_id:
            pci_devices = pci_manager.get_instance_pci_devs(
                instance, pci_request_id)
            if not pci_devices:
                # The pci_request_id likely won't mean much except for tracing
                # through the logs since it is generated per request.
                LOG.error('Unable to find PCI device using PCI request ID in '
                          'list of claimed instance PCI devices: %s. Is the '
                          '[pci]/passthrough_whitelist configuration correct?',
                          # Convert to a primitive list to stringify it.
                          list(instance.pci_devices), instance=instance)
                raise exception.PciDeviceNotFound(
                    _('PCI device not found for request ID %s.') %
                    pci_request_id)
            pci_dev = pci_devices.pop()
            # Deep-copy so keys already present in the profile survive the
            # merge with the device info.
            profile = copy.deepcopy(get_binding_profile(port_req_body['port']))
            profile.update(self._get_pci_device_profile(pci_dev))
            port_req_body['port'][constants.BINDING_PROFILE] = profile
        if port_arq:
            # PCI SRIOV device according port ARQ
            profile = copy.deepcopy(get_binding_profile(port_req_body['port']))
            profile.update(cyborg.get_arq_pci_device_profile(port_arq))
            port_req_body['port'][constants.BINDING_PROFILE] = profile
    @staticmethod
    def _populate_pci_mac_address(instance, pci_request_id, port_req_body):
        """Add the updated MAC address value to the update_port request body.

        Currently this is done only for PF passthrough.

        Mutates ``port_req_body`` in place. Failures to resolve the MAC
        address are logged and leave the body untouched.
        """
        if pci_request_id is not None:
            pci_devs = pci_manager.get_instance_pci_devs(
                instance, pci_request_id)
            if len(pci_devs) != 1:
                # NOTE(ndipanov): We shouldn't ever get here since
                # InstancePCIRequest instances built from network requests
                # only ever index a single device, which needs to be
                # successfully claimed for this to be called as part of
                # allocate_networks method
                LOG.error("PCI request %s does not have a "
                          "unique device associated with it. Unable to "
                          "determine MAC address",
                          pci_request_id, instance=instance)
                return
            pci_dev = pci_devs[0]
            if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
                try:
                    mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
                except exception.PciDeviceNotFoundById as e:
                    LOG.error(
                        "Could not determine MAC address for %(addr)s, "
                        "error: %(e)s",
                        {"addr": pci_dev.address, "e": e}, instance=instance)
                else:
                    # Only set the MAC when the lookup succeeded.
                    port_req_body['port']['mac_address'] = mac
def _populate_neutron_extension_values(self, context, instance,
pci_request_id, port_req_body,
network=None, neutron=None,
bind_host_id=None,
port_arq=None):
"""Populate neutron extension values for the instance.
If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
"""
if neutron is None:
neutron = get_client(context)
port_req_body['port'][constants.BINDING_HOST_ID] = bind_host_id
self._populate_neutron_binding_profile(instance,
pci_request_id,
port_req_body,
port_arq)
if self.has_dns_extension(client=neutron):
# If the DNS integration extension is enabled in Neutron, most
# ports will get their dns_name attribute set in the port create or
# update requests in allocate_for_instance. So we just add the
# dns_name attribute to the payload of those requests. The
# exception is when the port binding extension is enabled in
# Neutron and the port is on a network that has a non-blank
# dns_domain attribute. This case requires to be processed by
# method _update_port_dns_name
if (not network.get('dns_domain')):
port_req_body['port']['dns_name'] = instance.hostname
def _update_port_dns_name(self, context, instance, network, port_id,
neutron):
"""Update an instance port dns_name attribute with instance.hostname.
The dns_name attribute of a port on a network with a non-blank
dns_domain attribute will be sent to the external DNS service
(Designate) if DNS integration is enabled in Neutron. This requires the
assignment of the dns_name to the port to be done with a Neutron client
using the user's context. allocate_for_instance uses a port with admin
context if the port binding extensions is enabled in Neutron. In this
case, we assign in this method the dns_name attribute to the port with
an additional update request. Only a very small fraction of ports will
require this additional update request.
"""
if self.has_dns_extension(client=neutron) and network.get(
'dns_domain'):
try:
port_req_body = {'port': {'dns_name': instance.hostname}}
neutron.update_port(port_id, port_req_body)
except neutron_client_exc.BadRequest:
LOG.warning('Neutron error: Instance hostname '
'%(hostname)s is not a valid DNS name',
{'hostname': instance.hostname}, instance=instance)
msg = (_('Instance hostname %(hostname)s is not a valid DNS '
'name') % {'hostname': instance.hostname})
raise exception.InvalidInput(reason=msg)
def _reset_port_dns_name(self, network, port_id, client):
"""Reset an instance port dns_name attribute to empty when using
external DNS service.
_unbind_ports uses a client with admin context to reset the dns_name if
the DNS extension is enabled and network does not have dns_domain set.
When external DNS service is enabled, we use this method to make the
request with a Neutron client using user's context, so that the DNS
record can be found under user's zone and domain.
"""
if self.has_dns_extension(client=client) and network.get(
'dns_domain'):
try:
port_req_body = {'port': {'dns_name': ''}}
client.update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception("Failed to reset dns_name for port %s", port_id)
def _delete_ports(self, neutron, instance, ports, raise_if_fail=False):
exceptions = []
for port in ports:
try:
neutron.delete_port(port)
except neutron_client_exc.NeutronClientException as e:
if e.status_code == 404:
LOG.warning("Port %s does not exist", port,
instance=instance)
else:
exceptions.append(e)
LOG.warning("Failed to delete port %s for instance.",
port, instance=instance, exc_info=True)
if len(exceptions) > 0 and raise_if_fail:
raise exceptions[0]
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug('deallocate_for_instance()', instance=instance)
search_opts = {'device_id': instance.uuid}
neutron = get_client(context)
data = neutron.list_ports(**search_opts)
ports = {port['id'] for port in data.get('ports', [])}
requested_networks = kwargs.get('requested_networks') or []
# NOTE(danms): Temporary and transitional
if isinstance(requested_networks, objects.NetworkRequestList):
requested_networks = requested_networks.as_tuples()
ports_to_skip = set([port_id for nets, fips, port_id, pci_request_id,
arq_uuid, device_profile in requested_networks])
# NOTE(boden): requested_networks only passed in when deallocating
# from a failed build / spawn call. Therefore we need to include
# preexisting ports when deallocating from a standard delete op
# in which case requested_networks is not provided.
ports_to_skip |= set(self._get_preexisting_port_ids(instance))
ports = set(ports) - ports_to_skip
# Reset device_id and device_owner for the ports that are skipped
self._unbind_ports(context, ports_to_skip, neutron)
# Delete the rest of the ports
self._delete_ports(neutron, instance, ports, raise_if_fail=True)
# deallocate vifs (mac addresses)
objects.VirtualInterface.delete_by_instance_uuid(
context, instance.uuid)
# NOTE(arosen): This clears out the network_cache only if the instance
# hasn't already been deleted. This is needed when an instance fails to
# launch and is rescheduled onto another compute node. If the instance
# has already been deleted this call does nothing.
update_instance_cache_with_nw_info(self, context, instance,
network_model.NetworkInfo([]))
    def deallocate_port_for_instance(self, context, instance, port_id):
        """Remove a specified port from the instance.

        :param context: the request context
        :param instance: the instance object the port is detached from
        :param port_id: the UUID of the port being detached
        :return: A NetworkInfo, port_allocation tuple where the
                 port_allocation is a dict which contains the resource
                 allocation of the port per resource provider uuid. E.g.:
                 {
                     rp_uuid: {
                        "resources": {
                            "NET_BW_EGR_KILOBIT_PER_SEC": 10000,
                            "NET_BW_IGR_KILOBIT_PER_SEC": 20000,
                        }
                     }
                 }
                 Note that right now this dict only contains a single key as a
                 neutron port only allocates from a single resource provider.
        """
        # We need to use an admin client as the port.resource_request is admin
        # only
        neutron_admin = get_client(context, admin=True)
        neutron = get_client(context)
        port_allocation: ty.Dict = {}
        try:
            # NOTE(gibi): we need to read the port resource information from
            # neutron here as we might delete the port below
            port = neutron_admin.show_port(port_id)['port']
        except exception.PortNotFound:
            LOG.debug('Unable to determine port %s resource allocation '
                      'information as the port no longer exists.', port_id)
            port = None
        # Preexisting (user-supplied) ports are unbound, not deleted.
        preexisting_ports = self._get_preexisting_port_ids(instance)
        if port_id in preexisting_ports:
            self._unbind_ports(context, [port_id], neutron)
        else:
            self._delete_ports(neutron, instance, [port_id],
                               raise_if_fail=True)
        # Delete the VirtualInterface for the given port_id.
        vif = objects.VirtualInterface.get_by_uuid(context, port_id)
        if vif:
            self._delete_nic_metadata(instance, vif)
            vif.destroy()
        else:
            LOG.debug('VirtualInterface not found for port: %s',
                      port_id, instance=instance)
        if port:
            # if there is resource associated to this port then that needs to
            # be deallocated so lets return info about such allocation
            resource_request = port.get(constants.RESOURCE_REQUEST) or {}
            profile = get_binding_profile(port)
            if self.has_extended_resource_request_extension(context, neutron):
                # new format
                groups = resource_request.get(constants.REQUEST_GROUPS)
                if groups:
                    # Map each request group to the resource provider it
                    # was allocated from, per the binding profile.
                    allocated_rps = profile.get(constants.ALLOCATION)
                    for group in groups:
                        allocated_rp = allocated_rps[group['id']]
                        port_allocation[allocated_rp] = {
                            "resources": group.get("resources", {})
                        }
            else:
                # legacy format
                allocated_rp = profile.get(constants.ALLOCATION)
                if resource_request and allocated_rp:
                    port_allocation = {
                        allocated_rp: {
                            "resources": resource_request.get("resources", {})
                        }
                    }
        else:
            # Check the info_cache. If the port is still in the info_cache and
            # in that cache there is allocation in the profile then we suspect
            # that the port is disappeared without deallocating the resources.
            for vif in instance.get_network_info():
                if vif['id'] == port_id:
                    profile = vif.get('profile') or {}
                    rp_uuid = profile.get(constants.ALLOCATION)
                    if rp_uuid:
                        LOG.warning(
                            'Port %s disappeared during deallocate but it had '
                            'resource allocation on resource provider %s. '
                            'Resource allocation for this port may be '
                            'leaked.', port_id, rp_uuid, instance=instance)
                    break
        return self.get_instance_nw_info(context, instance), port_allocation
def _delete_nic_metadata(self, instance, vif):
if not instance.device_metadata:
# nothing to delete
return
for device in instance.device_metadata.devices:
if (isinstance(device, objects.NetworkInterfaceMetadata) and
device.mac == vif.address):
instance.device_metadata.devices.remove(device)
instance.save()
break
def list_ports(self, context, **search_opts):
"""List ports for the client based on search options."""
return get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:returns: A dict containing port data keyed by 'port', e.g.
::
{'port': {'port_id': 'abcd',
'fixed_ip_address': '1.2.3.4'}}
"""
return dict(port=self._show_port(context, port_id))
def _show_port(self, context, port_id, neutron_client=None, fields=None):
"""Return the port for the client given the port id.
:param context: Request context.
:param port_id: The id of port to be queried.
:param neutron_client: A neutron client.
:param fields: The condition fields to query port data.
:returns: A dict of port data.
e.g. {'port_id': 'abcd', 'fixed_ip_address': '1.2.3.4'}
"""
if not neutron_client:
neutron_client = get_client(context)
try:
if fields:
result = neutron_client.show_port(port_id, fields=fields)
else:
result = neutron_client.show_port(port_id)
return result.get('port')
except neutron_client_exc.PortNotFoundClient:
raise exception.PortNotFound(port_id=port_id)
except neutron_client_exc.Unauthorized:
raise exception.Forbidden()
except neutron_client_exc.NeutronClientException as exc:
msg = (_("Failed to access port %(port_id)s: %(reason)s") %
{'port_id': port_id, 'reason': exc})
raise exception.NovaException(message=msg)
def get_instance_nw_info(self, context, instance, **kwargs):
"""Returns all network info related to an instance."""
with lockutils.lock('refresh_cache-%s' % instance.uuid):
result = self._get_instance_nw_info(context, instance, **kwargs)
update_instance_cache_with_nw_info(self, context, instance,
nw_info=result)
return result
    def _get_instance_nw_info(self, context, instance, networks=None,
                              port_ids=None, admin_client=None,
                              preexisting_port_ids=None,
                              refresh_vif_id=None, force_refresh=False,
                              **kwargs):
        """Build and return hydrated network info for the instance.

        :param refresh_vif_id: when set, only this port's entry of the
            cache is refreshed
        :param force_refresh: when True, the cache is rebuilt rather
            than reusing the existing info cache content
        :returns: a network_model.NetworkInfo for the instance
        """
        # NOTE(danms): This is an inner method intended to be called
        # by other code that updates instance nwinfo. It *must* be
        # called with the refresh_cache-%(instance_uuid) lock held!
        if force_refresh:
            LOG.debug('Forcefully refreshing network info cache for instance',
                      instance=instance)
        elif refresh_vif_id:
            LOG.debug('Refreshing network info cache for port %s',
                      refresh_vif_id, instance=instance)
        else:
            LOG.debug('Building network info cache for instance',
                      instance=instance)
        # Ensure that we have an up to date copy of the instance info cache.
        # Otherwise multiple requests could collide and cause cache
        # corruption.
        compute_utils.refresh_info_cache_for_instance(context, instance)
        nw_info = self._build_network_info_model(context, instance, networks,
                                                 port_ids, admin_client,
                                                 preexisting_port_ids,
                                                 refresh_vif_id,
                                                 force_refresh=force_refresh)
        return network_model.NetworkInfo.hydrate(nw_info)
    def _gather_port_ids_and_networks(self, context, instance, networks=None,
                                      port_ids=None, neutron=None):
        """Return an instance's complete list of port_ids and networks.

        The results are based on the instance info_cache in the nova db, not
        the instance's current list of ports in neutron.

        :param networks: list of network dicts; must be supplied together
            with ``port_ids`` (both None or both not None)
        :param port_ids: list of port IDs matching ``networks``
        :raises NovaException: when only one of networks/port_ids is given
        """
        # Both or neither of networks/port_ids must be supplied.
        if ((networks is None and port_ids is not None) or
                (port_ids is None and networks is not None)):
            message = _("This method needs to be called with either "
                        "networks=None and port_ids=None or port_ids and "
                        "networks as not none.")
            raise exception.NovaException(message=message)
        ifaces = instance.get_network_info()
        # This code path is only done when refreshing the network_cache
        if port_ids is None:
            port_ids = [iface['id'] for iface in ifaces]
            net_ids = [iface['network']['id'] for iface in ifaces]
        if networks is None:
            networks = self._get_available_networks(context,
                                                    instance.project_id,
                                                    net_ids, neutron)
        # an interface was added/removed from instance.
        else:
            # Prepare the network ids list for validation purposes
            networks_ids = [network['id'] for network in networks]
            # Validate that interface networks doesn't exist in networks.
            # Though this issue can and should be solved in methods
            # that prepare the networks list, this method should have this
            # ignore-duplicate-networks/port-ids mechanism to reduce the
            # probability of failing to boot the VM.
            networks = networks + [
                {'id': iface['network']['id'],
                 'name': iface['network']['label'],
                 'tenant_id': iface['network']['meta']['tenant_id']}
                for iface in ifaces
                if _is_not_duplicate(iface['network']['id'],
                                     networks_ids,
                                     "networks",
                                     instance)]
            # Include existing interfaces so they are not removed from the db.
            # Validate that the interface id is not in the port_ids
            port_ids = [iface['id'] for iface in ifaces
                        if _is_not_duplicate(iface['id'],
                                             port_ids,
                                             "port_ids",
                                             instance)] + port_ids
        return networks, port_ids
    @refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Add a fixed IP to the instance from specified network.

        Tries each of the instance's ports on the network against each
        subnet until one port update succeeds; that first success
        returns refreshed network info.

        :raises NetworkNotFoundForInstance: when the network has no
            subnets or no port update succeeds
        """
        neutron = get_client(context)
        search_opts = {'network_id': network_id}
        data = neutron.list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        if not ipam_subnets:
            raise exception.NetworkNotFoundForInstance(
                instance_id=instance.uuid)
        zone = 'compute:%s' % instance.availability_zone
        search_opts = {'device_id': instance.uuid,
                       'device_owner': zone,
                       'network_id': network_id}
        data = neutron.list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            for subnet in ipam_subnets:
                # NOTE(review): p['fixed_ips'] is the same list object on
                # every iteration, so each failed attempt leaves its
                # subnet entry appended for the next try — confirm this
                # accumulation is intentional.
                fixed_ips = p['fixed_ips']
                fixed_ips.append({'subnet_id': subnet['id']})
                port_req_body = {'port': {'fixed_ips': fixed_ips}}
                try:
                    neutron.update_port(p['id'], port_req_body)
                    return self._get_instance_nw_info(context, instance)
                except Exception as ex:
                    msg = ("Unable to update port %(portid)s on subnet "
                           "%(subnet_id)s with failure: %(exception)s")
                    LOG.debug(msg, {'portid': p['id'],
                                    'subnet_id': subnet['id'],
                                    'exception': ex}, instance=instance)
        raise exception.NetworkNotFoundForInstance(
            instance_id=instance.uuid)
@refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed IP from the instance."""
neutron = get_client(context)
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
neutron.update_port(p['id'], port_req_body)
except Exception as ex:
msg = ("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex},
instance=instance)
return self._get_instance_nw_info(context, instance)
raise exception.FixedIpNotFoundForInstance(
instance_uuid=instance.uuid, ip=address)
    def _get_physnet_tunneled_info(self, context, neutron, net_id):
        """Retrieve detailed network info.

        :param context: The request context.
        :param neutron: The neutron client object.
        :param net_id: The ID of the network to retrieve information for.

        :return: A tuple containing the physnet name, if defined, and the
            tunneled status of the network. If the network uses multiple
            segments, the first segment that defines a physnet value will be
            used for the physnet name.
        """
        if self.has_multi_provider_extension(client=neutron):
            network = neutron.show_network(net_id,
                                           fields='segments').get('network')
            segments = network.get('segments', {})
            for net in segments:
                # NOTE(vladikr): In general, "multi-segments" network is a
                # combination of L2 segments. The current implementation
                # contains a vxlan and vlan(s) segments, where only a vlan
                # network will have a physical_network specified, but may
                # change in the future. The purpose of this method
                # is to find a first segment that provides a physical network.
                # TODO(vladikr): Additional work will be required to handle the
                # case of multiple vlan segments associated with different
                # physical networks.
                physnet_name = net.get('provider:physical_network')
                if physnet_name:
                    # A segment carrying a physical network is reported as
                    # not tunneled.
                    return physnet_name, False
            # Raising here as at least one segment should
            # have a physical network provided.
            if segments:
                msg = (_("None of the segments of network %s provides a "
                         "physical_network") % net_id)
                raise exception.NovaException(message=msg)
        # Single-segment network: tunneled status is derived from whether
        # the network type is one of the L3 (tunnel) network types.
        net = neutron.show_network(
            net_id, fields=['provider:physical_network',
                            'provider:network_type']).get('network')
        return (net.get('provider:physical_network'),
                net.get('provider:network_type') in constants.L3_NETWORK_TYPES)
@staticmethod
def _get_trusted_mode_from_port(port):
"""Returns whether trusted mode is requested
If port binding does not provide any information about trusted
status this function is returning None
"""
value = get_binding_profile(port).get('trusted')
if value is not None:
# This allows the user to specify things like '1' and 'yes' in
# the port binding profile and we can handle it as a boolean.
return strutils.bool_from_string(value)
@staticmethod
def _is_remote_managed(vnic_type):
"""Determine if the port is remote_managed or not by VNIC type.
:param str vnic_type: The VNIC type to assess.
:return: A boolean indicator whether the NIC is remote managed or not.
:rtype: bool
"""
return vnic_type == network_model.VNIC_TYPE_REMOTE_MANAGED
def is_remote_managed_port(self, context, port_id):
"""Determine if a port has a REMOTE_MANAGED VNIC type.
:param context: The request context
:param port_id: The id of the Neutron port
"""
port = self.show_port(context, port_id)['port']
return self._is_remote_managed(
port.get('binding:vnic_type', network_model.VNIC_TYPE_NORMAL)
)
# NOTE(sean-k-mooney): we might want to have this return a
# nova.network.model.VIF object instead in the future.
def _get_port_vnic_info(self, context, neutron, port_id):
"""Retrieve port vNIC info
:param context: The request context
:param neutron: The Neutron client
:param port_id: The id of port to be queried
:return: A tuple of vNIC type, trusted status, network ID, resource
request of the port if any and port numa affintiy policy,
and device_profile.
Trusted status only affects SR-IOV ports and will always be
None for other port types. If no port numa policy is
requested by a port, None will be returned.
"""
fields = ['binding:vnic_type', constants.BINDING_PROFILE,
'network_id', constants.RESOURCE_REQUEST,
constants.NUMA_POLICY, 'device_profile']
port = self._show_port(
context, port_id, neutron_client=neutron, fields=fields)
network_id = port.get('network_id')
trusted = None
vnic_type = port.get('binding:vnic_type',
network_model.VNIC_TYPE_NORMAL)
if vnic_type in network_model.VNIC_TYPES_SRIOV:
trusted = self._get_trusted_mode_from_port(port)
# NOTE(gibi): Get the port resource_request which may or may not be
# set depending on neutron configuration, e.g. if QoS rules are
# applied to the port/network and the port-resource-request API
# extension is enabled.
resource_request = port.get(constants.RESOURCE_REQUEST, None)
numa_policy = port.get(constants.NUMA_POLICY, None)
device_profile = port.get("device_profile", None)
return (vnic_type, trusted, network_id, resource_request,
numa_policy, device_profile)
def support_create_with_resource_request(self, context):
"""Returns false if neutron is configured with extended resource
request which is not currently supported.
This function is only here temporarily to help mocking this check in
the functional test environment.
"""
return not (self.has_extended_resource_request_extension(context))
    def create_resource_requests(
            self, context, requested_networks, pci_requests=None,
            affinity_policy=None):
        """Retrieve all information for the networks passed at the time of
        creating the server.
        :param context: The request context.
        :param requested_networks: The networks requested for the server.
        :type requested_networks: nova.objects.NetworkRequestList
        :param pci_requests: The list of PCI requests to which additional PCI
            requests created here will be added.
        :type pci_requests: nova.objects.InstancePCIRequests
        :param affinity_policy: requested pci numa affinity policy
        :type affinity_policy: nova.objects.fields.PCINUMAAffinityPolicy
        :returns: A three tuple with an instance of ``objects.NetworkMetadata``
            for use by the scheduler or None, a list of RequestGroup
            objects representing the resource needs of each requested port and
            a RequestLevelParam object that contains global scheduling
            instructions not specific to any of the RequestGroups
        """
        # Nothing to do when no networks were requested or allocation was
        # explicitly disabled.
        if not requested_networks or requested_networks.no_allocate:
            return None, [], None
        physnets = set()
        tunneled = False
        # NOTE(review): an admin client is used here, presumably so neutron
        # fills in the resource_request field of ports (see the NOTE in
        # get_requested_resource_for_instance) -- confirm.
        neutron = get_client(context, admin=True)
        has_extended_resource_request_extension = (
            self.has_extended_resource_request_extension(context, neutron))
        resource_requests = []
        request_level_params = objects.RequestLevelParams()
        for request_net in requested_networks:
            # Per-request defaults; overridden below for pre-created ports.
            physnet = None
            trusted = None
            tunneled_ = False
            vnic_type = network_model.VNIC_TYPE_NORMAL
            pci_request_id = None
            requester_id = None
            port_numa_policy = None
            if request_net.port_id:
                # InstancePCIRequest.requester_id is semantically linked
                # to a port with a resource_request.
                requester_id = request_net.port_id
                (vnic_type, trusted, network_id, resource_request,
                 port_numa_policy, device_profile) = self._get_port_vnic_info(
                    context, neutron, request_net.port_id)
                physnet, tunneled_ = self._get_physnet_tunneled_info(
                    context, neutron, network_id)
                if vnic_type in network_model.VNIC_TYPES_ACCELERATOR:
                    # get request groups from cyborg profile
                    if not device_profile:
                        err = ('No device profile for port %s.'
                               % (request_net.port_id))
                        raise exception.DeviceProfileError(
                            name=device_profile, msg=err)
                    cyclient = cyborg.get_client(context)
                    dp_groups = cyclient.get_device_profile_groups(
                        device_profile)
                    dev_num = cyborg.get_device_amount_of_dp_groups(dp_groups)
                    # Only a single device per port is supported.
                    if dev_num > 1:
                        err_msg = 'request multiple devices for single port.'
                        raise exception.DeviceProfileError(name=device_profile,
                                                           msg=err_msg)
                    dp_request_groups = (cyclient.get_device_request_groups(
                        dp_groups, owner=request_net.port_id))
                    LOG.debug("device_profile request group(ARQ): %s",
                              dp_request_groups)
                    # keep device_profile to avoid get vnic info again
                    request_net.device_profile = device_profile
                    resource_requests.extend(dp_request_groups)
                if resource_request:
                    if has_extended_resource_request_extension:
                        # need to handle the new resource request format
                        # NOTE(gibi): explicitly orphan the RequestGroup by
                        # setting context=None as we never intended to save it
                        # to the DB.
                        resource_requests.extend(
                            objects.RequestGroup.from_extended_port_request(
                                context=None,
                                port_resource_request=resource_request))
                        request_level_params.extend_with(
                            objects.RequestLevelParams.from_port_request(
                                port_resource_request=resource_request))
                    else:
                        # keep supporting the old format of the
                        # resource_request
                        # NOTE(gibi): explicitly orphan the RequestGroup by
                        # setting context=None as we never intended to save it
                        # to the DB.
                        resource_requests.append(
                            objects.RequestGroup.from_port_request(
                                context=None,
                                port_uuid=request_net.port_id,
                                port_resource_request=resource_request))
            elif request_net.network_id and not request_net.auto_allocate:
                # Network-only request (no pre-created port).
                network_id = request_net.network_id
                physnet, tunneled_ = self._get_physnet_tunneled_info(
                    context, neutron, network_id)
            # All tunneled traffic must use the same logical NIC so we just
            # need to know if there is one or more tunneled networks present.
            tunneled = tunneled or tunneled_
            # ...conversely, there can be multiple physnets, which will
            # generally be mapped to different NICs, and some requested
            # networks may use the same physnet. As a result, we need to know
            # the *set* of physnets from every network requested
            if physnet:
                physnets.add(physnet)
            if vnic_type in network_model.VNIC_TYPES_SRIOV:
                # TODO(moshele): To differentiate between the SR-IOV legacy
                # and SR-IOV ovs hardware offload we will leverage the nic
                # feature based scheduling in nova. This mean we will need
                # libvirt to expose the nic feature. At the moment
                # there is a limitation that deployers cannot use both
                # SR-IOV modes (legacy and ovs) in the same deployment.
                spec = {
                    pci_request.PCI_NET_TAG: physnet,
                    # Convert the value to string since tags are compared as
                    # string values case-insensitively.
                    pci_request.PCI_REMOTE_MANAGED_TAG:
                        str(self._is_remote_managed(vnic_type)),
                }
                dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
                if dev_type:
                    spec[pci_request.PCI_DEVICE_TYPE_TAG] = dev_type
                if trusted is not None:
                    # We specifically have requested device on a pool
                    # with a tag trusted set to true or false. We
                    # convert the value to string since tags are
                    # compared in that way.
                    spec[pci_request.PCI_TRUSTED_TAG] = str(trusted)
                request = objects.InstancePCIRequest(
                    count=1,
                    spec=[spec],
                    request_id=uuidutils.generate_uuid(),
                    requester_id=requester_id)
                # NOTE(sean-k-mooney): port NUMA policies take precedence
                # over image and flavor policies.
                numa_policy = port_numa_policy or affinity_policy
                if numa_policy:
                    request.numa_policy = numa_policy
                pci_requests.requests.append(request)
                pci_request_id = request.request_id
            # Add pci_request_id into the requested network
            request_net.pci_request_id = pci_request_id
        return (
            objects.NetworkMetadata(physnets=physnets, tunneled=tunneled),
            resource_requests,
            request_level_params
        )
def _can_auto_allocate_network(self, context, neutron):
"""Helper method to determine if we can auto-allocate networks
:param context: nova request context
:param neutron: neutron client
:returns: True if it's possible to auto-allocate networks, False
otherwise.
"""
# run the dry-run validation, which will raise a 409 if not ready
try:
neutron.validate_auto_allocated_topology_requirements(
context.project_id)
LOG.debug('Network auto-allocation is available for project '
'%s', context.project_id)
return True
except neutron_client_exc.Conflict as ex:
LOG.debug('Unable to auto-allocate networks. %s',
str(ex))
return False
def _auto_allocate_network(self, instance, neutron):
"""Automatically allocates a network for the given project.
:param instance: create the network for the project that owns this
instance
:param neutron: neutron client
:returns: Details of the network that was created.
:raises: nova.exception.UnableToAutoAllocateNetwork
:raises: nova.exception.NetworkNotFound
"""
project_id = instance.project_id
LOG.debug('Automatically allocating a network for project %s.',
project_id, instance=instance)
try:
topology = neutron.get_auto_allocated_topology(
project_id)['auto_allocated_topology']
except neutron_client_exc.Conflict:
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
try:
network = neutron.show_network(topology['id'])['network']
except neutron_client_exc.NetworkNotFoundClient:
# This shouldn't happen since we just created the network, but
# handle it anyway.
LOG.error('Automatically allocated network %(network_id)s '
'was not found.', {'network_id': topology['id']},
instance=instance)
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
LOG.debug('Automatically allocated network: %s', network,
instance=instance)
return network
    def _ports_needed_per_instance(self, context, neutron, requested_networks):
        """Return how many new Neutron ports each instance will consume.

        Caller-provided (pre-created) ports count as zero since they
        already exist; each network-only request costs one port. The
        requested ports and networks are validated along the way and
        conflicts raise exceptions (PortInUse, NetworkAmbiguous, etc.).
        """
        # TODO(danms): Remove me when all callers pass an object
        if requested_networks and isinstance(requested_networks[0], tuple):
            requested_networks = objects.NetworkRequestList.from_tuples(
                requested_networks)
        ports_needed_per_instance = 0
        if (requested_networks is None or len(requested_networks) == 0 or
                requested_networks.auto_allocate):
            nets = self._get_available_networks(context, context.project_id,
                                                neutron=neutron)
            if len(nets) > 1:
                # Attaching to more than one network by default doesn't
                # make sense, as the order will be arbitrary and the guest OS
                # won't know which to configure
                msg = _("Multiple possible networks found, use a Network "
                        "ID to be more specific.")
                raise exception.NetworkAmbiguous(msg)
            if not nets and (
                requested_networks and requested_networks.auto_allocate):
                # If there are no networks available to this project and we
                # were asked to auto-allocate a network, check to see that we
                # can do that first.
                LOG.debug('No networks are available for project %s; checking '
                          'to see if we can automatically allocate a network.',
                          context.project_id)
                if not self._can_auto_allocate_network(context, neutron):
                    raise exception.UnableToAutoAllocateNetwork(
                        project_id=context.project_id)
            # One port on the single (possibly auto-allocated) network.
            ports_needed_per_instance = 1
        else:
            net_ids_requested = []
            for request in requested_networks:
                if request.port_id:
                    # Pre-created port: validate it, but it costs no quota.
                    port = self._show_port(context, request.port_id,
                                           neutron_client=neutron)
                    if port.get('device_id', None):
                        raise exception.PortInUse(port_id=request.port_id)
                    deferred_ip = port.get('ip_allocation') == 'deferred'
                    ipless_port = port.get('ip_allocation') == 'none'
                    # NOTE(carl_baldwin) A deferred IP port doesn't have an
                    # address here. If it fails to get one later when nova
                    # updates it with host info, Neutron will error which
                    # raises an exception.
                    # NOTE(sbauza): We don't need to validate the
                    # 'connectivity' attribute of the port's
                    # 'binding:vif_details' to ensure it's 'l2', as Neutron
                    # already verifies it.
                    if (
                        not (deferred_ip or ipless_port) and
                        not port.get('fixed_ips')
                    ):
                        raise exception.PortRequiresFixedIP(
                            port_id=request.port_id)
                    request.network_id = port['network_id']
                else:
                    ports_needed_per_instance += 1
                net_ids_requested.append(request.network_id)
                # NOTE(jecarey) There is currently a race condition.
                # That is, if you have more than one request for a specific
                # fixed IP at the same time then only one will be allocated
                # the ip. The fixed IP will be allocated to only one of the
                # instances that will run. The second instance will fail on
                # spawn. That instance will go into error state.
                # TODO(jecarey) Need to address this race condition once we
                # have the ability to update mac addresses in Neutron.
                if request.address:
                    # TODO(jecarey) Need to look at consolidating list_port
                    # calls once able to OR filters.
                    search_opts = {'network_id': request.network_id,
                                   'fixed_ips': 'ip_address=%s' % (
                                       request.address),
                                   'fields': 'device_id'}
                    existing_ports = neutron.list_ports(
                        **search_opts)['ports']
                    if existing_ports:
                        i_uuid = existing_ports[0]['device_id']
                        raise exception.FixedIpAlreadyInUse(
                            address=request.address,
                            instance_uuid=i_uuid)
            # Now check to see if all requested networks exist
            if net_ids_requested:
                nets = self._get_available_networks(
                    context, context.project_id, net_ids_requested,
                    neutron=neutron)
                for net in nets:
                    if not net.get('subnets'):
                        raise exception.NetworkRequiresSubnet(
                            network_uuid=net['id'])
                if len(nets) != len(net_ids_requested):
                    requested_netid_set = set(net_ids_requested)
                    returned_netid_set = set([net['id'] for net in nets])
                    lostid_set = requested_netid_set - returned_netid_set
                    if lostid_set:
                        id_str = ''
                        for _id in lostid_set:
                            # Legacy "cond and a or b" idiom: builds a
                            # ', '-separated list of the missing net ids.
                            id_str = id_str and id_str + ', ' + _id or _id
                        raise exception.NetworkNotFound(network_id=id_str)
        return ports_needed_per_instance
    def get_requested_resource_for_instance(
        self,
        context: nova_context.RequestContext,
        instance_uuid: str
    ) -> ty.Tuple[
            ty.List['objects.RequestGroup'], 'objects.RequestLevelParams']:
        """Collect resource requests from the ports associated to the instance
        :param context: nova request context
        :param instance_uuid: The UUID of the instance
        :return: A two tuple with a list of RequestGroup objects and a
            RequestLevelParams object.
        """
        # NOTE(gibi): We need to use an admin client as otherwise a non admin
        # initiated resize causes that neutron does not fill the
        # resource_request field of the port and this will lead to resource
        # allocation issues. See bug 1849695
        neutron = get_client(context, admin=True)
        # get the ports associated to this instance
        data = neutron.list_ports(
            device_id=instance_uuid, fields=['id', constants.RESOURCE_REQUEST])
        resource_requests = []
        request_level_params = objects.RequestLevelParams()
        # The extension determines which resource_request format neutron
        # returns, so it picks the parsing branch below.
        extended_rr = self.has_extended_resource_request_extension(
            context, neutron)
        for port in data.get('ports', []):
            resource_request = port.get(constants.RESOURCE_REQUEST)
            if extended_rr and resource_request:
                # New (extended) format: one port may carry several groups
                # plus request-level parameters.
                resource_requests.extend(
                    objects.RequestGroup.from_extended_port_request(
                        context=None,
                        port_resource_request=port['resource_request']))
                request_level_params.extend_with(
                    objects.RequestLevelParams.from_port_request(
                        port_resource_request=resource_request))
            else:
                # keep supporting the old format of the resource_request
                if resource_request:
                    # NOTE(gibi): explicitly orphan the RequestGroup by setting
                    # context=None as we never intended to save it to the DB.
                    resource_requests.append(
                        objects.RequestGroup.from_port_request(
                            context=None, port_uuid=port['id'],
                            port_resource_request=port['resource_request']))
        return resource_requests, request_level_params
def validate_networks(self, context, requested_networks, num_instances):
"""Validate that the tenant can use the requested networks.
Return the number of instances than can be successfully allocated
with the requested network configuration.
"""
LOG.debug('validate_networks() for %s', requested_networks)
neutron = get_client(context)
ports_needed_per_instance = self._ports_needed_per_instance(
context, neutron, requested_networks)
# Note(PhilD): Ideally Nova would create all required ports as part of
# network validation, but port creation requires some details
# from the hypervisor. So we just check the quota and return
# how many of the requested number of instances can be created
if ports_needed_per_instance:
quotas = neutron.show_quota(context.project_id)['quota']
if quotas.get('port', -1) == -1:
# Unlimited Port Quota
return num_instances
# We only need the port count so only ask for ids back.
params = dict(tenant_id=context.project_id, fields=['id'])
ports = neutron.list_ports(**params)['ports']
free_ports = quotas.get('port') - len(ports)
if free_ports < 0:
msg = (_("The number of defined ports: %(ports)d "
"is over the limit: %(quota)d") %
{'ports': len(ports),
'quota': quotas.get('port')})
raise exception.PortLimitExceeded(msg)
ports_needed = ports_needed_per_instance * num_instances
if free_ports >= ports_needed:
return num_instances
else:
return free_ports // ports_needed_per_instance
return num_instances
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given IP address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def _get_port_id_by_fixed_address(self, client,
instance, address):
"""Return port_id from a fixed address."""
zone = 'compute:%s' % instance.availability_zone
search_opts = {'device_id': instance.uuid,
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
    @refresh_cache
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associate a floating IP with a fixed IP.

        :param context: nova auth request context
        :param instance: instance owning ``fixed_address``
        :param floating_address: the floating IP address to associate
        :param fixed_address: the fixed IP to attach the floating IP to
        :param affect_auto_assigned: ignored (kept for API compatibility)
        """
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could
        # not find why this parameter exists.
        client = get_client(context)
        port_id = self._get_port_id_by_fixed_address(client, instance,
                                                     fixed_address)
        fip = self._get_floating_ip_by_address(client, floating_address)
        param = {'port_id': port_id,
                 'fixed_ip_address': fixed_address}
        try:
            client.update_floatingip(fip['id'], {'floatingip': param})
        except neutron_client_exc.Conflict as e:
            raise exception.FloatingIpAssociateFailed(str(e))
        # If the floating IP was associated with another server, try to refresh
        # the cache for that instance to avoid a window of time where multiple
        # servers in the API say they are using the same floating IP.
        if fip['port_id']:
            # Trap and log any errors from
            # _update_inst_info_cache_for_disassociated_fip but not let them
            # raise back up to the caller since this refresh is best effort.
            try:
                self._update_inst_info_cache_for_disassociated_fip(
                    context, instance, client, fip)
            except Exception as e:
                LOG.warning('An error occurred while trying to refresh the '
                            'network info cache for an instance associated '
                            'with port %s. Error: %s', fip['port_id'], e)
def _update_inst_info_cache_for_disassociated_fip(self, context,
instance, client, fip):
"""Update the network info cache when a floating IP is re-assigned.
:param context: nova auth RequestContext
:param instance: The instance to which the floating IP is now assigned
:param client: ClientWrapper instance for using the Neutron API
:param fip: dict for the floating IP that was re-assigned where the
the ``port_id`` value represents the port that was
associated with another server.
"""
port = self._show_port(context, fip['port_id'],
neutron_client=client)
orig_instance_uuid = port['device_id']
msg_dict = dict(address=fip['floating_ip_address'],
instance_id=orig_instance_uuid)
LOG.info('re-assign floating IP %(address)s from '
'instance %(instance_id)s', msg_dict,
instance=instance)
orig_instance = self._get_instance_by_uuid_using_api_db(
context, orig_instance_uuid)
if orig_instance:
# purge cached nw info for the original instance; pass the
# context from the instance in case we found it in another cell
update_instance_cache_with_nw_info(
self, orig_instance._context, orig_instance)
else:
# Leave a breadcrumb about not being able to refresh the
# the cache for the original instance.
LOG.info('Unable to refresh the network info cache for '
'instance %s after disassociating floating IP %s. '
'If the instance still exists, its info cache may '
'be healed automatically.',
orig_instance_uuid, fip['id'])
@staticmethod
def _get_instance_by_uuid_using_api_db(context, instance_uuid):
"""Look up the instance by UUID
This method is meant to be used sparingly since it tries to find
the instance by UUID in the cell-targeted context. If the instance
is not found, this method will try to determine if it's not found
because it is deleted or if it is just in another cell. Therefore
it assumes to have access to the API database and should only be
called from methods that are used in the control plane services.
:param context: cell-targeted nova auth RequestContext
:param instance_uuid: UUID of the instance to find
:returns: Instance object if the instance was found, else None.
"""
try:
return objects.Instance.get_by_uuid(context, instance_uuid)
except exception.InstanceNotFound:
# The instance could be deleted or it could be in another cell.
# To determine if its in another cell, check the instance
# mapping in the API DB.
try:
inst_map = objects.InstanceMapping.get_by_instance_uuid(
context, instance_uuid)
except exception.InstanceMappingNotFound:
# The instance is gone so just return.
return
# We have the instance mapping, look up the instance in the
# cell the instance is in.
with nova_context.target_cell(
context, inst_map.cell_mapping) as cctxt:
try:
return objects.Instance.get_by_uuid(cctxt, instance_uuid)
except exception.InstanceNotFound:
# Alright it's really gone.
return
def get_all(self, context):
"""Get all networks for client."""
client = get_client(context)
return client.list_networks().get('networks')
def get(self, context, network_uuid):
"""Get specific network for client."""
client = get_client(context)
try:
return client.show_network(network_uuid).get('network') or {}
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
def get_fixed_ip_by_address(self, context, address):
"""Return instance uuids given an address."""
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def get_floating_ip(self, context, id):
"""Return floating IP object given the floating IP id."""
client = get_client(context)
try:
fip = client.show_floatingip(id)['floatingip']
except neutron_client_exc.NeutronClientException as e:
if e.status_code == 404:
raise exception.FloatingIpNotFound(id=id)
with excutils.save_and_reraise_exception():
LOG.exception('Unable to access floating IP %s', id)
# retrieve and cache the network details now since many callers need
# the network name which isn't present in the response from neutron
network_uuid = fip['floating_network_id']
try:
fip['network_details'] = client.show_network(
network_uuid)['network']
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
if not self.has_fip_port_details_extension(client=client):
port_id = fip['port_id']
try:
fip['port_details'] = client.show_port(
port_id)['port']
except neutron_client_exc.PortNotFoundClient:
# it's possible to create floating IPs without a port
fip['port_details'] = None
return fip
def get_floating_ip_by_address(self, context, address):
"""Return a floating IP given an address."""
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
# retrieve and cache the network details now since many callers need
# the network name which isn't present in the response from neutron
network_uuid = fip['floating_network_id']
try:
fip['network_details'] = client.show_network(
network_uuid)['network']
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
if not self.has_fip_port_details_extension(client=client):
port_id = fip['port_id']
try:
fip['port_details'] = client.show_port(
port_id)['port']
except neutron_client_exc.PortNotFoundClient:
# it's possible to create floating IPs without a port
fip['port_details'] = None
return fip
def get_floating_ip_pools(self, context):
"""Return floating IP pools a.k.a. external networks."""
client = get_client(context)
data = client.list_networks(**{constants.NET_EXTERNAL: True})
return data['networks']
def get_floating_ips_by_project(self, context):
client = get_client(context)
project_id = context.project_id
fips = self._safe_get_floating_ips(client, tenant_id=project_id)
if not fips:
return fips
# retrieve and cache the network details now since many callers need
# the network name which isn't present in the response from neutron
networks = {net['id']: net for net in self._get_available_networks(
context, project_id, [fip['floating_network_id'] for fip in fips],
client)}
for fip in fips:
network_uuid = fip['floating_network_id']
if network_uuid not in networks:
raise exception.NetworkNotFound(network_id=network_uuid)
fip['network_details'] = networks[network_uuid]
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
if not self.has_fip_port_details_extension(client=client):
ports = {port['id']: port for port in client.list_ports(
**{'tenant_id': project_id})['ports']}
for fip in fips:
port_id = fip['port_id']
if port_id in ports:
fip['port_details'] = ports[port_id]
else:
# it's possible to create floating IPs without a port
fip['port_details'] = None
return fips
def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating IP's fixed IP is allocated to."""
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
try:
port = self._show_port(context, fip['port_id'],
neutron_client=client)
except exception.PortNotFound:
# NOTE: Here is a potential race condition between _show_port() and
# _get_floating_ip_by_address(). fip['port_id'] shows a port which
# is the server instance's. At _get_floating_ip_by_address(),
# Neutron returns the list which includes the instance. Just after
# that, the deletion of the instance happens and Neutron returns
# 404 on _show_port().
LOG.debug('The port(%s) is not found', fip['port_id'])
return None
return port['device_id']
def get_vifs_by_instance(self, context, instance):
return objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating IP to a project from a pool."""
client = get_client(context)
pool = pool or CONF.neutron.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
param = {'floatingip': {'floating_network_id': pool_id}}
try:
fip = client.create_floatingip(param)
except (neutron_client_exc.IpAddressGenerationFailureClient,
neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
raise exception.NoMoreFloatingIps(str(e))
except neutron_client_exc.OverQuotaClient as e:
raise exception.FloatingIpLimitExceeded(str(e))
except neutron_client_exc.BadRequest as e:
raise exception.FloatingIpBadRequest(str(e))
return fip['floatingip']['floating_ip_address']
def _safe_get_floating_ips(self, client, **kwargs):
"""Get floating IP gracefully handling 404 from Neutron."""
try:
return client.list_floatingips(**kwargs)['floatingips']
# If a neutron plugin does not implement the L3 API a 404 from
# list_floatingips will be raised.
except neutron_client_exc.NotFound:
return []
except neutron_client_exc.NeutronClientException as e:
# bug/1513879 neutron client is currently using
# NeutronClientException when there is no L3 API
if e.status_code == 404:
return []
with excutils.save_and_reraise_exception():
LOG.exception('Unable to access floating IP for %s',
', '.join(['%s %s' % (k, v)
for k, v in kwargs.items()]))
def _get_floating_ip_by_address(self, client, address):
"""Get floating IP from floating IP address."""
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
fips = self._safe_get_floating_ips(client, floating_ip_address=address)
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floating IPs from fixed IP and port."""
return self._safe_get_floating_ips(client, fixed_ip_address=fixed_ip,
port_id=port)
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating IP with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
# find why this parameter exists.
self._release_floating_ip(context, address)
    def disassociate_and_release_floating_ip(self, context, instance,
                                             floating_ip):
        """Removes (deallocates) and deletes the floating IP.
        This api call was added to allow this to be done in one operation
        if using neutron.
        """
        # The nested function exists so the @refresh_cache decorator can be
        # applied only when we actually have an instance whose network info
        # cache should be refreshed after the release.
        @refresh_cache
        def _release_floating_ip_and_refresh_cache(self, context, instance,
                                                   floating_ip):
            self._release_floating_ip(
                context, floating_ip['floating_ip_address'],
                raise_if_associated=False)
        if instance:
            _release_floating_ip_and_refresh_cache(self, context, instance,
                                                   floating_ip)
        else:
            # No instance to refresh: release directly.
            self._release_floating_ip(
                context, floating_ip['floating_ip_address'],
                raise_if_associated=False)
def _release_floating_ip(self, context, address,
raise_if_associated=True):
client = get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if raise_if_associated and fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
try:
client.delete_floatingip(fip['id'])
except neutron_client_exc.NotFound:
raise exception.FloatingIpNotFoundForAddress(
address=address
)
    @refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociate a floating IP from the instance.

        :param context: nova auth request context
        :param instance: the instance whose network info cache is refreshed
            by the ``refresh_cache`` decorator after the update
        :param address: the floating IP address to detach
        :param affect_auto_assigned: ignored (kept for API compatibility)
        """
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could
        # not find why this parameter exists.
        client = get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        # Clearing port_id on the floating IP detaches it in neutron.
        client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance.
        If the instance has port bindings on the destination compute host,
        they are activated in this method which will atomically change the
        source compute host port binding to inactive and also change the port
        "binding:host_id" attribute to the destination host.
        If there are no binding resources for the attached ports on the given
        destination host, this method is a no-op.
        :param context: The user request context.
        :param instance: The instance being migrated.
        :param migration: dict with required keys::
            "source_compute": The name of the source compute host.
            "dest_compute": The name of the destination compute host.
        :raises: nova.exception.PortBindingActivationFailed if any port binding
            activation fails
        """
        if not self.has_port_binding_extension(context):
            # If neutron isn't new enough yet for the port "binding-extended"
            # API extension, we just no-op. The port binding host will be
            # be updated in migrate_instance_finish, which is functionally OK,
            # it's just not optimal.
            LOG.debug('Neutron is not new enough to perform early destination '
                      'host port binding activation. Port bindings will be '
                      'updated later.', instance=instance)
            return
        # Binding activation requires admin credentials.
        client = get_client(context, admin=True)
        dest_host = migration['dest_compute']
        for vif in instance.get_network_info():
            # Not all compute migration flows use the port binding-extended
            # API yet, so first check to see if there is a binding for the
            # port and destination host.
            try:
                binding = client.show_port_binding(
                    vif['id'], dest_host
                )['binding']
            except neutron_client_exc.NeutronClientException as exc:
                if exc.status_code != 404:
                    # We don't raise an exception here because we assume that
                    # port bindings will be updated correctly when
                    # migrate_instance_finish runs
                    LOG.error(
                        'Unexpected error trying to get binding info '
                        'for port %s and destination host %s.',
                        vif['id'], dest_host, exc_info=True)
                    continue
                # ...but if there is no port binding record for the destination
                # host, we can safely assume none of the ports attached to the
                # instance are using the binding-extended API in this flow and
                # exit early.
                return
            if binding['status'] == 'ACTIVE':
                # We might be racing with another thread that's handling
                # post-migrate operations and already activated the port
                # binding for the destination host.
                LOG.debug(
                    'Port %s binding to destination host %s is already ACTIVE',
                    vif['id'], dest_host, instance=instance)
                continue
            try:
                # This is a bit weird in that we don't PUT and update the
                # status to ACTIVE, it's more like a POST action method in the
                # compute API.
                client.activate_port_binding(vif['id'], dest_host)
                LOG.debug(
                    'Activated binding for port %s and host %s',
                    vif['id'], dest_host)
            except neutron_client_exc.NeutronClientException as exc:
                # A 409 means the port binding is already active, which
                # shouldn't happen if the caller is doing things in the correct
                # order.
                if exc.status_code == 409:
                    LOG.warning(
                        'Binding for port %s and host %s is already active',
                        vif['id'], dest_host, exc_info=True)
                    continue
                # Log the details, raise an exception.
                LOG.error(
                    'Unexpected error trying to activate binding '
                    'for port %s and host %s.',
                    vif['id'], dest_host, exc_info=True)
                raise exception.PortBindingActivationFailed(
                    port_id=vif['id'], host=dest_host)
            # TODO(mriedem): Do we need to call
            # _clear_migration_port_profile? migrate_instance_finish
            # would normally take care of clearing the "migrating_to"
            # attribute on each port when updating the port's
            # binding:host_id to point to the destination host.
def migrate_instance_finish(
self, context, instance, migration, provider_mappings):
"""Finish migrating the network of an instance.
:param context: nova auth request context
:param instance: Instance object being migrated
:param migration: Migration object for the operation; used to determine
the phase of the migration which dictates what to do with claimed
PCI devices for SR-IOV ports
:param provider_mappings: a dict of list of resource provider uuids
keyed by port uuid
"""
self._update_port_binding_for_instance(
context, instance, migration.dest_compute, migration=migration,
provider_mappings=provider_mappings)
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs, client=None):
subnets = self._get_subnets_from_port(context, port, client)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
    def _nw_info_build_network(self, context, port, networks, subnets):
        """Assemble the Network model (and OVS interface id) for *port*.

        :param context: request context, used to build an admin client
        :param port: Neutron port dict the network info is derived from
        :param networks: candidate Neutron network dicts; the one whose id
            matches the port's network_id provides name/tenant/mtu
        :param subnets: subnet models to attach to the resulting network
        :returns: tuple of (network_model.Network, ovs_interfaceid or None)
        """
        # TODO(stephenfin): Pass in an existing admin client if available.
        neutron = get_client(context, admin=True)
        network_name = None
        network_mtu = None
        # Find the network the port belongs to; fall back to the port's own
        # tenant if none of the supplied networks matches.
        for net in networks:
            if port['network_id'] == net['id']:
                network_name = net['name']
                tenant_id = net['tenant_id']
                network_mtu = net.get('mtu')
                break
        else:
            tenant_id = port['tenant_id']
            LOG.warning("Network %(id)s not matched with the tenants "
                        "network! The ports tenant %(tenant_id)s will be "
                        "used.",
                        {'id': port['network_id'], 'tenant_id': tenant_id})
        bridge = None
        ovs_interfaceid = None
        # Network model metadata
        should_create_bridge = None
        vif_type = port.get('binding:vif_type')
        port_details = port.get('binding:vif_details', {})
        # Derive bridge / interface details from the bound VIF type.
        if vif_type in [network_model.VIF_TYPE_OVS,
                        network_model.VIF_TYPE_AGILIO_OVS]:
            bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
                                      CONF.neutron.ovs_bridge)
            ovs_interfaceid = port['id']
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
                                      "brq" + port['network_id'])
            should_create_bridge = True
        elif vif_type == network_model.VIF_TYPE_DVS:
            # The name of the DVS port group will contain the neutron
            # network id
            bridge = port['network_id']
        elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
                port_details.get(network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
                                 False)):
            bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
                                      CONF.neutron.ovs_bridge)
            ovs_interfaceid = port['id']
        elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
                port_details.get(network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
                                 False)):
            bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
                                      "brq" + port['network_id'])
        # Prune the bridge name if necessary. For the DVS this is not done
        # as the bridge is a '<network-name>-<network-UUID>'.
        if bridge is not None and vif_type != network_model.VIF_TYPE_DVS:
            bridge = bridge[:network_model.NIC_NAME_LEN]
        physnet, tunneled = self._get_physnet_tunneled_info(
            context, neutron, port['network_id'])
        network = network_model.Network(
            id=port['network_id'],
            bridge=bridge,
            injected=CONF.flat_injected,
            label=network_name,
            tenant_id=tenant_id,
            mtu=network_mtu,
            physical_network=physnet,
            tunneled=tunneled
        )
        network['subnets'] = subnets
        if should_create_bridge is not None:
            network['should_create_bridge'] = should_create_bridge
        return network, ovs_interfaceid
def _get_preexisting_port_ids(self, instance):
"""Retrieve the preexisting ports associated with the given instance.
These ports were not created by nova and hence should not be
deallocated upon instance deletion.
"""
net_info = instance.get_network_info()
if not net_info:
LOG.debug('Instance cache missing network info.',
instance=instance)
return [vif['id'] for vif in net_info
if vif.get('preserve_on_delete')]
    def _build_vif_model(self, context, client, current_neutron_port,
                         networks, preexisting_port_ids):
        """Builds a ``nova.network.model.VIF`` object based on the parameters
        and current state of the port in Neutron.

        :param context: Request context.
        :param client: Neutron client.
        :param current_neutron_port: The current state of a Neutron port
            from which to build the VIF object model.
        :param networks: List of dicts which represent Neutron networks
            associated with the ports currently attached to a given server
            instance.
        :param preexisting_port_ids: List of IDs of ports attached to a
            given server instance which Nova did not create and therefore
            should not delete when the port is detached from the server.
        :return: nova.network.model.VIF object which represents a port in the
            instance network info cache.
        """
        vif_active = False
        # NOTE(review): an administratively-down port (admin_state_up False)
        # is treated as "active" here, same as a port whose status is ACTIVE.
        # This reads oddly -- presumably deliberate so nova does not wait for
        # a plug event that will never arrive on a downed port; confirm
        # before changing.
        if (current_neutron_port['admin_state_up'] is False or
            current_neutron_port['status'] == 'ACTIVE'):
            vif_active = True
        network_IPs = self._nw_info_get_ips(client,
                                            current_neutron_port)
        subnets = self._nw_info_get_subnets(context,
                                            current_neutron_port,
                                            network_IPs, client)
        # Device name is "tap" + port id, truncated to the NIC name limit.
        devname = "tap" + current_neutron_port['id']
        devname = devname[:network_model.NIC_NAME_LEN]
        network, ovs_interfaceid = (
            self._nw_info_build_network(context, current_neutron_port,
                                        networks, subnets))
        # Ports nova did not create are preserved when detached/deleted.
        preserve_on_delete = (current_neutron_port['id'] in
                              preexisting_port_ids)
        return network_model.VIF(
            id=current_neutron_port['id'],
            address=current_neutron_port['mac_address'],
            network=network,
            vnic_type=current_neutron_port.get('binding:vnic_type',
                                               network_model.VNIC_TYPE_NORMAL),
            type=current_neutron_port.get('binding:vif_type'),
            profile=get_binding_profile(current_neutron_port),
            details=current_neutron_port.get('binding:vif_details'),
            ovs_interfaceid=ovs_interfaceid,
            devname=devname,
            active=vif_active,
            preserve_on_delete=preserve_on_delete,
            delegate_create=True,
        )
    def _build_network_info_model(self, context, instance, networks=None,
                                  port_ids=None, admin_client=None,
                                  preexisting_port_ids=None,
                                  refresh_vif_id=None, force_refresh=False):
        """Return list of ordered VIFs attached to instance.

        :param context: Request context.
        :param instance: Instance we are returning network info for.
        :param networks: List of networks being attached to an instance.
            If value is None this value will be populated
            from the existing cached value.
        :param port_ids: List of port_ids that are being attached to an
            instance in order of attachment. If value is None
            this value will be populated from the existing
            cached value.
        :param admin_client: A neutron client for the admin context.
        :param preexisting_port_ids: List of port_ids that nova didn't
            allocate and there shouldn't be deleted when
            an instance is de-allocated. Supplied list will
            be added to the cached list of preexisting port
            IDs for this instance.
        :param refresh_vif_id: Optional port ID to refresh within the existing
            cache rather than the entire cache. This can be
            triggered via a "network-changed" server external event
            from Neutron.
        :param force_refresh: If ``networks`` and ``port_ids`` are both None,
            by default the instance.info_cache will be used to
            populate the network info. Pass ``True`` to force
            collection of ports and networks from neutron directly.
        """
        # Always compare against the live set of ports in Neutron.
        search_opts = {'tenant_id': instance.project_id,
                       'device_id': instance.uuid, }
        if admin_client is None:
            client = get_client(context, admin=True)
        else:
            client = admin_client
        data = client.list_ports(**search_opts)
        current_neutron_ports = data.get('ports', [])
        # Merge caller-supplied preexisting port IDs with those already
        # recorded in the instance's info cache.
        if preexisting_port_ids is None:
            preexisting_port_ids = []
        preexisting_port_ids = set(
            preexisting_port_ids + self._get_preexisting_port_ids(instance))
        current_neutron_port_map = {}
        for current_neutron_port in current_neutron_ports:
            current_neutron_port_map[current_neutron_port['id']] = (
                current_neutron_port)
        # Figure out what kind of operation we're processing. If we're given
        # a single port to refresh then we try to optimize and update just the
        # information for that VIF in the existing cache rather than try to
        # rebuild the entire thing.
        if refresh_vif_id is not None:
            # TODO(mriedem): Consider pulling this out into its own method.
            nw_info = instance.get_network_info()
            if nw_info:
                current_neutron_port = current_neutron_port_map.get(
                    refresh_vif_id)
                if current_neutron_port:
                    # Get the network for the port.
                    networks = self._get_available_networks(
                        context, instance.project_id,
                        [current_neutron_port['network_id']], client)
                    # Build the VIF model given the latest port information.
                    refreshed_vif = self._build_vif_model(
                        context, client, current_neutron_port, networks,
                        preexisting_port_ids)
                    for index, vif in enumerate(nw_info):
                        if vif['id'] == refresh_vif_id:
                            # Update the existing entry.
                            nw_info[index] = refreshed_vif
                            LOG.debug('Updated VIF entry in instance network '
                                      'info cache for port %s.',
                                      refresh_vif_id, instance=instance)
                            break
                    else:
                        # If it wasn't in the existing cache, add it.
                        nw_info.append(refreshed_vif)
                        LOG.debug('Added VIF to instance network info cache '
                                  'for port %s.', refresh_vif_id,
                                  instance=instance)
                else:
                    # This port is no longer associated with the instance, so
                    # simply remove it from the nw_info cache.
                    for index, vif in enumerate(nw_info):
                        if vif['id'] == refresh_vif_id:
                            LOG.info('Port %s from network info_cache is no '
                                     'longer associated with instance in '
                                     'Neutron. Removing from network '
                                     'info_cache.', refresh_vif_id,
                                     instance=instance)
                            del nw_info[index]
                            break
                return nw_info
            # else there is no existing cache and we need to build it
        # Determine if we're doing a full refresh (_heal_instance_info_cache)
        # or if we are refreshing because we have attached/detached a port.
        # TODO(mriedem): we should leverage refresh_vif_id in the latter case
        # since we are unnecessarily rebuilding the entire cache for one port
        nw_info_refresh = networks is None and port_ids is None
        if nw_info_refresh and force_refresh:
            # Use the current set of ports from neutron rather than the cache.
            port_ids = self._get_ordered_port_list(context, instance,
                                                   current_neutron_ports)
            net_ids = [
                current_neutron_port_map.get(port_id, {}).get('network_id')
                for port_id in port_ids]
            # This is copied from _gather_port_ids_and_networks.
            networks = self._get_available_networks(
                context, instance.project_id, net_ids, client)
        else:
            # We are refreshing the full cache using the existing cache rather
            # than what is currently in neutron.
            networks, port_ids = self._gather_port_ids_and_networks(
                context, instance, networks, port_ids, client)
        # Rebuild the whole cache, one VIF per (still existing) port, in the
        # computed attachment order.
        nw_info = network_model.NetworkInfo()
        for port_id in port_ids:
            current_neutron_port = current_neutron_port_map.get(port_id)
            if current_neutron_port:
                vif = self._build_vif_model(
                    context, client, current_neutron_port, networks,
                    preexisting_port_ids)
                nw_info.append(vif)
            elif nw_info_refresh:
                LOG.info('Port %s from network info_cache is no '
                         'longer associated with instance in Neutron. '
                         'Removing from network info_cache.', port_id,
                         instance=instance)
        return nw_info
def _get_ordered_port_list(self, context, instance, current_neutron_ports):
"""Returns ordered port list using nova virtual_interface data."""
# a dict, keyed by port UUID, of the port's "index"
# so that we can order the returned port UUIDs by the
# original insertion order followed by any newly-attached
# ports
port_uuid_to_index_map = {}
port_order_list = []
ports_without_order = []
# Get set of ports from nova vifs
vifs = self.get_vifs_by_instance(context, instance)
for port in current_neutron_ports:
# NOTE(mjozefcz): For each port check if we have its index from
# nova virtual_interfaces objects. If not - it seems
# to be a new port - add it at the end of list.
# Find port index if it was attached before.
for vif in vifs:
if vif.uuid == port['id']:
port_uuid_to_index_map[port['id']] = vif.id
break
if port['id'] not in port_uuid_to_index_map:
# Assume that it's new port and add it to the end of port list.
ports_without_order.append(port['id'])
# Lets sort created port order_list by given index.
port_order_list = sorted(port_uuid_to_index_map,
key=lambda k: port_uuid_to_index_map[k])
# Add ports without order to the end of list
port_order_list.extend(ports_without_order)
return port_order_list
    def _get_subnets_from_port(self, context, port, client=None):
        """Return the subnets for a given port.

        :param context: The request context.
        :param port: Neutron port dict; its ``fixed_ips`` determine which
            subnets are looked up.
        :param client: Optional Neutron client; one is created when omitted.
        :returns: list of ``network_model.Subnet`` objects.
        """
        fixed_ips = port['fixed_ips']
        # No fixed_ips for the port means there is no subnet associated
        # with the network the port is created on.
        # Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which is not
        # related to the port. To avoid this, the method returns here.
        if not fixed_ips:
            return []
        if not client:
            client = get_client(context)
        # De-duplicate subnet IDs before querying Neutron.
        search_opts = {'id': list(set(ip['subnet_id'] for ip in fixed_ips))}
        data = client.list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        subnets = []
        for subnet in ipam_subnets:
            subnet_dict = {'cidr': subnet['cidr'],
                           'gateway': network_model.IP(
                               address=subnet['gateway_ip'],
                               type='gateway'),
                           }
            if subnet.get('ipv6_address_mode'):
                subnet_dict['ipv6_address_mode'] = subnet['ipv6_address_mode']
            # attempt to populate DHCP server field
            dhcp_search_opts = {
                'network_id': subnet['network_id'],
                'device_owner': 'network:dhcp'}
            data = client.list_ports(**dhcp_search_opts)
            dhcp_ports = data.get('ports', [])
            # NOTE: the break below only exits the fixed_ips loop, so when
            # several DHCP ports exist on the same subnet the *last* one
            # wins for dhcp_server.
            for p in dhcp_ports:
                for ip_pair in p['fixed_ips']:
                    if ip_pair['subnet_id'] == subnet['id']:
                        subnet_dict['dhcp_server'] = ip_pair['ip_address']
                        break
            # NOTE(arnaudmorin): If enable_dhcp is set on subnet, but, for
            # some reason neutron did not have any DHCP port yet, we still
            # want the network_info to be populated with a valid dhcp_server
            # value. This is mostly useful for the metadata API (which is
            # relying on this value to give network_data to the instance).
            #
            # This will also help some providers which are using external
            # DHCP servers not handled by neutron.
            # In this case, neutron will never create any DHCP port in the
            # subnet.
            #
            # Also note that we cannot set the value to None because then the
            # value would be discarded by the metadata API.
            # So the subnet gateway will be used as fallback.
            if subnet.get('enable_dhcp') and 'dhcp_server' not in subnet_dict:
                subnet_dict['dhcp_server'] = subnet['gateway_ip']
            subnet_object = network_model.Subnet(**subnet_dict)
            for dns in subnet.get('dns_nameservers', []):
                subnet_object.add_dns(
                    network_model.IP(address=dns, type='dns'))
            for route in subnet.get('host_routes', []):
                subnet_object.add_route(
                    network_model.Route(cidr=route['destination'],
                                        gateway=network_model.IP(
                                            address=route['nexthop'],
                                            type='gateway')))
            subnets.append(subnet_object)
        return subnets
def setup_instance_network_on_host(
self, context, instance, host, migration=None,
provider_mappings=None):
"""Setup network for specified instance on host.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param host: The host which network should be setup for instance.
:param migration: The migration object if the instance is being
tracked with a migration.
:param provider_mappings: a dict of lists of resource provider uuids
keyed by port uuid
"""
self._update_port_binding_for_instance(
context, instance, host, migration, provider_mappings)
    def cleanup_instance_network_on_host(self, context, instance, host):
        """Cleanup network for specified instance on host.

        Port bindings for the given host are deleted. The ports associated
        with the instance via the port device_id field are left intact.

        :param context: The user request context.
        :param instance: Instance object with the associated ports
        :param host: host from which to delete port bindings
        :raises: PortBindingDeletionFailed if port binding deletion fails.
        """
        # First check to see if the port binding extension is supported.
        client = get_client(context)
        if not self.has_port_binding_extension(client=client):
            LOG.info("Neutron extension '%s' is not supported; not cleaning "
                     "up port bindings for host %s.",
                     constants.PORT_BINDING_EXTENDED, host, instance=instance)
            return
        # Now get the ports associated with the instance. We go directly to
        # neutron rather than rely on the info cache just like
        # setup_networks_on_host.
        search_opts = {'device_id': instance.uuid,
                       'tenant_id': instance.project_id,
                       'fields': ['id']}  # we only need the port id
        data = self.list_ports(context, **search_opts)
        self._delete_port_bindings(context, data['ports'], host)
def _get_pci_mapping_for_migration(self, instance, migration):
if not instance.migration_context:
return {}
# In case of revert, swap old and new devices to
# update the ports back to the original devices.
revert = (migration and
migration.get('status') == 'reverted')
return instance.migration_context.get_pci_mapping_for_migration(revert)
def _get_port_pci_slot(self, context, instance, port):
"""Find the PCI address of the device corresponding to the port.
Assumes the port is an SRIOV one.
:param context: The request context.
:param instance: The instance to which the port is attached.
:param port: The Neutron port, as obtained from the Neutron API
JSON form.
:return: The PCI address as a string, or None if unable to find.
"""
# Find the port's PCIRequest, or return None
for r in instance.pci_requests.requests:
if r.requester_id == port['id']:
request = r
break
else:
LOG.debug('No PCI request found for port %s', port['id'],
instance=instance)
return None
# Find the request's device, or return None
for d in instance.pci_devices:
if d.request_id == request.request_id:
device = d
break
else:
LOG.debug('No PCI device found for request %s',
request.request_id, instance=instance)
return None
# Return the device's PCI address
return device.address
    def _update_port_binding_for_instance(
            self, context, instance, host, migration=None,
            provider_mappings=None):
        """Update the bindings of the instance's ports to point at *host*.

        Walks every port owned by the instance and, where the current
        binding host differs (or the previous binding clearly failed),
        stages an update of binding:host_id, device_owner and the binding
        profile (PCI slot for SR-IOV ports, allocation for ports with a
        resource request), then applies all staged updates via Neutron.

        :param context: the request context
        :param instance: the Instance object whose ports are updated
        :param host: target hostname for the port bindings
        :param migration: optional Migration object; used to remap PCI
            devices and to skip work the conductor already did for live
            migrations
        :param provider_mappings: dict of resource provider uuid lists keyed
            by port uuid; required for ports with a resource request
        :raises: PortUpdateFailed if a PCI slot cannot be correlated or
            provider mappings are missing for a port that needs them
        """
        neutron = get_client(context, admin=True)
        search_opts = {'device_id': instance.uuid,
                       'tenant_id': instance.project_id}
        data = neutron.list_ports(**search_opts)
        port_updates = []
        ports = data['ports']
        FAILED_VIF_TYPES = (network_model.VIF_TYPE_UNBOUND,
                            network_model.VIF_TYPE_BINDING_FAILED)
        for p in ports:
            updates = {}
            binding_profile = get_binding_profile(p)
            # We need to update the port binding if the host has changed or if
            # the binding is clearly wrong due to previous lost messages.
            vif_type = p.get('binding:vif_type')
            if (p.get(constants.BINDING_HOST_ID) != host or
                    vif_type in FAILED_VIF_TYPES):
                updates[constants.BINDING_HOST_ID] = host
                # If the host changed, the AZ could have also changed so we
                # need to update the device_owner.
                updates['device_owner'] = (
                    'compute:%s' % instance.availability_zone)
                # NOTE: Before updating the port binding make sure we
                # remove the pre-migration status from the binding profile
                if binding_profile.get(constants.MIGRATING_ATTR):
                    del binding_profile[constants.MIGRATING_ATTR]
                    updates[constants.BINDING_PROFILE] = binding_profile
            # Update port with newly allocated PCI devices. Even if the
            # resize is happening on the same host, a new PCI device can be
            # allocated. Note that this only needs to happen if a migration
            # is in progress such as in a resize / migrate. It is possible
            # that this function is called without a migration object, such
            # as in an unshelve operation.
            vnic_type = p.get('binding:vnic_type')
            if vnic_type in network_model.VNIC_TYPES_SRIOV:
                # NOTE(artom) For migrations, update the binding profile from
                # the migration object...
                if migration is not None:
                    # NOTE(artom) ... except for live migrations, because the
                    # conductor has already done that when calling
                    # bind_ports_to_host().
                    if not migration.is_live_migration:
                        pci_mapping = self._get_pci_mapping_for_migration(
                            instance, migration)
                        pci_slot = binding_profile.get('pci_slot')
                        new_dev = pci_mapping.get(pci_slot)
                        if new_dev:
                            binding_profile.update(
                                self._get_pci_device_profile(new_dev))
                            updates[
                                constants.BINDING_PROFILE] = binding_profile
                        else:
                            raise exception.PortUpdateFailed(port_id=p['id'],
                                reason=_("Unable to correlate PCI slot %s") %
                                       pci_slot)
                # NOTE(artom) If migration is None, this is an unshelve, and
                # we need to figure out the pci_slot from the
                # InstancePCIRequest and PciDevice objects.
                else:
                    pci_slot = self._get_port_pci_slot(context, instance, p)
                    if pci_slot:
                        binding_profile.update({'pci_slot': pci_slot})
                        updates[constants.BINDING_PROFILE] = binding_profile
            # NOTE(gibi): during live migration the conductor already sets the
            # allocation key in the port binding. However during resize, cold
            # migrate, evacuate and unshelve we have to set the binding here.
            # Also note that during unshelve no migration object is created.
            if self._has_resource_request(context, p, neutron) and (
                migration is None or not migration.is_live_migration
            ):
                if not provider_mappings:
                    # TODO(gibi): Remove this check when compute RPC API is
                    # bumped to 6.0
                    # NOTE(gibi): This should not happen as the API level
                    # minimum compute service version check ensures that the
                    # compute services already send the RequestSpec during
                    # the move operations between the source and the
                    # destination and the dest compute calculates the
                    # mapping based on that.
                    LOG.warning(
                        "Provider mappings are not available to the compute "
                        "service but are required for ports with a resource "
                        "request. If compute RPC API versions are pinned for "
                        "a rolling upgrade, you will need to retry this "
                        "operation once the RPC version is unpinned and the "
                        "nova-compute services are all upgraded.",
                        instance=instance)
                    raise exception.PortUpdateFailed(
                        port_id=p['id'],
                        reason=_(
                            "Provider mappings are not available to the "
                            "compute service but are required for ports with "
                            "a resource request."))
                binding_profile[constants.ALLOCATION] = (
                    self._get_binding_profile_allocation(
                        context, p, neutron, provider_mappings))
                updates[constants.BINDING_PROFILE] = binding_profile
            port_updates.append((p['id'], updates))
        # Avoid rolling back updates if we catch an error above.
        # TODO(lbeliveau): Batch up the port updates in one neutron call.
        for port_id, updates in port_updates:
            if updates:
                LOG.info("Updating port %(port)s with "
                         "attributes %(attributes)s",
                         {"port": port_id, "attributes": updates},
                         instance=instance)
                try:
                    neutron.update_port(port_id, {'port': updates})
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception("Unable to update binding details "
                                      "for port %s",
                                      port_id, instance=instance)
def update_instance_vnic_index(self, context, instance, vif, index):
"""Update instance vnic index.
When the 'VNIC index' extension is supported this method will update
the vnic index of the instance on the port. An instance may have more
than one vnic.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param vif: The VIF in question.
:param index: The index on the instance for the VIF.
"""
neutron = get_client(context)
if self.has_vnic_index_extension(client=neutron):
port_req_body = {'port': {'vnic_index': index}}
try:
neutron.update_port(vif['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Unable to update instance VNIC index '
'for port %s.',
vif['id'], instance=instance)
def get_segment_ids_for_network(
self,
context: nova.context.RequestContext,
network_id: str,
) -> ty.List[str]:
"""Query the segmentation ids for the given network.
:param context: The request context.
:param network_id: The UUID of the network to be queried.
:returns: The list of segment UUIDs of the network or an empty list if
either Segment extension isn't enabled in Neutron or if the network
isn't configured for routing.
"""
client = get_client(context)
if not self.has_segment_extension(client=client):
return []
try:
# NOTE(sbauza): We can't use list_segments() directly because the
# API is borked and returns both segments but also segmentation IDs
# of a provider network if any.
subnets = client.list_subnets(network_id=network_id,
fields='segment_id')['subnets']
except neutron_client_exc.NeutronClientException as e:
raise exception.InvalidRoutedNetworkConfiguration(
'Failed to get segment IDs for network %s' % network_id) from e
# The segment field of an unconfigured subnet could be None
return [subnet['segment_id'] for subnet in subnets
if subnet['segment_id'] is not None]
def get_segment_id_for_subnet(
self,
context: nova.context.RequestContext,
subnet_id: str,
) -> ty.Optional[str]:
"""Query the segmentation id for the given subnet.
:param context: The request context.
:param subnet_id: The UUID of the subnet to be queried.
:returns: The segment UUID of the subnet or None if either Segment
extension isn't enabled in Neutron or the provided subnet doesn't
have segments (if the related network isn't configured for routing)
"""
client = get_client(context)
if not self.has_segment_extension(client=client):
return None
try:
subnet = client.show_subnet(subnet_id)['subnet']
except neutron_client_exc.NeutronClientException as e:
raise exception.InvalidRoutedNetworkConfiguration(
'Subnet %s not found' % subnet_id) from e
return subnet.get('segment_id')
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
| openstack/nova | nova/network/neutron.py | Python | apache-2.0 | 185,923 |
from nupic.encoders import (LogEncoder, DateEncoder, MultiEncoder, ScalarEncoder)
from nupic.data import FunctionSource
from nupic.frameworks.prediction.callbacks import displaySPCoincidences, printSPCoincidences
from nupic.data.dictutils import DictObj
# Number of spatial pooler coincidences (columns).
nCoincidences = 30
# NOTE(review): appears unused -- the spTrain step below hard-codes
# iterationCount=5000; confirm whether this constant should be wired in.
iterationCount = 10000
# Number of extra uniformly-random distractor fields added to each record.
nRandomFields = 1
# Upper bound (inclusive) for the random field values; also sizes the
# scalar encoders.
randomFieldWidth = 66
# Controls behavior of iteration callback
showSPCoincs = True
def generateFunction(info):
  """Return a DictObj holding one synthetic record: a random timestamp in a
  fixed window starting at epoch 1289409426, an 'amount' chosen from one of
  two ranges depending on the hour, and uniform random distractor fields.

  The dictionary keys must match the field names registered on the
  MultiEncoder in getDescription().
  """
  # This function needs to be self-contained so that it can work
  # after de-serialization.
  # These imports shouldn't be too slow after the first import
  import datetime
  import random
  d = DictObj()
  # Generate a random time in a one-month period
  # NOTE(review): 30*86000 is not 30 days (a day is 86400 s); presumably a
  # typo, but harmless for a rough one-month window -- confirm.
  t = datetime.datetime.fromtimestamp(1289409426 + random.randint(0, 30*86000))
  # Amount varies as follows:
  # Most of the day, it has a 90% chance of being between 1 and 10.00
  # and a 10% chance of being between 100 and 1000)
  # between 8PM and 11PM, the probabilities are reversed
  # p = probability of high value
  # NOTE(review): with p = 1.0 the high-value branch is taken *always*
  # during the day and *never* between 20:00 and 23:00, which contradicts
  # the 90%/10% description above. Confirm whether p should be 0.1.
  p = 1.0
  if 20 <= t.hour < 23:
    p = 1.0 - p
  if random.random() < p:
    amount = random.randint(100, 1000)
  else:
    amount = random.randint(1, 10)
  # Dictionary keys must match the names in the multiencoder
  d["date"] = t
  d["amount"] = amount
  for i in xrange(info['nRandomFields']):
    d["random%d" %i] = random.randint(0, info['randomFieldWidth'])
  return d
def getBaseDatasets():
  """This experiment generates its data on the fly; there are no base
  datasets on disk."""
  return {}
def getDatasets(baseDatasets, generate=False):
  """Return the base datasets unchanged; nothing is generated on disk
  (records come from the FunctionSource built in getDescription())."""
  return baseDatasets
def getDescription(datasets):
  """Build the prediction-framework experiment description.

  Wires a MultiEncoder (date, log-scaled amount, plus nRandomFields
  distractor scalars) to a FunctionSource driven by generateFunction,
  and configures a spatial-pooler-only CLARegion (temporal pooling and
  inference are disabled).

  :param datasets: dataset mapping from getDatasets() (unused here).
  :returns: the experiment description dict.
  """
  # Encoder for the input fields; names must match the keys produced by
  # generateFunction().
  encoder = MultiEncoder()
  encoder.addEncoder("date", DateEncoder(timeOfDay=3))
  encoder.addEncoder("amount", LogEncoder(name="amount", maxval=1000))
  for i in xrange(0, nRandomFields):
    s = ScalarEncoder(name="scalar", minval=0, maxval=randomFieldWidth, resolution=1, w=3)
    encoder.addEncoder("random%d" % i, s)
  dataSource = FunctionSource(generateFunction, dict(nRandomFields=nRandomFields,
                                                     randomFieldWidth=randomFieldWidth))
  inputShape = (1, encoder.getWidth())
  # Layout the coincidences vertically stacked on top of each other, each
  # looking at the entire input field.
  coincidencesShape = (nCoincidences, 1)
  # TODO: why do we need input border?
  inputBorder = inputShape[1]/2
  if inputBorder*2 >= inputShape[1]:
    inputBorder -= 1
  nodeParams = dict()
  # Spatial pooler parameters (commented alternatives kept from tuning).
  spParams = dict(
    commonDistributions=0,
    inputShape = inputShape,
    inputBorder = inputBorder,
    coincidencesShape = coincidencesShape,
    coincInputRadius = inputShape[1]/2,
    coincInputPoolPct = 0.75,
    gaussianDist = 0,
    localAreaDensity = 0.10,
    # localAreaDensity = 0.04,
    numActivePerInhArea = -1,
    dutyCyclePeriod = 1000,
    stimulusThreshold = 5,
    synPermInactiveDec=0.08,
    # synPermInactiveDec=0.02,
    synPermActiveInc=0.02,
    synPermActiveSharedDec=0.0,
    synPermOrphanDec = 0.0,
    minPctDutyCycleBeforeInh = 0.05,
    # minPctDutyCycleAfterInh = 0.1,
    # minPctDutyCycleBeforeInh = 0.05,
    minPctDutyCycleAfterInh = 0.05,
    # minPctDutyCycleAfterInh = 0.4,
    seed = 1,
  )
  # Spatial-only training: temporal pooling is disabled.
  otherParams = dict(
    disableTemporal=1,
    trainingStep='spatial',
  )
  nodeParams.update(spParams)
  nodeParams.update(otherParams)
  def mySetupCallback(experiment):
    print "Setup function called"
  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),
    network = dict(
      sensorDataSource = dataSource,
      sensorEncoder = encoder,
      CLAType = "py.CLARegion",
      CLAParams = nodeParams,
      classifierType = None,
      classifierParams = None),
    # step
    # NOTE(review): iterationCount here is hard-coded to 5000 while the
    # module-level iterationCount constant is 10000 -- confirm which is
    # intended.
    spTrain = dict(
      name="phase1",
      setup=mySetupCallback,
      iterationCount=5000,
      #iter=displaySPCoincidences(100),
      finish=printSPCoincidences()),
    tpTrain = None, # same format as sptrain if non-empty
    infer = None, # same format as sptrain if non-empty
  )
  return description
| chetan51/nupic | examples/prediction/experiments/generated_data/description.py | Python | gpl-3.0 | 4,064 |
from os import path
from enigma import iPlayableService, iServiceInformation, eTimer
from Screens.Screen import Screen
from Components.About import about
from Components.SystemInfo import SystemInfo
from Components.ConfigList import ConfigListScreen
from Components.config import config, configfile, getConfigListEntry
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.Pixmap import Pixmap
from Components.Sources.Boolean import Boolean
from Components.ServiceEventTracker import ServiceEventTracker
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from Tools.HardwareInfo import HardwareInfo
from Components.AVSwitch import iAVSwitch
# Module-level handle; presumably assigned elsewhere in this file to the
# on-screen resolution-info label instance -- confirm against the rest of
# the module.
resolutionlabel = None
class VideoSetup(Screen, ConfigListScreen):
	def __init__(self, session):
		"""Build the A/V settings screen: widgets, key bindings and the
		initial config list."""
		Screen.__init__(self, session)
		self.skinName = ["Setup" ]
		self.setup_title = _("A/V settings")
		self["HelpWindow"] = Pixmap()
		self["HelpWindow"].hide()
		self["VKeyIcon"] = Boolean(False)
		self['footnote'] = Label()
		self.hw = iAVSwitch
		self.onChangedEntry = [ ]
		# handle hotplug by re-creating setup
		self.onShow.append(self.startHotplug)
		self.onHide.append(self.stopHotplug)
		self.list = [ ]
		ConfigListScreen.__init__(self, self.list, session = session, on_change = self.changedEntry)
		from Components.ActionMap import ActionMap
		# Map remote-control keys: red/exit cancels, green/ok applies,
		# menu closes the whole setup tree.
		self["actions"] = ActionMap(["SetupActions", "MenuActions"],
			{
				"cancel": self.keyCancel,
				"save": self.apply,
				"menu": self.closeRecursive,
			}, -2)
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("OK"))
		self["description"] = Label("")
		# Populate the config list and remember the current (known good)
		# video mode so it can be restored later.
		self.createSetup()
		self.grabLastGoodMode()
		self.onLayoutFinish.append(self.layoutFinished)
	def layoutFinished(self):
		# Apply the screen title once the skin layout has been set up.
		self.setTitle(self.setup_title)
	def startHotplug(self):
		# While shown, rebuild the setup list whenever an A/V connector
		# hotplug event fires.
		self.hw.on_hotplug.append(self.createSetup)
	def stopHotplug(self):
		# Stop listening for connector hotplug events once hidden.
		self.hw.on_hotplug.remove(self.createSetup)
def createSetup(self):
level = config.usage.setup_level.index
self.list = [
getConfigListEntry(_("Video output"), config.av.videoport, _("Configures which video output connector will be used."))
]
if config.av.videoport.value in ('HDMI', 'YPbPr', 'Scart-YPbPr') and not path.exists(resolveFilename(SCOPE_PLUGINS)+'SystemPlugins/AutoResolution'):
self.list.append(getConfigListEntry(_("Automatic resolution"), config.av.autores,_("If enabled the output resolution of the box will try to match the resolution of the video contents resolution")))
if config.av.autores.value in ('all', 'hd'):
self.list.append(getConfigListEntry(_("Delay time"), config.av.autores_delay,_("Set the time before checking video source for resolution infomation.")))
self.list.append(getConfigListEntry(_("Force de-interlace"), config.av.autores_deinterlace,_("If enabled the video will always be de-interlaced.")))
self.list.append(getConfigListEntry(_("Automatic resolution label"), config.av.autores_label_timeout,_("Allows you to adjust the amount of time the resolution infomation display on screen.")))
if config.av.autores.value in 'hd':
self.list.append(getConfigListEntry(_("Show SD as"), config.av.autores_sd,_("This option allows you to choose how to display standard defintion video on your TV.")))
self.list.append(getConfigListEntry(_("Show 720p 24fps as"), config.av.autores_720p24,_("This option allows you to choose how to display 720p 24Hz on your TV. (as not all TV's support these resolutions)")))
self.list.append(getConfigListEntry(_("Show 1080p 24fps as"), config.av.autores_1080p24,_("This option allows you to choose how to display 1080p 24Hz on your TV. (as not all TV's support these resolutions)")))
self.list.append(getConfigListEntry(_("Show 1080p 25fps as"), config.av.autores_1080p25,_("This option allows you to choose how to display 1080p 25Hz on your TV. (as not all TV's support these resolutions)")))
self.list.append(getConfigListEntry(_("Show 1080p 30fps as"), config.av.autores_1080p30,_("This option allows you to choose how to display 1080p 30Hz on your TV. (as not all TV's support these resolutions)")))
# if we have modes for this port:
if (config.av.videoport.value in config.av.videomode and config.av.autores.value == 'disabled') or config.av.videoport.value == 'Scart':
# add mode- and rate-selection:
self.list.append(getConfigListEntry(pgettext("Video output mode", "Mode"), config.av.videomode[config.av.videoport.value], _("This option configures the video output mode (or resolution).")))
if config.av.videomode[config.av.videoport.value].value == 'PC':
self.list.append(getConfigListEntry(_("Resolution"), config.av.videorate[config.av.videomode[config.av.videoport.value].value], _("This option configures the screen resolution in PC output mode.")))
elif config.av.videoport.value != 'Scart':
self.list.append(getConfigListEntry(_("Refresh rate"), config.av.videorate[config.av.videomode[config.av.videoport.value].value], _("Configure the refresh rate of the screen.")))
port = config.av.videoport.value
if port not in config.av.videomode:
mode = None
else:
mode = config.av.videomode[port].value
# some modes (720p, 1080i) are always widescreen. Don't let the user select something here, "auto" is not what he wants.
force_wide = self.hw.isWidescreenMode(port, mode)
# if not force_wide:
# self.list.append(getConfigListEntry(_("Aspect ratio"), config.av.aspect, _("Configure the aspect ratio of the screen.")))
if force_wide or config.av.aspect.value in ("16:9", "16:10"):
self.list.extend((
getConfigListEntry(_("Display 4:3 content as"), config.av.policy_43, _("When the content has an aspect ratio of 4:3, choose whether to scale/stretch the picture.")),
getConfigListEntry(_("Display >16:9 content as"), config.av.policy_169, _("When the content has an aspect ratio of 16:9, choose whether to scale/stretch the picture."))
))
elif config.av.aspect.value == "4:3":
self.list.append(getConfigListEntry(_("Display 16:9 content as"), config.av.policy_169, _("When the content has an aspect ratio of 16:9, choose whether to scale/stretch the picture.")))
# if config.av.videoport.value == "HDMI":
# self.list.append(getConfigListEntry(_("Allow unsupported modes"), config.av.edid_override))
if config.av.videoport.value == "Scart":
self.list.append(getConfigListEntry(_("Color format"), config.av.colorformat, _("Configure which color format should be used on the SCART output.")))
if level >= 1:
self.list.append(getConfigListEntry(_("WSS on 4:3"), config.av.wss, _("When enabled, content with an aspect ratio of 4:3 will be stretched to fit the screen.")))
if SystemInfo["ScartSwitch"]:
self.list.append(getConfigListEntry(_("Auto scart switching"), config.av.vcrswitch, _("When enabled, your receiver will detect activity on the VCR SCART input.")))
if level >= 1:
if SystemInfo["CanPcmMultichannel"]:
self.list.append(getConfigListEntry(_("PCM Multichannel"), config.av.pcm_multichannel, _("Choose whether multi channel sound tracks should be output as PCM.")))
if SystemInfo["CanDownmixAC3"]:
self.list.append(getConfigListEntry(_("Dolby Digital / DTS downmix"), config.av.downmix_ac3, _("Choose whether multi channel sound tracks should be downmixed to stereo.")))
if SystemInfo["CanDownmixAAC"]:
self.list.append(getConfigListEntry(_("AAC downmix"), config.av.downmix_aac, _("Choose whether multi channel sound tracks should be downmixed to stereo.")))
if SystemInfo["CanAACTranscode"]:
self.list.append(getConfigListEntry(_("AAC transcoding"), config.av.transcodeaac, _("Choose whether AAC sound tracks should be transcoded.")))
self.list.extend((
getConfigListEntry(_("General AC3 delay"), config.av.generalAC3delay, _("This option configures the general audio delay of Dolby Digital sound tracks.")),
getConfigListEntry(_("General PCM delay"), config.av.generalPCMdelay, _("This option configures the general audio delay of stereo sound tracks."))
))
if SystemInfo["Can3DSurround"]:
self.list.append(getConfigListEntry(_("3D Surround"), config.av.surround_3d,_("This option allows you to enable 3D Surround Sound.")))
if SystemInfo["Canedidchecking"]:
self.list.append(getConfigListEntry(_("Bypass HDMI EDID Check"), config.av.bypass_edid_checking,_("This option allows you to bypass HDMI EDID check")))
# if not isinstance(config.av.scaler_sharpness, ConfigNothing):
# self.list.append(getConfigListEntry(_("Scaler sharpness"), config.av.scaler_sharpness, _("This option configures the picture sharpness.")))
self["config"].list = self.list
self["config"].l.setList(self.list)
if config.usage.sort_settings.value:
self["config"].list.sort()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.createSetup()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.createSetup()
def confirm(self, confirmed):
if not confirmed:
config.av.videoport.setValue(self.last_good[0])
config.av.videomode[self.last_good[0]].setValue(self.last_good[1])
config.av.videorate[self.last_good[1]].setValue(self.last_good[2])
self.hw.setMode(*self.last_good)
else:
self.keySave()
def grabLastGoodMode(self):
port = config.av.videoport.value
mode = config.av.videomode[port].value
rate = config.av.videorate[mode].value
self.last_good = (port, mode, rate)
def saveAll(self):
if config.av.videoport.value == 'Scart':
config.av.autores.setValue('disabled')
for x in self["config"].list:
x[1].save()
configfile.save()
def apply(self):
port = config.av.videoport.value
mode = config.av.videomode[port].value
rate = config.av.videorate[mode].value
if (port, mode, rate) != self.last_good:
self.hw.setMode(port, mode, rate)
from Screens.MessageBox import MessageBox
self.session.openWithCallback(self.confirm, MessageBox, _("Is this video mode ok?"), MessageBox.TYPE_YESNO, timeout = 20, default = False)
else:
self.keySave()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def getCurrentDescription(self):
return self["config"].getCurrent() and len(self["config"].getCurrent()) > 2 and self["config"].getCurrent()[2] or ""
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class AutoVideoModeLabel(Screen):
	"""Overlay showing the detected video content ("content" label) and the
	chosen output mode ("restxt" label), auto-hiding after a configurable
	timeout."""
	def __init__(self, session):
		Screen.__init__(self, session)
		self["content"] = Label()
		self["restxt"] = Label()
		self.hideTimer = eTimer()
		self.hideTimer.callback.append(self.hide)
		self.onShow.append(self.hide_me)
	def hide_me(self):
		# A zero index means "never hide automatically".
		timeout = config.av.autores_label_timeout.index
		if not timeout:
			return
		# The config index is offset by 4 to get the timeout in seconds;
		# single-shot timer calls self.hide when it fires.
		self.hideTimer.start((timeout + 4) * 1000, True)
class AutoVideoMode(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evVideoSizeChanged: self.VideoChanged,
iPlayableService.evVideoProgressiveChanged: self.VideoChanged,
iPlayableService.evVideoFramerateChanged: self.VideoChanged,
iPlayableService.evBuffering: self.BufferInfo,
})
self.delay = False
self.bufferfull = True
self.detecttimer = eTimer()
self.detecttimer.callback.append(self.VideoChangeDetect)
def BufferInfo(self):
bufferInfo = self.session.nav.getCurrentService().streamed().getBufferCharge()
if bufferInfo[0] > 98:
self.bufferfull = True
self.VideoChanged()
else:
self.bufferfull = False
def VideoChanged(self):
if self.session.nav.getCurrentlyPlayingServiceReference() and not self.session.nav.getCurrentlyPlayingServiceReference().toString().startswith('4097:'):
delay = config.av.autores_delay.value
else:
delay = config.av.autores_delay.value * 2
if not self.detecttimer.isActive() and not self.delay:
self.delay = True
self.detecttimer.start(delay)
else:
self.delay = True
self.detecttimer.stop()
self.detecttimer.start(delay)
def VideoChangeDetect(self):
global resolutionlabel
config_port = config.av.videoport.value
config_mode = str(config.av.videomode[config_port].value).replace('\n','')
config_res = str(config.av.videomode[config_port].value[:-1]).replace('\n','')
config_pol = str(config.av.videomode[config_port].value[-1:]).replace('\n','')
config_rate = str(config.av.videorate[config_mode].value).replace('Hz','').replace('\n','')
f = open("/proc/stb/video/videomode")
current_mode = f.read()[:-1].replace('\n','')
f.close()
if current_mode.upper() in ('PAL', 'NTSC'):
current_mode = current_mode.upper()
current_pol = ''
if 'i' in current_mode:
current_pol = 'i'
elif 'p' in current_mode:
current_pol = 'p'
current_res = current_pol and current_mode.split(current_pol)[0].replace('\n','') or ""
current_rate = current_pol and current_mode.split(current_pol)[0].replace('\n','') and current_mode.split(current_pol)[1].replace('\n','') or ""
video_height = None
video_width = None
video_pol = None
video_rate = None
if path.exists("/proc/stb/vmpeg/0/yres"):
f = open("/proc/stb/vmpeg/0/yres", "r")
video_height = int(f.read(),16)
f.close()
if path.exists("/proc/stb/vmpeg/0/xres"):
f = open("/proc/stb/vmpeg/0/xres", "r")
video_width = int(f.read(),16)
f.close()
if path.exists("/proc/stb/vmpeg/0/progressive"):
f = open("/proc/stb/vmpeg/0/progressive", "r")
video_pol = "p" if int(f.read(),16) else "i"
f.close()
if path.exists("/proc/stb/vmpeg/0/framerate"):
f = open("/proc/stb/vmpeg/0/framerate", "r")
video_rate = int(f.read())
f.close()
if not video_height or not video_width or not video_pol or not video_rate:
service = self.session.nav.getCurrentService()
if service is not None:
info = service.info()
else:
info = None
if info:
video_height = int(info.getInfo(iServiceInformation.sVideoHeight))
video_width = int(info.getInfo(iServiceInformation.sVideoWidth))
video_pol = ("i", "p")[info.getInfo(iServiceInformation.sProgressive)]
video_rate = int(info.getInfo(iServiceInformation.sFrameRate))
if video_height and video_width and video_pol and video_rate:
resolutionlabel["content"].setText(_("Video content: %ix%i%s %iHz") % (video_width, video_height, video_pol, (video_rate + 500) / 1000))
if video_height != -1:
if video_height > 720 or video_width > 1280:
new_res = "1080"
elif (576 < video_height <= 720) or video_width > 1024:
new_res = "720"
elif (480 < video_height <= 576) or video_width > 720 or video_rate in (25000, 23976, 24000):
new_res = "576"
else:
new_res = "480"
else:
new_res = config_res
if video_rate != -1:
if video_rate == 25000 and video_pol == 'i':
new_rate = 50000
elif video_rate == 59940 or (video_rate == 29970 and video_pol == 'i') or (video_rate == 29970 and video_pol == 'p' and config.av.autores.value == 'disabled'):
new_rate = 60000
elif video_rate == 23976:
new_rate = 24000
elif video_rate == 29970:
new_rate = 30000
else:
new_rate = video_rate
new_rate = str((new_rate + 500) / 1000)
else:
new_rate = config_rate
if video_pol != -1:
new_pol = str(video_pol)
else:
new_pol = config_pol
write_mode = None
new_mode = None
if config_mode in ('PAL', 'NTSC'):
write_mode = config_mode
elif config.av.autores.value == 'all' or (config.av.autores.value == 'hd' and int(new_res) >= 720):
if (config.av.autores_deinterlace.value and HardwareInfo().is_nextgen()) or (config.av.autores_deinterlace.value and not HardwareInfo().is_nextgen() and int(new_res) <= 720):
new_pol = new_pol.replace('i','p')
if new_res+new_pol+new_rate in iAVSwitch.modes_available:
new_mode = new_res+new_pol+new_rate
if new_mode == '720p24':
new_mode = config.av.autores_720p24.value
if new_mode == '1080p24':
new_mode = config.av.autores_1080p24.value
if new_mode == '1080p25':
new_mode = config.av.autores_1080p25.value
if new_mode == '1080p30':
new_mode = config.av.autores_1080p30.value
elif new_res+new_pol in iAVSwitch.modes_available:
new_mode = new_res+new_pol
else:
write_mode = config_mode+new_rate
write_mode = new_mode
elif config.av.autores.value == 'hd' and int(new_res) <= 576:
if (config.av.autores_deinterlace.value and HardwareInfo().is_nextgen()) or (config.av.autores_deinterlace.value and not HardwareInfo().is_nextgen() and not config.av.autores_sd.value == '1080i'):
new_mode = config.av.autores_sd.value.replace('i','p')+new_rate
else:
if new_pol in 'p':
new_mode = config.av.autores_sd.value.replace('i','p')+new_rate
else:
new_mode = config.av.autores_sd.value+new_rate
if new_mode == '720p24':
new_mode = config.av.autores_720p24.value
if new_mode == '1080p24':
new_mode = config.av.autores_1080p24.value
if new_mode == '1080p25':
new_mode = config.av.autores_1080p25.value
if new_mode == '1080p30':
new_mode = config.av.autores_1080p30.value
write_mode = new_mode
else:
if path.exists('/proc/stb/video/videomode_%shz' % new_rate) and config_rate == 'multi':
f = open("/proc/stb/video/videomode_%shz" % new_rate, "r")
multi_videomode = f.read().replace('\n','')
f.close()
if multi_videomode and (current_mode != multi_videomode):
write_mode = multi_videomode
else:
write_mode = config_mode+new_rate
if write_mode and current_mode != write_mode and self.bufferfull:
resolutionlabel["restxt"].setText(_("Video mode: %s") % write_mode)
if config.av.autores.value != "disabled" and config.av.autores_label_timeout.value != '0':
resolutionlabel.show()
print "[VideoMode] setMode - port: %s, mode: %s" % (config_port, write_mode)
f = open("/proc/stb/video/videomode", "w")
f.write(write_mode)
f.close()
iAVSwitch.setAspect(config.av.aspect)
iAVSwitch.setWss(config.av.wss)
iAVSwitch.setPolicy43(config.av.policy_43)
iAVSwitch.setPolicy169(config.av.policy_169)
self.delay = False
self.detecttimer.stop()
def autostart(session):
	"""Session-start hook: instantiate the auto-resolution machinery unless
	the external AutoResolution plugin is installed (in which case automatic
	resolution is forced off)."""
	global resolutionlabel
	if not path.exists(resolveFilename(SCOPE_PLUGINS)+'SystemPlugins/AutoResolution'):
		if resolutionlabel is None:
			resolutionlabel = session.instantiateDialog(AutoVideoModeLabel)
		AutoVideoMode(session)
	else:
		# Fix: config.av.autores is a choice config compared against
		# 'disabled' elsewhere in this file, so disable it with that
		# choice value rather than the boolean False.
		config.av.autores.setValue('disabled')
		config.av.autores.save()
		configfile.save()
| oe-alliance/oe-alliance-enigma2 | lib/python/Screens/VideoMode.py | Python | gpl-2.0 | 18,622 |
# -*- coding: utf-8 -*-
'''Public section, including homepage and signup.'''
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session, jsonify, redirect, request, current_app,
abort)
from flask.ext.login import login_user, login_required, logout_user
from translatorsdesk.extensions import login_manager
from translatorsdesk.user.models import User
from translatorsdesk.public.forms import LoginForm
from translatorsdesk.user.forms import RegisterForm
from translatorsdesk.utils import flash_errors
from translatorsdesk.database import db
import translatorsdesk.worker_functions as worker_functions
import polib
import datetime, uuid, os
blueprint = Blueprint('public', __name__, static_folder="../static")
from rq import Queue
from redis import Redis
#TO-DO : Change this to a redis pool
redis_conn = Redis()
q = Queue(connection=redis_conn)
@login_manager.user_loader
def load_user(id):
    """Flask-Login user loader: resolve a stored session id to a User."""
    user_id = int(id)
    return User.get_by_id(user_id)
@blueprint.route("/", methods=["GET", "POST"])
def home():
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", 'success')
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route('/logout/')
@login_required
def logout():
    """End the current session and bounce back to the homepage."""
    logout_user()
    flash('You are logged out.', 'info')
    home_url = url_for('public.home')
    return redirect(home_url)
@blueprint.route("/register/", methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
new_user = User.create(username=form.username.data,
email=form.email.data,
password=form.password.data,
active=True)
flash("Thank you for registering. You can now log in.", 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route("/about/")
def about():
form = LoginForm(request.form)
return render_template("public/about.html", form=form)
"""
Handles file uploads
"""
@blueprint.route('/upload', methods=['POST'])
def upload():
if request.method == 'POST':
file = request.files.get('file', None)
raw_text = request.values.get("raw_text", None)
print file
print raw_text
if file:
if _allowed_file(file.filename):
_uuid = str(uuid.uuid4())
secure_filename = file.filename.replace('/', "_").replace('\\', '_')
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], _uuid, secure_filename)
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
file.save(filepath)
else:
return jsonify({"success": False, "message": "File Type not supported yet!!"})
elif raw_text:
print "I am in raw_text"
_uuid = str(uuid.uuid4())
secure_filename = "raw_text.txt"
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], _uuid, secure_filename)
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
print "Made dir"
f = open(filepath, 'w')
f.write(raw_text)
f.close()
print "made file"
if file or raw_text:
## Add Job to Queue
src = request.values.get("src", None)
tgt = request.values.get("tgt", None)
print src, tgt
if not (src and tgt):
return jsonify({"success": False, "message": "Source and Target Languages not specified!!"})
#CLEAN SRC AND TGT VAR
src = src.strip('\n').strip('\r').strip()
tgt = tgt.strip('\n').strip('\r').strip()
job = q.enqueue_call(func=worker_functions.process_input_file, args=(filepath, src, tgt))
return jsonify({"success":True, "filename":secure_filename, "uuid": _uuid })
else:
return jsonify({"success": False, "message": "Corrupt File"})
def _allowed_file(filename):
    """Return True if *filename* has an extension listed in the app's
    ALLOWED_FILE_EXTENSIONS config."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in current_app.config['ALLOWED_FILE_EXTENSIONS']
"""
Helper functions for Translate
"""
"""
Checks if a uid, fileName pair exists
"""
def fileExists(uid, fileName):
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName)
if os.path.exists(filepath):
return True
else:
return False
"""
Checks if the XLIFF file for a uid, fileName pair exists
Note: This assumes that the uid and fileName pair exists
"""
def fileXLIFFExists(uid, fileName):
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName+".xlf")
if os.path.exists(filepath):
return True
else:
return False
def returnFileData(uid, fileName):
    """Read and return the full contents of an uploaded file."""
    filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName)
    # 'with' guarantees the handle is closed even if read() raises.
    with open(filepath, 'r') as f:
        return f.read()
def get_redis_connection():
    # A fresh client per call; Redis() connects lazily with default
    # host/port settings.
    return Redis()
"""
Handles Computer Assisted Translation of a particular xliff file
"""
@blueprint.route('/translate/<uid>/<fileName>/', methods=['GET'])
def translate(uid, fileName):
##Check if the uid and filename exists
r_conn = get_redis_connection()
_status = r_conn.lrange("state_"+uid+"/"+fileName, 0, -1)
if len(_status) >0 and (_status[0]=="GENERATING_TRANSLATED_PO_FILE:::COMPLETE" or _status[0].startswith("OUTPUT_FILE_GENERATED") ) :
if fileExists(uid, fileName):
if(fileExists(uid, fileName+".po")):
po = polib.pofile(os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName+".po"))
valid_entries = [e for e in po if not e.obsolete]
d = []
for entry in valid_entries:
if entry.msgid.strip() != "":
_tgt_lang = r_conn.lrange("lang_"+uid+"/"+fileName, 0, -1)
d.append({"src":entry.msgid,"tgt":entry.msgstr,"tgt_lang":_tgt_lang[0]})
r_conn = get_redis_connection()
_status = r_conn.lrange("state_"+uid+"/"+fileName, 0, -1)
return render_template('public/translate.html',\
fileName=fileName,
uid=uid,
status = _status,
PO = {'po':True, 'data':d}
)
else:
return abort(404)
else:
return abort(404)
else:
r_conn = get_redis_connection()
_status = r_conn.lrange("state_"+uid+"/"+fileName, 0, -1)
return render_template('public/translate.html',\
fileName=fileName,
uid=uid,
status=_status,
PO = False
)
import subprocess
@blueprint.route('/preview', methods=['POST'])
def preview():
    """Rebuild the ``<fileName>.updated.po`` file from the posted
    translations and enqueue generation of the output file.

    Expects JSON with 'fileName', 'uid' and a 'data' list of
    {'src': ..., 'tgt': ...} entries.
    """
    data = request.json
    fileName = data['fileName']
    uid = data['uid']
    po = polib.POFile()
    for pair in data['data']:
        po.append(polib.POEntry(
            msgid=unicode(pair['src'].strip()),
            msgstr=unicode(pair['tgt'].strip()),
        ))
    # (removed leftover debug print of the request payload)
    po.save(os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName+".updated.po"))
    filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName)
    q.enqueue_call(func=worker_functions.generateOutputFile, args=(filepath,))
    return "#"
@blueprint.route('/status/<uid>/<fileName>', methods=['GET'])
def status(uid, fileName):
    """Report (as JSON) whether the generated output file is ready, and its
    path when it is."""
    r_conn = get_redis_connection()
    states = r_conn.lrange("state_"+uid+"/"+fileName, 0, -1)
    if not states or not states[0].startswith("OUTPUT_FILE_GENERATED"):
        return jsonify({'fileReady':False})
    # The output path is the last ':::'-separated field of the state entry.
    return jsonify({'file':states[0].split(":::")[-1], 'fileReady':True})
| ltrc/TranslatorsDesk | translatorsdesk/public/views.py | Python | bsd-3-clause | 8,582 |
# Project: PyUsbLamp
# Author: onelife
import sys
from six.moves.queue import Queue, Empty
from threading import Thread, Event
import usb.core
import usb.backend.libusb1
from usb.core import USBError
if sys.version_info >= (3,):
from .applog import AppLog
else:
from applog import AppLog
DEBUG = 0
STEPS = 32
logger = AppLog().get_logger(__name__)
logger.setLevel(DEBUG and AppLog.DEBUG or AppLog.INFO)
class USBLamp(object):
    """Driver for USB mail-notifier lamps with vendor id 0x1d34.

    Colors are (r, g, b) tuples clamped to 0..RGB_MAX.  A daemon thread
    running fading() consumes (delay, from_color, to_color) tasks from a
    queue and ping-pongs the color through STEPS interpolation steps until
    stop_fading()/off()/exit() sets the 'stop' event.
    """
    # USB ids of the supported devices.
    ID_VENDOR = 0x1d34
    ID_PRODUCT = (0x000a, 0x0004)
    # Maximum value of each color channel.
    RGB_MAX = 0x40
    @staticmethod
    def get_steps(start, end, steps):
        # Build a ramp of channel values from start to end, padded with
        # 'end' so the result has exactly 'steps' entries.
        # NOTE(review): when start == end this returns [], which makes
        # zip() in fading() truncate the whole step list for that task --
        # confirm this is the intended behavior.
        if start == end:
            x = []
        else:
            delta = int((end - start) / float(steps - 1))
            if delta == 0:
                # guarantee progress when the span is smaller than 'steps'
                delta = 1 if start < end else -1
            x = list(range(start, end + 1, delta))
            if len(x) >= steps:
                x = x[:steps - 1]
                x.append(end)
            else:
                x.extend([end] * (steps - len(x)))
        return x
    @staticmethod
    def fading(lamp):
        # Worker loop run in the daemon thread; exits when lamp.error is set.
        step = 0
        direction = 1
        idle = True
        while True:
            # check exit condition
            if lamp.error.is_set():
                break
            # get task
            try:
                # block on the queue only while idle, otherwise poll
                delay, from_color, to_color = lamp._task.get(block=idle)
                if delay <= 0:
                    continue
                from_color = tuple([max(0, min(lamp.RGB_MAX, c)) for c in from_color])
                to_color = tuple([max(0, min(lamp.RGB_MAX, c)) for c in to_color])
                idle = False
                r = lamp.get_steps(from_color[0], to_color[0], STEPS)
                g = lamp.get_steps(from_color[1], to_color[1], STEPS)
                b = lamp.get_steps(from_color[2], to_color[2], STEPS)
                state = list(zip(r, g, b))
            except Empty:
                pass
            # do delay and check if stop
            idle = lamp.stop.wait(delay)
            if idle:
                # reset color
                lamp.send(lamp._color + (0x00, 0x00, 0x00, 0x00, 0x05))
                continue
            # change color and update step
            lamp.send(state[step] + (0x00, 0x00, 0x00, 0x00, 0x05))
            step += direction
            if step == STEPS - 1 or step == 0:
                # reverse at either end of the ramp (ping-pong)
                direction = -direction
        logger.debug("*** fading thread exited.")
    def send(self, data):
        # Push one 8-byte report to the lamp via a control transfer; sets
        # self.error and re-raises on USB failure.
        # logger.debug("send(%d) %02X %02X %02X %02X %02X %02X %02X %02X" % (
        #     len(data), data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]))
        try:
            # request_type = USB_TYPE_CLASS | USB_RECIP_INTERFACE
            # request = USB_REQ_SET_CONFIGURATION
            # value = 0x200
            # index = 0x00
            # timeout = 1000
            ret = self._lamp.ctrl_transfer(0x21, 0x09, 0x200, 0x00, data, 1000)
        except USBError as e:
            logger.error(str(e))
            self.error.set()
            if self.error_cb:
                self.error_cb()
            raise
        if ret != len(data):
            logger.error("Get %d VS. send %d" % (ret, len(data)))
    def __init__(self, error_cb=None):
        # error_cb: optional callable invoked when a USB transfer fails.
        # initial backend
        if sys.platform == 'win32':
            # Windows: load the bundled libusb DLL matching the interpreter
            # bitness.
            from os import path
            import re
            backend = usb.backend.libusb1.get_backend(find_library=lambda x: path.join(
                path.dirname(__file__),
                'libusb',
                'MS' + re.search('(\d+) bit', sys.version).groups()[0],
                'dll', 'libusb-1.0.dll'))
        elif sys.platform.startswith('linux'):
            backend = None
        else:
            raise NotImplementedError('%s system is not supported!')
        # get device
        for pid in self.ID_PRODUCT:
            devs = list(usb.core.find(idVendor=self.ID_VENDOR, idProduct=pid, find_all=True, backend=backend))
            if devs:
                logger.info("idVendor %d, idProduct %d" % (self.ID_VENDOR, pid))
                break
        else:
            raise SystemError('No device found!')
        # initial lamp and color
        self._lamp = devs[0]
        # On Linux the kernel HID driver may own the interface; detach it
        # and remember to reattach on exit().
        if sys.platform.startswith('linux') and self._lamp.is_kernel_driver_active(0):
            self._reattach = True
            self._lamp.detach_kernel_driver(0)
        else:
            self._reattach = False
        self._color = (0, 0, 0)
        # error and exit event
        self.error = Event()
        self.error_cb = error_cb
        # send init cmd
        self.send((0x1f, 0x02, 0x00, 0x2e, 0x00, 0x00, 0x2b, 0x03))
        self.send((0x00, 0x02, 0x00, 0x2e, 0x00, 0x00, 0x2b, 0x04))
        self.send((0x00, 0x02, 0x00, 0x2e, 0x00, 0x00, 0x2b, 0x05))
        self.send(self._color + (0x00, 0x00, 0x00, 0x00, 0x05))
        # create stop event, fading task queue and daemon thread
        self.stop = Event()
        self._task = Queue()
        self._thread = Thread(target=self.fading, args=(self,))
        self._thread.daemon = True
        self._thread.start()
    def get_color(self):
        # Last color explicitly set via set_color() (not the fading state).
        return self._color
    def set_color(self, new_color):
        # Clamp each channel to 0..RGB_MAX and push it to the device.
        self._color = tuple([max(0, min(self.RGB_MAX, c)) for c in new_color])
        logger.debug("Set color %s" % str(self._color))
        self.send(self._color + (0x00, 0x00, 0x00, 0x00, 0x05))
    def start_fading(self, delay, to_color, from_color=None):
        # Queue a fade task; 'delay' is the per-step wait in seconds.
        if not from_color:
            from_color = (0, 0, 0)
        logger.debug("Start fading with delay %f, %s ~ %s" % (delay, str(from_color), str(to_color)))
        self.stop.clear()
        self._task.put((delay, from_color, to_color))
    def stop_fading(self):
        logger.debug("Stop fading")
        self.stop.set()
    def off(self):
        # Stop any fade and turn all channels off.
        self.stop.set()
        self.set_color((0, 0, 0))
    def exit(self):
        # Orderly shutdown: wake/stop the worker, join it, switch the lamp
        # off and release the USB interface (reattaching the kernel driver
        # if we detached it).
        self._task.put((0, (0, 0, 0), (0, 0, 0)))
        self.stop.set()
        self.error.set()
        self._thread.join()
        self.off()
        usb.util.dispose_resources(self._lamp)
        if self._reattach:
            self._lamp.attach_kernel_driver(0)
        logger.debug("Exit!")
| onelife/PyUsbLamp | pyusblamp/pyusblamp.py | Python | gpl-3.0 | 6,105 |
# -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2015
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from imports import *
import mp_globals
from messageboxext import MessageBoxExt
gLogFile = None
class checkupdate:
	"""Fetches the MediaPortal version.txt from sourceforge and, when a
	newer version is published, asks the user whether to install it via
	MPUpdateScreen."""
	def __init__(self, session):
		self.session = session
	def checkforupdate(self):
		# version.txt layout (see gotUpdateInfo): line 0 = ipk version,
		# 1 = ipk url, 2 = deb version, 3 = deb url.
		getPage("http://master.dl.sourceforge.net/project/e2-mediaportal/version.txt", timeout=15).addCallback(self.gotUpdateInfo).addErrback(self.gotError)
	def gotError(self, error=""):
		# Update check is best-effort; network errors are ignored.
		pass
	def gotUpdateInfo(self, html):
		# An HTML response means we received an error page, not version.txt.
		if re.search(".*?<html", html):
			return
		self.html = html
		tmp_infolines = html.splitlines()
		remoteversion_ipk = tmp_infolines[0]
		remoteversion_deb = tmp_infolines[2]
		# DreamOS images install the .deb package, others the .ipk.
		if mp_globals.isDreamOS:
			self.updateurl = tmp_infolines[3]
			remoteversion = remoteversion_deb
		else:
			self.updateurl = tmp_infolines[1]
			remoteversion = remoteversion_ipk
		# NOTE(review): plain lexicographic string comparison -- assumes
		# version strings of a fixed, comparable format.
		if config.mediaportal.version.value < remoteversion:
			self.session.openWithCallback(self.startUpdate,MessageBoxExt,_("An update is available for the MediaPortal Plugin!\nDo you want to download and install it now?"), MessageBoxExt.TYPE_YESNO)
			return
		else:
			return
	def startUpdate(self,answer):
		if answer is True:
			self.session.open(MPUpdateScreen,self.updateurl)
		else:
			return
class MPUpdateScreen(Screen):
def __init__(self, session, updateurl):
self.session = session
self.updateurl = updateurl
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/MPUpdate.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/MPUpdate.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
self["mplog"] = ScrollLabel()
Screen.__init__(self, session)
self['title'] = Label("MediaPortal Update")
self.setTitle("MediaPortal Update")
self.onLayoutFinish.append(self.__onLayoutFinished)
def __onLayoutFinished(self):
sl = self["mplog"]
sl.instance.setZPosition(1)
self["mplog"].setText(_("Starting update, please wait..."))
self.startPluginUpdate()
def startPluginUpdate(self):
self.container=eConsoleAppContainer()
if mp_globals.isDreamOS:
self.container.appClosed_conn = self.container.appClosed.connect(self.finishedPluginUpdate)
self.container.stdoutAvail_conn = self.container.stdoutAvail.connect(self.mplog)
self.container.execute("apt-get update ; wget -q -O /tmp/foobar %s ; dpkg --install --force-depends --force-overwrite /tmp/foobar ; apt-get -y -f install" % str(self.updateurl))
else:
self.container.appClosed.append(self.finishedPluginUpdate)
self.container.stdoutAvail.append(self.mplog)
#self.container.stderrAvail.append(self.mplog)
#self.container.dataAvail.append(self.mplog)
self.container.execute("opkg update ; opkg install --force-overwrite --force-depends " + str(self.updateurl))
def finishedPluginUpdate(self,retval):
self.container.kill()
if retval == 0:
config.mediaportal.filter.value = "ALL"
config.mediaportal.filter.save()
configfile.save()
self.session.openWithCallback(self.restartGUI, MessageBoxExt, _("MediaPortal successfully updated!\nDo you want to restart the Enigma2 GUI now?"), MessageBoxExt.TYPE_YESNO)
elif retval == 2:
self.session.openWithCallback(self.restartGUI2, MessageBoxExt, _("MediaPortal update failed! Please check free space on your root filesystem, at least 8MB are required for installation.\nCheck the update log carefully!\nThe Enigma2 GUI will restart now!"), MessageBoxExt.TYPE_ERROR)
else:
self.session.openWithCallback(self.returnGUI, MessageBoxExt, _("MediaPortal update failed! Check the update log carefully!"), MessageBoxExt.TYPE_ERROR)
def restartGUI(self, answer):
if answer is True:
self.session.open(TryQuitMainloop, 3)
else:
self.close()
    def restartGUI2(self, answer):
        """MessageBox callback: unconditionally restart the Enigma2 GUI."""
        self.session.open(TryQuitMainloop, 3)
    def returnGUI(self, answer):
        """MessageBox callback: just close this screen."""
        self.close()
    def mplog(self,str):
        """Show a chunk of console output in the widget and log it to disk."""
        # NOTE(review): the parameter name shadows the builtin str(); left
        # unchanged so the callback signature stays exactly as wired above.
        self["mplog"].setText(str)
        self.writeToLog(str)
def writeToLog(self, log):
global gLogFile
if gLogFile is None:
self.openLogFile()
now = datetime.datetime.now()
gLogFile.write(str(log) + "\n")
gLogFile.flush()
def openLogFile(self):
global gLogFile
baseDir = "/tmp"
logDir = baseDir + "/mediaportal"
now = datetime.datetime.now()
try:
os.makedirs(baseDir)
except OSError, e:
pass
try:
os.makedirs(logDir)
except OSError, e:
pass
gLogFile = open(logDir + "/MediaPortal_update_%04d%02d%02d_%02d%02d.log" % (now.year, now.month, now.day, now.hour, now.minute, ), "w") | n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/MediaPortal/resources/update.py | Python | gpl-2.0 | 6,241 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bootstraps gn.
It is done by first building it manually in a temporary directory, then building
it with its own BUILD.gn to the final destination.
"""
import contextlib
import errno
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
# Directory containing this bootstrap script.
BOOTSTRAP_DIR = os.path.dirname(os.path.abspath(__file__))
# tools/gn source directory (parent of bootstrap/).
GN_ROOT = os.path.dirname(BOOTSTRAP_DIR)
# Chromium src/ checkout root (two levels above tools/gn).
SRC_ROOT = os.path.dirname(os.path.dirname(GN_ROOT))

# Host-platform flags used below to select sources and compiler options.
is_linux = sys.platform.startswith('linux')
is_mac = sys.platform.startswith('darwin')
is_posix = is_linux or is_mac
def check_call(cmd, **kwargs):
  """Run *cmd* with GN_ROOT as the working directory.

  The command line is logged at debug level first; a non-zero exit status
  raises subprocess.CalledProcessError.
  """
  cmdline = ' '.join(cmd)
  logging.debug('Running: %s', cmdline)
  subprocess.check_call(cmd, cwd=GN_ROOT, **kwargs)
def mkdir_p(path):
  """Create *path* and any missing parents, like `mkdir -p`.

  A pre-existing directory is not an error; every other OSError (including
  the target existing as a non-directory) is re-raised.
  """
  try:
    os.makedirs(path)
  except OSError as e:
    already_a_dir = e.errno == errno.EEXIST and os.path.isdir(path)
    if not already_a_dir:
      raise
@contextlib.contextmanager
def scoped_tempdir():
  """Yield a freshly created temporary directory, deleting it on exit."""
  tmp = tempfile.mkdtemp()
  try:
    yield tmp
  finally:
    shutil.rmtree(tmp)
def main(argv):
  """Parse options, bootstrap gn with ninja, then optionally rebuild with gn.

  Returns a process exit code: 0 on success, 1 when any build step fails.
  """
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option('-d', '--debug', action='store_true',
                    help='Do a debug build. Defaults to release build.')
  parser.add_option('-o', '--output',
                    help='place output in PATH', metavar='PATH')
  parser.add_option('-s', '--no-rebuild', action='store_true',
                    help='Do not rebuild GN with GN.')
  parser.add_option('-v', '--verbose', action='store_true',
                    help='Log more details')
  options, args = parser.parse_args(argv)

  if args:
    parser.error('Unrecognized command line arguments: %s.' % ', '.join(args))

  logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)

  if options.debug:
    build_rel = os.path.join('out', 'Debug')
  else:
    build_rel = os.path.join('out', 'Release')
  build_root = os.path.join(SRC_ROOT, build_rel)

  try:
    with scoped_tempdir() as tempdir:
      print 'Building gn manually in a temporary directory for bootstrapping...'
      build_gn_with_ninja_manually(tempdir, options)
      temp_gn = os.path.join(tempdir, 'gn')
      out_gn = os.path.join(build_root, 'gn')

      if options.no_rebuild:
        # Just install the bootstrap binary; skip the self-hosted rebuild.
        mkdir_p(build_root)
        shutil.copy2(temp_gn, out_gn)
      else:
        print 'Building gn using itself to %s...' % build_rel
        build_gn_with_gn(temp_gn, build_rel, options)

      if options.output:
        # Preserve the executable permission bit.
        shutil.copy2(out_gn, options.output)
  except subprocess.CalledProcessError as e:
    print >> sys.stderr, str(e)
    return 1
  return 0
def build_gn_with_ninja_manually(tempdir, options):
  """Write a hand-rolled build.ninja into *tempdir* and build the gn target."""
  write_ninja(os.path.join(tempdir, 'build.ninja'), options)

  ninja_args = ['ninja', '-C', tempdir]
  if options.verbose:
    ninja_args.append('-v')
  ninja_args.append('gn')
  check_call(ninja_args)
def write_ninja(path, options):
  """Generate a minimal build.ninja at *path* sufficient to link gn.

  Toolchain and flags come from the conventional CC/CXX/CFLAGS/CXXFLAGS/
  LD/LDFLAGS environment variables; the source lists below are a
  hand-maintained subset of base/ and third_party/ that gn depends on.
  """
  cc = os.environ.get('CC', '')
  cxx = os.environ.get('CXX', '')
  cflags = os.environ.get('CFLAGS', '').split()
  cflags_cc = os.environ.get('CXXFLAGS', '').split()
  ld = os.environ.get('LD', cxx)
  ldflags = os.environ.get('LDFLAGS', '').split()
  include_dirs = [SRC_ROOT]
  libs = []

  if is_posix:
    if options.debug:
      cflags.extend(['-O0', '-g'])
    else:
      cflags.extend(['-O2', '-g0'])

    cflags.extend(['-D_FILE_OFFSET_BITS=64', '-pthread', '-pipe'])
    cflags_cc.extend(['-std=gnu++11', '-Wno-c++11-narrowing'])

  # Map of static library name -> sources and the tool (cc/cxx) compiling
  # them; per-library include_dirs/cflags may be added below.
  static_libraries = {
      'base': {'sources': [], 'tool': 'cxx'},
      'dynamic_annotations': {'sources': [], 'tool': 'cc'},
      'gn': {'sources': [], 'tool': 'cxx'},
  }

  # All gn sources except unit tests and test-data generators.
  for name in os.listdir(GN_ROOT):
    if not name.endswith('.cc'):
      continue
    if name.endswith('_unittest.cc'):
      continue
    if name in ['generate_test_gn_data.cc', 'run_all_unittests.cc']:
      continue
    full_path = os.path.join(GN_ROOT, name)
    static_libraries['gn']['sources'].append(
        os.path.relpath(full_path, SRC_ROOT))

  static_libraries['dynamic_annotations']['sources'].extend([
      'base/third_party/dynamic_annotations/dynamic_annotations.c',
  ])

  # Platform-independent base/ sources.
  static_libraries['base']['sources'].extend([
      'base/at_exit.cc',
      'base/atomicops_internals_x86_gcc.cc',
      'base/base_paths.cc',
      'base/base_switches.cc',
      'base/callback_internal.cc',
      'base/command_line.cc',
      'base/debug/alias.cc',
      'base/debug/stack_trace.cc',
      'base/debug/task_annotator.cc',
      'base/environment.cc',
      'base/files/file.cc',
      'base/files/file_enumerator.cc',
      'base/files/file_path.cc',
      'base/files/file_path_constants.cc',
      'base/files/file_util.cc',
      'base/files/scoped_file.cc',
      'base/json/json_parser.cc',
      'base/json/json_reader.cc',
      'base/json/json_string_value_serializer.cc',
      'base/json/json_writer.cc',
      'base/json/string_escape.cc',
      'base/lazy_instance.cc',
      'base/location.cc',
      'base/logging.cc',
      'base/memory/ref_counted.cc',
      'base/memory/ref_counted_memory.cc',
      'base/memory/singleton.cc',
      'base/memory/weak_ptr.cc',
      'base/message_loop/incoming_task_queue.cc',
      'base/message_loop/message_loop.cc',
      'base/message_loop/message_loop_proxy.cc',
      'base/message_loop/message_loop_proxy_impl.cc',
      'base/message_loop/message_pump.cc',
      'base/message_loop/message_pump_default.cc',
      'base/metrics/bucket_ranges.cc',
      'base/metrics/histogram.cc',
      'base/metrics/histogram_base.cc',
      'base/metrics/histogram_samples.cc',
      'base/metrics/sample_map.cc',
      'base/metrics/sample_vector.cc',
      'base/metrics/sparse_histogram.cc',
      'base/metrics/statistics_recorder.cc',
      'base/path_service.cc',
      'base/pending_task.cc',
      'base/pickle.cc',
      'base/process/kill.cc',
      'base/process/process_iterator.cc',
      'base/process/process_metrics.cc',
      'base/profiler/alternate_timer.cc',
      'base/profiler/tracked_time.cc',
      'base/run_loop.cc',
      'base/sequence_checker_impl.cc',
      'base/sequenced_task_runner.cc',
      'base/strings/string16.cc',
      'base/strings/string_number_conversions.cc',
      'base/strings/string_piece.cc',
      'base/strings/string_split.cc',
      'base/strings/string_util.cc',
      'base/strings/string_util_constants.cc',
      'base/strings/stringprintf.cc',
      'base/strings/utf_string_conversion_utils.cc',
      'base/strings/utf_string_conversions.cc',
      'base/synchronization/cancellation_flag.cc',
      'base/synchronization/lock.cc',
      'base/sys_info.cc',
      'base/task_runner.cc',
      'base/third_party/dmg_fp/dtoa_wrapper.cc',
      'base/third_party/dmg_fp/g_fmt.cc',
      'base/third_party/icu/icu_utf.cc',
      'base/third_party/nspr/prtime.cc',
      'base/thread_task_runner_handle.cc',
      'base/threading/non_thread_safe_impl.cc',
      'base/threading/post_task_and_reply_impl.cc',
      'base/threading/sequenced_worker_pool.cc',
      'base/threading/simple_thread.cc',
      'base/threading/thread_checker_impl.cc',
      'base/threading/thread_collision_warner.cc',
      'base/threading/thread_id_name_manager.cc',
      'base/threading/thread_local_storage.cc',
      'base/threading/thread_restrictions.cc',
      'base/time/time.cc',
      'base/timer/elapsed_timer.cc',
      'base/timer/timer.cc',
      'base/trace_event/trace_event_impl.cc',
      'base/trace_event/trace_event_impl_constants.cc',
      'base/trace_event/trace_event_memory.cc',
      'base/trace_event/trace_event_synthetic_delay.cc',
      'base/tracked_objects.cc',
      'base/tracking_info.cc',
      'base/values.cc',
      'base/vlog.cc',
  ])

  if is_posix:
    static_libraries['base']['sources'].extend([
        'base/base_paths_posix.cc',
        'base/debug/debugger_posix.cc',
        'base/debug/stack_trace_posix.cc',
        'base/files/file_enumerator_posix.cc',
        'base/files/file_posix.cc',
        'base/files/file_util_posix.cc',
        'base/message_loop/message_pump_libevent.cc',
        'base/posix/file_descriptor_shuffle.cc',
        'base/process/kill_posix.cc',
        'base/process/process_handle_posix.cc',
        'base/process/process_metrics_posix.cc',
        'base/process/process_posix.cc',
        'base/safe_strerror_posix.cc',
        'base/synchronization/condition_variable_posix.cc',
        'base/synchronization/lock_impl_posix.cc',
        'base/synchronization/waitable_event_posix.cc',
        'base/sys_info_posix.cc',
        'base/threading/platform_thread_posix.cc',
        'base/threading/thread_local_posix.cc',
        'base/threading/thread_local_storage_posix.cc',
        'base/time/time_posix.cc',
    ])
    # libevent backs message_pump_libevent on POSIX.
    static_libraries['libevent'] = {
        'sources': [
            'third_party/libevent/buffer.c',
            'third_party/libevent/evbuffer.c',
            'third_party/libevent/evdns.c',
            'third_party/libevent/event.c',
            'third_party/libevent/event_tagging.c',
            'third_party/libevent/evrpc.c',
            'third_party/libevent/evutil.c',
            'third_party/libevent/http.c',
            'third_party/libevent/log.c',
            'third_party/libevent/poll.c',
            'third_party/libevent/select.c',
            'third_party/libevent/signal.c',
            'third_party/libevent/strlcpy.c',
        ],
        'tool': 'cc',
        'include_dirs': [],
        'cflags': cflags + ['-DHAVE_CONFIG_H'],
    }

  if is_linux:
    libs.extend(['-lrt'])
    ldflags.extend(['-pthread'])

    static_libraries['xdg_user_dirs'] = {
        'sources': [
            'base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
        ],
        'tool': 'cxx',
    }
    static_libraries['base']['sources'].extend([
        'base/nix/xdg_util.cc',
        'base/process/internal_linux.cc',
        'base/process/process_handle_linux.cc',
        'base/process/process_iterator_linux.cc',
        'base/process/process_linux.cc',
        'base/process/process_metrics_linux.cc',
        'base/strings/sys_string_conversions_posix.cc',
        'base/sys_info_linux.cc',
        'base/threading/platform_thread_linux.cc',
    ])
    static_libraries['libevent']['include_dirs'].extend([
        os.path.join(SRC_ROOT, 'third_party', 'libevent', 'linux')
    ])
    static_libraries['libevent']['sources'].extend([
        'third_party/libevent/epoll.c',
    ])

  if is_mac:
    static_libraries['base']['sources'].extend([
        'base/base_paths_mac.mm',
        'base/files/file_util_mac.mm',
        'base/mac/bundle_locations.mm',
        'base/mac/foundation_util.mm',
        'base/mac/mach_logging.cc',
        'base/mac/scoped_mach_port.cc',
        'base/mac/scoped_nsautorelease_pool.mm',
        'base/message_loop/message_pump_mac.mm',
        'base/process/process_handle_mac.cc',
        'base/process/process_iterator_mac.cc',
        'base/strings/sys_string_conversions_mac.mm',
        'base/time/time_mac.cc',
        'base/threading/platform_thread_mac.mm',
    ])
    static_libraries['libevent']['include_dirs'].extend([
        os.path.join(SRC_ROOT, 'third_party', 'libevent', 'mac')
    ])
    static_libraries['libevent']['sources'].extend([
        'third_party/libevent/kqueue.c',
    ])

  if is_mac:
    template_filename = 'build_mac.ninja.template'
  else:
    template_filename = 'build.ninja.template'

  with open(os.path.join(GN_ROOT, 'bootstrap', template_filename)) as f:
    ninja_template = f.read()

  def src_to_obj(path):
    # Source path -> object file path (same directory, .o suffix).
    return '%s' % os.path.splitext(path)[0] + '.o'

  # Emit one build statement (plus variable overrides) per source file,
  # then one thin-archive statement per library.
  ninja_lines = []
  for library, settings in static_libraries.iteritems():
    for src_file in settings['sources']:
      ninja_lines.extend([
          'build %s: %s %s' % (src_to_obj(src_file),
                               settings['tool'],
                               os.path.join(SRC_ROOT, src_file)),
          ' includes = %s' % ' '.join(
              ['-I' + dirname for dirname in
               include_dirs + settings.get('include_dirs', [])]),
          ' cflags = %s' % ' '.join(cflags + settings.get('cflags', [])),
          ' cflags_cc = %s' %
              ' '.join(cflags_cc + settings.get('cflags_cc', [])),
      ])
      if cc:
        ninja_lines.append(' cc = %s' % cc)
      if cxx:
        ninja_lines.append(' cxx = %s' % cxx)

    ninja_lines.append('build %s.a: alink_thin %s' % (
        library,
        ' '.join([src_to_obj(src_file) for src_file in settings['sources']])))

  if is_mac:
    libs.extend([
        '-framework', 'AppKit',
        '-framework', 'CoreFoundation',
        '-framework', 'Foundation',
        '-framework', 'Security',
    ]);

  # Final link statement pulling in every static library built above.
  ninja_lines.extend([
      'build gn: link %s' % (
          ' '.join(['%s.a' % library for library in static_libraries])),
      ' ldflags = %s' % ' '.join(ldflags),
      ' libs = %s' % ' '.join(libs),
  ])
  if ld:
    ninja_lines.append(' ld = %s' % ld)
  else:
    ninja_lines.append(' ld = $ldxx')

  ninja_lines.append('')  # Make sure the file ends with a newline.

  with open(path, 'w') as f:
    f.write(ninja_template + '\n'.join(ninja_lines))
def build_gn_with_gn(temp_gn, build_dir, options):
  """Use the bootstrap binary *temp_gn* to generate and build gn in *build_dir*."""
  gen_cmd = [temp_gn, 'gen', build_dir]
  if not options.debug:
    gen_cmd.append('--args=is_debug=false')
  check_call(gen_cmd)

  ninja_cmd = ['ninja', '-C', build_dir]
  if options.verbose:
    ninja_cmd.append('-v')
  ninja_cmd.append('gn')
  check_call(ninja_cmd)

  if not options.debug:
    check_call(['strip', os.path.join(build_dir, 'gn')])
if __name__ == '__main__':
  # Forward the command line (minus argv[0]) and propagate the exit code.
  sys.exit(main(sys.argv[1:]))
| mou4e/zirconium | tools/gn/bootstrap/bootstrap.py | Python | bsd-3-clause | 13,743 |
"""
WSGI config for munsell project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path

# Project root (one level above this package), appended to sys.path so the
# project's modules are importable under any WSGI server.
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "munsell.settings.production")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| HalescodeLLC/django-munsell | munsell/munsell/wsgi.py | Python | mit | 1,562 |
"""Native adapter for serving CherryPy via its builtin server."""
import logging
import sys
import io
import cheroot.server
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
from ._cpcompat import tonative
class NativeGateway(cheroot.server.Gateway):
    """Native gateway implementation allowing to bypass WSGI."""

    # When False, an InternalRedirect that revisits a URL already seen in
    # this request raises instead of looping forever.
    recursive = False

    def respond(self):
        """Obtain response from CherryPy machinery and then send it.

        Resolves the mounted app for the request path, runs the CherryPy
        Request, follows InternalRedirects (with loop detection unless
        ``recursive``), and falls back to a bare 500 on any error.
        """
        req = self.req
        try:
            # Obtain a Request object from CherryPy
            local = req.server.bind_addr  # FIXME: handle UNIX sockets
            local = tonative(local[0]), local[1]
            local = httputil.Host(local[0], local[1], '')
            remote = tonative(req.conn.remote_addr), req.conn.remote_port
            remote = httputil.Host(remote[0], remote[1], '')

            scheme = tonative(req.scheme)
            sn = cherrypy.tree.script_name(tonative(req.uri or '/'))
            if sn is None:
                self.send_response('404 Not Found', [], [''])
            else:
                app = cherrypy.tree.apps[sn]
                method = tonative(req.method)
                path = tonative(req.path)
                qs = tonative(req.qs or '')
                headers = (
                    (tonative(h), tonative(v))
                    for h, v in req.inheaders.items()
                )
                rfile = req.rfile
                prev = None

                try:
                    redirections = []
                    while True:
                        request, response = app.get_serving(
                            local, remote, scheme, 'HTTP/1.1')
                        request.multithread = True
                        request.multiprocess = False
                        request.app = app
                        request.prev = prev

                        # Run the CherryPy Request object and obtain the
                        # response
                        try:
                            request.run(
                                method, path, qs,
                                tonative(req.request_protocol),
                                headers, rfile,
                            )
                            break
                        except cherrypy.InternalRedirect:
                            ir = sys.exc_info()[1]
                            app.release_serving()
                            prev = request

                            if not self.recursive:
                                if ir.path in redirections:
                                    raise RuntimeError(
                                        'InternalRedirector visited the same '
                                        'URL twice: %r' % ir.path)
                                else:
                                    # Add the *previous* path_info + qs to
                                    # redirections.
                                    if qs:
                                        qs = '?' + qs
                                    redirections.append(sn + path + qs)

                            # Munge environment and try again.
                            method = 'GET'
                            path = ir.path
                            qs = ir.query_string
                            rfile = io.BytesIO()

                    self.send_response(
                        response.output_status, response.header_list,
                        response.body)
                finally:
                    app.release_serving()
        except Exception:
            tb = format_exc()
            # print tb
            cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)

            s, h, b = bare_error()
            self.send_response(s, h, b)

    def send_response(self, status, headers, body):
        """Send response to HTTP request.

        Writes status and headers to the underlying native request object,
        then streams the body segments.
        """
        req = self.req

        # Set response status
        req.status = status or b'500 Server Error'

        # Set response headers
        for header, value in headers:
            req.outheaders.append((header, value))
        if (req.ready and not req.sent_headers):
            req.sent_headers = True
            req.send_headers()

        # Set response body
        for seg in body:
            req.write(seg)
class CPHTTPServer(cheroot.server.HTTPServer):
    """Wrapper for cheroot.server.HTTPServer.

    cheroot has been designed to not reference CherryPy in any way,
    so that it can be used in other frameworks and applications.
    Therefore, we wrap it here, so we can apply some attributes
    from config -> cherrypy.server -> HTTPServer.
    """

    def __init__(self, server_adapter=cherrypy.server):
        """Initialize CPHTTPServer, copying settings off *server_adapter*."""
        self.server_adapter = server_adapter

        server_name = (self.server_adapter.socket_host or
                       self.server_adapter.socket_file or
                       None)

        cheroot.server.HTTPServer.__init__(
            self, server_adapter.bind_addr, NativeGateway,
            minthreads=server_adapter.thread_pool,
            maxthreads=server_adapter.thread_pool_max,
            server_name=server_name)

        # Mirror the adapter's request/socket tuning onto the cheroot server.
        self.max_request_header_size = (
            self.server_adapter.max_request_header_size or 0)
        self.max_request_body_size = (
            self.server_adapter.max_request_body_size or 0)
        self.request_queue_size = self.server_adapter.socket_queue_size
        self.timeout = self.server_adapter.socket_timeout
        self.shutdown_timeout = self.server_adapter.shutdown_timeout
        self.protocol = self.server_adapter.protocol_version
        self.nodelay = self.server_adapter.nodelay

        # TLS setup: both branches build the same adapter; the first also
        # attaches a pre-made SSL context when one was configured.
        ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
        if self.server_adapter.ssl_context:
            adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
            self.ssl_adapter = adapter_class(
                self.server_adapter.ssl_certificate,
                self.server_adapter.ssl_private_key,
                self.server_adapter.ssl_certificate_chain,
                self.server_adapter.ssl_ciphers)
            self.ssl_adapter.context = self.server_adapter.ssl_context
        elif self.server_adapter.ssl_certificate:
            adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
            self.ssl_adapter = adapter_class(
                self.server_adapter.ssl_certificate,
                self.server_adapter.ssl_private_key,
                self.server_adapter.ssl_certificate_chain,
                self.server_adapter.ssl_ciphers)
#Example oper.py
# Demonstrates NumPy elementwise arithmetic on 2x2 arrays (Python 2 prints).
# NOTE(review): the wildcard import shadows builtins such as sum()/abs();
# kept as-is since this is a teaching snippet relying on bare array().
from numpy import *
a = array([[2,3], [4,5]])
b = array([[1,2], [3,0]])
print a + b  # elementwise sum
print a * b  # elementwise product (use dot(a, b) for matrix multiplication)
| csparkresearch/eyes-online | app/static/scripts/Maths/oper.py | Python | gpl-3.0 | 114 |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo.config import cfg
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import service_base
# Loadbalancer Exceptions
# The `message` templates below are %-interpolated with keyword arguments by
# the NeutronException machinery when the exception is raised.


class DelayOrTimeoutInvalid(qexception.BadRequest):
    message = _("Delay must be greater than or equal to timeout")


class NoEligibleBackend(qexception.NotFound):
    message = _("No eligible backend for pool %(pool_id)s")


class VipNotFound(qexception.NotFound):
    message = _("Vip %(vip_id)s could not be found")


class VipExists(qexception.NeutronException):
    message = _("Another Vip already exists for pool %(pool_id)s")


class PoolNotFound(qexception.NotFound):
    message = _("Pool %(pool_id)s could not be found")


class MemberNotFound(qexception.NotFound):
    message = _("Member %(member_id)s could not be found")


class HealthMonitorNotFound(qexception.NotFound):
    message = _("Health_monitor %(monitor_id)s could not be found")


class PoolMonitorAssociationNotFound(qexception.NotFound):
    message = _("Monitor %(monitor_id)s is not associated "
                "with Pool %(pool_id)s")


class PoolMonitorAssociationExists(qexception.Conflict):
    message = _('health_monitor %(monitor_id)s is already associated '
                'with pool %(pool_id)s')


class StateInvalid(qexception.NeutronException):
    message = _("Invalid state %(state)s of Loadbalancer resource %(id)s")


class PoolInUse(qexception.InUse):
    message = _("Pool %(pool_id)s is still in use")


class HealthMonitorInUse(qexception.InUse):
    message = _("Health monitor %(monitor_id)s still has associations with "
                "pools")


class PoolStatsNotFound(qexception.NotFound):
    message = _("Statistics of Pool %(pool_id)s could not be found")


class ProtocolMismatch(qexception.BadRequest):
    message = _("Protocol %(vip_proto)s does not match "
                "pool protocol %(pool_proto)s")


class ListenerNotFound(qexception.NotFound):
    message = _("Listener %(listener_id)s could not be found")


# NOTE(review): this class is re-defined further down in this module with a
# different message; at import time the later definition wins, making this
# one dead code. One of the two should be renamed or removed.
class VipProtocolPortInUse(qexception.InUse):
    message = _("Protocol_Port %(protocol_port)s "
                "is already used by VIP")


class ListenerNotInVip(qexception.NotFound):
    message = _("Listener %(listener_id)s is not in Vip %(vip_id)s")


class ListenerProtocolMismatch(qexception.BadRequest):
    message = _("Protocol %(listener_proto)s does not match "
                "vip protocol %(vip_proto)s")


class ListenerProtocolPortInUse(qexception.InUse):
    message = _("Protocol_Port %(protocol_port)s "
                "is already used in listeners")
class ListenerProtocolPortMismatch(qexception.BadRequest):
    # Fix: the original template read "% (protocol_port)s" — the stray space
    # after '%' is an invalid conversion specifier, so rendering this message
    # would raise ValueError instead of producing the error text.
    message = _("Protocol_port %(protocol_port)s does not match "
                "listener_protocol_port %(listener_protocol_port)s")
class MemberExists(qexception.NeutronException):
    message = _("Member with address %(address)s and port %(port)s "
                "already present in pool %(pool)s")


class VipPortNotFound(qexception.NotFound):
    message = _("Port of IP address %(address)s in subnet %(subnet_id)s "
                "could not be found")


# NOTE(review): duplicate of the VipProtocolPortInUse class defined earlier
# in this module; this later definition silently replaces the first one at
# import time and its message takes a different set of format keys.
class VipProtocolPortInUse(qexception.InUse):
    message = _("Protocol_port %(protocol_port)s in IP address "
                "%(address)s is already used")


class VipAddressNotAssigned(qexception.BadRequest):
    message = _("Vip IP address is required")
# Collection names used when registering quotas/plurals for these resources.
POOLS = 'pools'
MEMBERS = 'members'

# REST attribute map: one entry per top-level LBaaS collection. Each
# attribute dict declares whether the field may be set on POST/PUT, its
# validators/converters, defaults, and API visibility.
RESOURCE_ATTRIBUTE_MAP = {
    'vips': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'default': '',
                 'is_visible': True},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'subnet_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:uuid': None},
                      'is_visible': True},
        'address': {'allow_post': True, 'allow_put': False,
                    'default': attr.ATTR_NOT_SPECIFIED,
                    'validate': {'type:ip_address_or_none': None},
                    'is_visible': True},
        'port_id': {'allow_post': False, 'allow_put': False,
                    'validate': {'type:uuid': None},
                    'is_visible': True},
        'protocol_port': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:range': [0, 65535]},
                          'convert_to': attr.convert_to_int,
                          'is_visible': True},
        'protocol': {'allow_post': True, 'allow_put': False,
                     'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
                     'is_visible': True},
        'pool_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:uuid': None},
                    'is_visible': True},
        # session_persistence is a nested dict; 'type' is mandatory,
        # 'cookie_name' only meaningful for APP_COOKIE.
        'session_persistence': {'allow_post': True, 'allow_put': True,
                                'convert_to': attr.convert_none_to_empty_dict,
                                'default': {},
                                'validate': {
                                    'type:dict_or_empty': {
                                        'type': {'type:values': ['APP_COOKIE',
                                                                 'HTTP_COOKIE',
                                                                 'SOURCE_IP'],
                                                 'required': True},
                                        'cookie_name': {'type:string': None,
                                                        'required': False}}},
                                'is_visible': True},
        # -1 means "no connection limit".
        'connection_limit': {'allow_post': True, 'allow_put': True,
                             'default': -1,
                             'convert_to': attr.convert_to_int,
                             'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'status_description': {'allow_post': False, 'allow_put': False,
                               'is_visible': True},
        'extra_listeners': {'allow_post': False, 'allow_put': False,
                            'is_visible': True}
    },
    'pools': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'vip_id': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'default': '',
                 'is_visible': True},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'subnet_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:uuid': None},
                      'is_visible': True},
        'protocol': {'allow_post': True, 'allow_put': False,
                     'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
                     'is_visible': True},
        'provider': {'allow_post': True, 'allow_put': False,
                     'validate': {'type:string': None},
                     'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED},
        'lb_method': {'allow_post': True, 'allow_put': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
        'members': {'allow_post': False, 'allow_put': False,
                    'is_visible': True},
        'health_monitors': {'allow_post': True, 'allow_put': True,
                            'default': None,
                            'validate': {'type:uuid_list': None},
                            'convert_to': attr.convert_to_list,
                            'is_visible': True},
        'health_monitors_status': {'allow_post': False, 'allow_put': False,
                                   'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'status_description': {'allow_post': False, 'allow_put': False,
                               'is_visible': True}
    },
    'members': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'pool_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:uuid': None},
                    'is_visible': True},
        'address': {'allow_post': True, 'allow_put': False,
                    'validate': {'type:ip_address': None},
                    'is_visible': True},
        'protocol_port': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:range': [0, 65535]},
                          'convert_to': attr.convert_to_int,
                          'is_visible': True},
        'weight': {'allow_post': True, 'allow_put': True,
                   'default': 1,
                   'validate': {'type:range': [0, 256]},
                   'convert_to': attr.convert_to_int,
                   'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'status_description': {'allow_post': False, 'allow_put': False,
                               'is_visible': True}
    },
    'health_monitors': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'type': {'allow_post': True, 'allow_put': False,
                 'validate': {'type:values': ['PING', 'TCP', 'HTTP', 'HTTPS']},
                 'is_visible': True},
        'delay': {'allow_post': True, 'allow_put': True,
                  'validate': {'type:non_negative': None},
                  'convert_to': attr.convert_to_int,
                  'is_visible': True},
        'timeout': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:non_negative': None},
                    'convert_to': attr.convert_to_int,
                    'is_visible': True},
        'max_retries': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:range': [1, 10]},
                        'convert_to': attr.convert_to_int,
                        'is_visible': True},
        'http_method': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'default': 'GET',
                        'is_visible': True},
        'url_path': {'allow_post': True, 'allow_put': True,
                     'validate': {'type:string': None},
                     'default': '/',
                     'is_visible': True},
        # Accepts "200", "200,201,202" or a range like "200-299".
        'expected_codes': {'allow_post': True, 'allow_put': True,
                           'validate': {
                               'type:regex':
                               '^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'},
                           'default': '200',
                           'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'status_description': {'allow_post': False, 'allow_put': False,
                               'is_visible': True},
        'pools': {'allow_post': False, 'allow_put': False,
                  'is_visible': True}
    }
}
# Sub-resources nested under a parent collection, e.g.
# /pools/{pool_id}/health_monitors and /vips/{vip_id}/listeners.
SUB_RESOURCE_ATTRIBUTE_MAP = {
    'health_monitors': {
        'parent': {'collection_name': 'pools',
                   'member_name': 'pool'},
        'parameters': {'id': {'allow_post': True, 'allow_put': False,
                              'validate': {'type:uuid': None},
                              'is_visible': True},
                       'tenant_id': {'allow_post': True, 'allow_put': False,
                                     'validate': {'type:string': None},
                                     'required_by_policy': True,
                                     'is_visible': True},
                       }
    },
    'listeners': {
        'parent': {'collection_name': 'vips',
                   'member_name': 'vip',
                   },
        'parameters': {
            'id': {'allow_post': False, 'allow_put': False,
                   'validate': {'type:uuid': None},
                   'is_visible': True,
                   'primary_key': True},
            'protocol_port': {'allow_post': True, 'allow_put': False,
                              'validate': {'type:range': [0, 65535]},
                              'convert_to': attr.convert_to_int,
                              'is_visible': True},
            'protocol': {'allow_post': True, 'allow_put': False,
                         'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
                         'is_visible': True},
            'status': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
            'vip_id': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
            'tenant_id': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:string': None},
                          'required_by_policy': True,
                          'is_visible': True},
        }
    }
}
# Per-tenant quota knobs for LBaaS resources, registered under the
# [QUOTAS] config section. A negative value disables the limit.
lbaas_quota_opts = [
    cfg.IntOpt('quota_vip',
               default=10,
               help=_('Number of vips allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_pool',
               default=10,
               help=_('Number of pools allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_member',
               default=-1,
               help=_('Number of pool members allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_health_monitor',
               default=-1,
               help=_('Number of health monitors allowed per tenant. '
                      'A negative value means unlimited.'))
]
cfg.CONF.register_opts(lbaas_quota_opts, 'QUOTAS')
class Loadbalancer(extensions.ExtensionDescriptor):
    @classmethod
    def get_name(cls):
        """Return the human-readable extension name."""
        return "LoadBalancing service"
    @classmethod
    def get_alias(cls):
        """Return the short alias used in API URLs and extension lookups."""
        return "lbaas"
    @classmethod
    def get_description(cls):
        """Return a one-line description of the extension."""
        return "Extension for LoadBalancing service"
    @classmethod
    def get_namespace(cls):
        """Return the XML namespace URL for this extension."""
        return "http://wiki.openstack.org/neutron/LBaaS/API_1.0"
    @classmethod
    def get_updated(cls):
        """Return the timestamp of the last API revision."""
        return "2012-10-07T10:00:00-00:00"
    @classmethod
    def _get_resources(cls):
        """Build the extension's API resources via resource_helper.

        Covers the top-level collections from RESOURCE_ATTRIBUTE_MAP plus
        the nested sub-resources from SUB_RESOURCE_ATTRIBUTE_MAP.

        NOTE(review): this helper-based variant coexists with the
        hand-rolled get_resources() below, which builds the same top-level
        resources independently; presumably only one is meant to survive.
        """
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        # Irregular plural that build_plural_mappings cannot derive.
        plural_mappings['health_monitors_status'] = 'health_monitor_status'
        attr.PLURALS.update(plural_mappings)
        # Pools expose an extra GET /pools/{id}/stats member action.
        action_map = {'pool': {'stats': 'GET'}}
        resources = resource_helper.build_resource_info(plural_mappings,
                                                        RESOURCE_ATTRIBUTE_MAP,
                                                        constants.LOADBALANCER,
                                                        action_map=action_map,
                                                        register_quota=True)
        plugin = manager.NeutronManager.get_service_plugins()[
            constants.LOADBALANCER]
        for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
            # Special handling needed for sub-resources with 'y' ending
            # (e.g. proxies -> proxy)
            resource_name = collection_name[:-1]
            parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')
            params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(
                'parameters')

            controller = base.create_resource(collection_name, resource_name,
                                              plugin, params,
                                              allow_bulk=True,
                                              parent=parent)

            resource = extensions.ResourceExtension(
                collection_name,
                controller, parent,
                path_prefix=constants.COMMON_PREFIXES[constants.LOADBALANCER],
                attr_map=params)
            resources.append(resource)
        return resources
@classmethod
def get_resources(cls):
my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
my_plurals.append(('health_monitors_status', 'health_monitor_status'))
attr.PLURALS.update(dict(my_plurals))
resources = []
plugin = manager.NeutronManager.get_service_plugins()[
constants.LOADBALANCER]
for collection_name in RESOURCE_ATTRIBUTE_MAP:
# Special handling needed for resources with 'y' ending
# (e.g. proxies -> proxy)
resource_name = collection_name[:-1]
params = RESOURCE_ATTRIBUTE_MAP[collection_name]
member_actions = {}
if resource_name == 'pool':
member_actions = {'stats': 'GET'}
controller = base.create_resource(
collection_name, resource_name, plugin, params,
member_actions=member_actions,
allow_pagination=cfg.CONF.allow_pagination,
allow_sorting=cfg.CONF.allow_sorting)
resource = extensions.ResourceExtension(
collection_name,
controller,
path_prefix=constants.COMMON_PREFIXES[constants.LOADBALANCER],
member_actions=member_actions,
attr_map=params)
resources.append(resource)
for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
# Special handling needed for sub-resources with 'y' ending
# (e.g. proxies -> proxy)
resource_name = collection_name[:-1]
parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent')
params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get(
'parameters')
controller = base.create_resource(collection_name, resource_name,
plugin, params,
allow_bulk=True,
parent=parent)
resource = extensions.ResourceExtension(
collection_name,
controller, parent,
path_prefix=constants.COMMON_PREFIXES[constants.LOADBALANCER],
attr_map=params)
resources.append(resource)
return resources
@classmethod
def get_plugin_interface(cls):
return LoadBalancerPluginBase
def update_attributes_map(self, attributes):
super(Loadbalancer, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class LoadBalancerPluginBase(service_base.ServicePluginBase):
    """Abstract interface an LBaaS service plugin must implement.

    Covers CRUD for vips, pools, members and health monitors, pool
    statistics, pool/health-monitor associations, and vip listeners.
    """
    def get_plugin_name(self):
        return constants.LOADBALANCER
    def get_plugin_type(self):
        return constants.LOADBALANCER
    def get_plugin_description(self):
        return 'LoadBalancer service plugin'
    # --- vip CRUD ---
    @abc.abstractmethod
    def get_vips(self, context, filters=None, fields=None):
        pass
    @abc.abstractmethod
    def get_vip(self, context, id, fields=None):
        pass
    @abc.abstractmethod
    def create_vip(self, context, vip):
        pass
    @abc.abstractmethod
    def update_vip(self, context, id, vip):
        pass
    @abc.abstractmethod
    def delete_vip(self, context, id):
        pass
    # --- pool CRUD and statistics ---
    @abc.abstractmethod
    def get_pools(self, context, filters=None, fields=None):
        pass
    @abc.abstractmethod
    def get_pool(self, context, id, fields=None):
        pass
    @abc.abstractmethod
    def create_pool(self, context, pool):
        pass
    @abc.abstractmethod
    def update_pool(self, context, id, pool):
        pass
    @abc.abstractmethod
    def delete_pool(self, context, id):
        pass
    @abc.abstractmethod
    def stats(self, context, pool_id):
        pass
    # --- pool <-> health monitor association ---
    @abc.abstractmethod
    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        pass
    @abc.abstractmethod
    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        pass
    @abc.abstractmethod
    def delete_pool_health_monitor(self, context, id, pool_id):
        pass
    # --- member CRUD ---
    @abc.abstractmethod
    def get_members(self, context, filters=None, fields=None):
        pass
    @abc.abstractmethod
    def get_member(self, context, id, fields=None):
        pass
    @abc.abstractmethod
    def create_member(self, context, member):
        pass
    @abc.abstractmethod
    def update_member(self, context, id, member):
        pass
    @abc.abstractmethod
    def delete_member(self, context, id):
        pass
    # --- vip listener sub-resource CRUD ---
    @abc.abstractmethod
    def create_vip_listener(self, context, listener, vip_id=None):
        pass
    @abc.abstractmethod
    def delete_vip_listener(self, context, id, vip_id=None):
        pass
    @abc.abstractmethod
    def get_vip_listener(self, context, id, vip_id=None, fields=None):
        pass
    @abc.abstractmethod
    def get_vip_listeners(self,context, vip_id=None, filters=None, fields=None):
        pass
    # --- health monitor CRUD ---
    @abc.abstractmethod
    def get_health_monitors(self, context, filters=None, fields=None):
        pass
    @abc.abstractmethod
    def get_health_monitor(self, context, id, fields=None):
        pass
    @abc.abstractmethod
    def create_health_monitor(self, context, health_monitor):
        pass
    @abc.abstractmethod
    def update_health_monitor(self, context, id, health_monitor):
        pass
    @abc.abstractmethod
    def delete_health_monitor(self, context, id):
        pass
| nash-x/hws | neutron/extensions/loadbalancer.py | Python | apache-2.0 | 25,293 |
# Django production settings; extends the shared base settings.
from .base import *
# NOTE(review): database credentials are hardcoded and committed to VCS --
# consider loading them from the environment or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'inventory',
        'USER': 'inventory',
        'PASSWORD': 'inventory',
        'HOST': 'localhost',
        'PORT': '',
    }
}
#KEY_PREFIX = 'prod'
#KEY_FUNCTION = 'testsite.common.caching.make_key'
# Where is the root of the site? This can be a root-relative URL.
# NOTE(review): 'static://' is not a standard URL scheme -- confirm this is
# intentional and understood by the consumers of SITE_URL.
SITE_URL = 'static://inventory.homelinx.org/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
# NOTE(review): os.path.join on a URL is platform-dependent (would produce
# backslashes on Windows); plain string concatenation may be safer.
STATIC_URL = os.path.join(SITE_URL, 'static/')
ALLOWED_HOSTS = [
    '.tetrasys-design.net',
    '45.76.60.126',
]
# email settings
EMAIL_HOST = 'localhost'
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_REPLY_TO = 'donotreply@'
# Document Email Contacts
DOC_CONTACTS = (
    )
# Logging
LOG_ENV = 'production'
LOG_FILE = '{}/{}-general.log'.format(LOG_DIR, LOG_ENV)
LOG_API_FILE = '{}/{}-api.log'.format(LOG_DIR, LOG_ENV)
LOG_CMD_FILE = '{}/{}-commands.log'.format(LOG_DIR, LOG_ENV)
# NOTE(review): if a handler key is missing in LOGGING, .get(..., {}) makes
# the assignment land in a throwaway dict and silently does nothing --
# verify these handler names exist in base.LOGGING.
LOGGING.get('handlers', {}).get('inventory_file', {})['filename'] = LOG_FILE
LOGGING.get('handlers', {}).get('api_file', {})['filename'] = LOG_API_FILE
LOGGING.get('handlers', {}).get('command_file', {})['filename'] = LOG_CMD_FILE
| cnobile2012/inventory | inventory/settings/production.py | Python | mit | 1,254 |
from sqlalchemy.sql import text

# Prepared UPDATE statements that strip groups of dynamic columns from the
# users.info blob (MariaDB COLUMN_DELETE) for the account bound to :username.

# custom avatar settings
avatar = text(
    "UPDATE users SET info = COLUMN_DELETE(info, "
    "'usecustomavatar', 'customavatarurl') WHERE id=:username;")

# contact e-mail plus PGP details (stray space before the closing
# parenthesis kept verbatim from the original statement)
contactemail = text(
    "UPDATE users SET info = COLUMN_DELETE(info, "
    "'contactemail', 'pgpmirror', 'pgpfingerprint' ) WHERE id=:username;")

# single-column profile fields
github = text(
    "UPDATE users SET info = COLUMN_DELETE(info, 'github') WHERE id=:username;")

twitter = text(
    "UPDATE users SET info = COLUMN_DELETE(info, 'twitter') WHERE id=:username;")

location = text(
    "UPDATE users SET info = COLUMN_DELETE(info, 'location') WHERE id=:username;")

website = text(
    "UPDATE users SET info = COLUMN_DELETE(info, 'website') WHERE id=:username;")

name = text(
    "UPDATE users SET info = COLUMN_DELETE(info, 'name') WHERE id=:username;")

imgoodat = text(
    "UPDATE users SET info = COLUMN_DELETE(info, 'imgoodat') WHERE id=:username;")

icareabout = text(
    "UPDATE users SET info = COLUMN_DELETE(info, 'icareabout') WHERE id=:username;")

contactme = text(
    "UPDATE users SET info = COLUMN_DELETE(info, 'contactme') WHERE id=:username;")
| evrom/genitag | libraries/delete.py | Python | gpl-2.0 | 1,478 |
import numpy as np
import vigra
from lazyflow.request import RequestPool
from lazyflow.graph import Graph
from lazyflow.roi import roiToSlice
from lazyflow.operators.opUnblockedArrayCache import OpUnblockedArrayCache
from lazyflow.utility.testing import OpArrayPiperWithAccessCount
import logging
# Module logger plus the logger used inside the cache implementation;
# the __main__ debug setup below raises both to DEBUG.
logger = logging.getLogger( __name__ )
cacheLogger = logging.getLogger( "lazyflow.operators.opUnblockedArrayCache" )
class TestOpUnblockedArrayCacheCache(object):
    """Tests for OpUnblockedArrayCache using an access-counting source op."""
    def testBasic(self):
        """Cache hits, dirty invalidation, concurrent requests, inner ROIs."""
        graph = Graph()
        opDataProvider = OpArrayPiperWithAccessCount( graph=graph )
        opCache = OpUnblockedArrayCache( graph=graph )
        data = np.random.random( (100,100,100) ).astype(np.float32)
        opDataProvider.Input.setValue( vigra.taggedView( data, 'zyx' ) )
        opCache.Input.connect( opDataProvider.Output )
        # first request must hit the upstream provider exactly once
        roi = ((30, 30, 30), (50, 50, 50))
        cache_data = opCache.Output( *roi ).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 1
        # Request the same data a second time.
        # Access count should not change.
        cache_data = opCache.Output( *roi ).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 1
        # Now invalidate a part of the data
        # The cache will discard it, so the access count should increase.
        opDataProvider.Input.setDirty( (30, 30, 30), (31, 31, 31) )
        cache_data = opCache.Output( *roi ).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 2
        # Repeat this next part just for safety
        for _ in range(10):
            # Make sure the cache is empty
            opDataProvider.Input.setDirty( (30, 30, 30), (31, 31, 31) )
            opDataProvider.accessCount = 0
            # Create many requests for the same data.
            # Upstream data should only be accessed ONCE.
            pool = RequestPool()
            for _ in range(10):
                pool.add( opCache.Output( *roi ) )
            pool.wait()
            assert opDataProvider.accessCount == 1
        # Also, make sure requests for INNER rois of stored blocks are also serviced from memory
        opDataProvider.accessCount = 0
        inner_roi = ((35, 35, 35), (45, 45, 45))
        cache_data = opCache.Output( *inner_roi ).wait()
        assert (cache_data == data[roiToSlice(*inner_roi)]).all()
        assert opDataProvider.accessCount == 0
    def testCacheApi(self):
        """getBlockAccessTimes() reports one positive timestamp per block."""
        graph = Graph()
        opDataProvider = OpArrayPiperWithAccessCount( graph=graph )
        opCache = OpUnblockedArrayCache( graph=graph )
        data = np.random.random( (100,100,100) ).astype(np.float32)
        opDataProvider.Input.setValue( vigra.taggedView( data, 'zyx' ) )
        opCache.Input.connect( opDataProvider.Output )
        # two distinct ROIs -> two cached blocks
        opCache.Output[10:20, 20:40, 50:100].wait()
        opCache.Output[11:21, 22:43, 53:90].wait()
        l = opCache.getBlockAccessTimes()
        assert len(l) == 2
        for k, t in l:
            assert t > 0.0
if __name__ == "__main__":
    # Set up logging for debug: route the module and cache loggers to stdout.
    import sys
    import nose

    logHandler = logging.StreamHandler( sys.stdout )
    logger.addHandler( logHandler )
    cacheLogger.addHandler( logHandler )
    logger.setLevel( logging.DEBUG )
    cacheLogger.setLevel( logging.DEBUG )
    # Run nose ("sys" used to be imported a second time right here).
    sys.argv.append("--nocapture")    # Don't steal stdout. Show it on the console as usual.
    sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
    ret = nose.run(defaultTest=__file__)
    if not ret: sys.exit(1)
| jakirkham/lazyflow | tests/testOpUnblockedArrayCache.py | Python | lgpl-3.0 | 3,770 |
import datetime
import json
import re
import types
from afs.model.Project import Project
from afs.model.ProjectSpread import ProjectSpread
from afs.service.BaseService import BaseService
from afs.service.ProjectServiceError import ProjectServiceError
from afs.service.FSService import FSService
from afs.service.VolumeService import VolumeService
from afs.model.ExtendedVolumeAttributes import ExtVolAttr
import afs
class ProjectService(BaseService):
"""
Provides Service about Project management.
This deals only with the DBCache.
"""
    def __init__(self, conf=None):
        """
        Initialize the project service.

        :param conf: optional configuration object passed on to BaseService
        :raises ProjectServiceError: if no DB cache is configured, since
            project management works exclusively on the DB cache
        """
        BaseService.__init__(self, conf, LLAList=["fs"])
        if not self._CFG.DB_CACHE:
            raise ProjectServiceError('Error, Projects work only with a DBCache defined ', None)
        # template model object plus helper services for volume and
        # fileserver access
        self.ModelObj = Project()
        self._VS = VolumeService()
        self._FS = FSService()
        return
def get_object(self, obj_or_param) :
"""
turn a input parameter which might be a project name into an object.
do nothing if input is already an object
"""
if isinstance(obj_or_param, Project) :
this_project = obj_or_param
else :
this_project = self.DBManager.get_from_cache(Project, fresh_only=False, name=obj_or_param)
self.Logger.debug("get_object: name %s gave %s" % (obj_or_param, this_project))
return this_project
#
# methods accessing project objects
#
def create_project(self, name, description, owner="", rw_locations=[], ro_locations=[]) :
"""
create an empty project
"""
this_project = Project()
this_project.name = name
this_project.description = description
this_project.owner = owner
this_project.rw_locations = rw_locations
this_project.ro_locations = ro_locations
# store_project adds db_id
return self.store_project(this_project)
def delete_project(self, name_or_obj) :
"""
remove on project from the database.
take name as string or ProjectObject
"""
this_project = self.get_object(name_or_obj)
return self.DBManager.delete_from_cache(Project, name=this_project.name)
    def store_project(self, this_project):
        """
        Persist a Project object into the DB cache.

        :param this_project: Project object to store
        :return: the Project re-read from the cache (so that db_id is set)
        """
        self.Logger.debug("store_project: Obj=%s" % (this_project))
        this_project.update_db_repr()
        self.DBManager.set_into_cache(Project, this_project, name=this_project.name)
        # we need to return the object from the DB to set db_id.
        return self.get_object(this_project.name)
def get_project_by_name(self, name) :
"""
return ProjectObj from Projectname
"""
self.Logger.debug("get_project_by_name: called with name='%s'" % name)
# Note: fresh_only must be false for all get_from_cache operations,
# otherwise we get None
this_project = self.DBManager.get_from_cache(Project, fresh_only=False, name="%s" % name)
return this_project
def get_project_list(self) :
"""
return list of ProjectObjects
"""
project_list = self.DBManager.get_from_cache(Project, fresh_only=False, must_be_unique=False)
if project_list == None :
project_list = []
return project_list
def get_server_partitions(self, obj_or_param) :
"""
return lists of (fs-name, part) tuples
for RW and RO.
So basically it is like rw_serverparts and ro_serverparts, but the
uuids replaced by hostnames.
"""
this_project = self.get_object(obj_or_param)
if not this_project : return None
rw_list = []
ro_list = []
for fileserver_uuid, part in this_project.rw_serverparts :
fs_name = afs.LOOKUP_UTIL[self._CFG.cell].get_hostname_by_fsuuid(fileserver_uuid)
rw_list.append((fs_name,part),)
for fileserver_uuid, part in this_project.ro_serverparts :
fs_name = afs.LOOKUP_UTIL[self._CFG.cell].get_hostname_by_fsuuid(fileserver_uuid)
ro_list.append((fs_name,part),)
return rw_list, ro_list
def add_server_partition(self, obj_or_param, server_partition, volume_type) :
"""
assign a server_partition to a project
"""
this_project = self.get_object(obj_or_param)
fs_name, part = server_partition
fileserver_uuid = afs.LOOKUP_UTIL[self._CFG.cell].get_fsuuid(fs_name)
part = afs.util.misc.canonicalize_partition(part)
if volume_type == "RW" :
this_project.rw_serverparts.append((fileserver_uuid, part))
elif volume_type == "RO" :
this_project.ro_serverparts.append((fileserver_uuid, part))
else :
raise RuntimeError("assign_server_partition: Invalid volume_type '%s'" % volume_type)
self.store_project(this_project)
return this_project
def remove_server_partition(self, obj_or_param, server_partition, volume_type) :
"""
remove server_partition assignment from a project
"""
this_project = self.get_object(obj_or_param)
self.Logger.debug("called with: server_partition=%s, this_project=%s" % (server_partition, this_project) )
fs_name, part = server_partition
fileserver_uuid = afs.LOOKUP_UTIL[self._CFG.cell].get_fsuuid(fs_name)
part = afs.util.misc.canonicalize_partition(part)
if volume_type == "RW" :
this_project.rw_serverparts.remove((fileserver_uuid, part))
elif volume_type == "RO" :
this_project.ro_serverparts.remove((fileserver_uuid, part))
else :
raise RuntimeError("resign_server_partition: Invalid volume_type '%s'" % volume_type)
self.store_project(this_project)
return this_project
def add_location(self, obj_or_param, location, volume_type) :
"""
add a location
"""
this_project = self.get_object(obj_or_param)
if volume_type == "RW" :
this_project.rw_locations.append(location)
elif volume_type == "RO" :
this_project.ro_locations.append(location)
else :
raise RuntimeError("add_location: Invalid volume_type '%s'" % volume_type)
self.store_project(this_project)
return this_project
def remove_location(self, obj_or_param, location, volume_type) :
"""
remove a location
"""
this_project = self.get_object(obj_or_param)
if volume_type == "RW" :
this_project.rw_locations.remove(location)
elif volume_type == "RO" :
this_project.ro_locations.remove(location)
else :
raise RuntimeError("remove_location: Invalid volume_type '%s'" % volume_type)
self.store_project(this_project)
return this_project
def set_parent(self, obj_or_param, parent_obj_or_param) :
"""
set the parent of a project
"""
this_project = self.get_object(obj_or_param)
parent_project = self.get_object(parent_obj_or_param)
this_project.parent_db_id = parent_project.db_id
self.store_project(this_project)
return this_project
def remove_parent(self, obj_or_param) :
"""
remove the parent of a project
"""
this_project = self.get_object(obj_or_param)
this_project.parent_db_id = -1
self.store_project(this_project)
return this_project
def get_parent(self, obj_or_param) :
"""
return the parent project object
"""
this_project = self.get_object(obj_or_param)
parent_project = self.DBManager.get_from_cache(Project, fresh_only=False, must_be_unique=True, db_id = this_project.parent_db_id)
return parent_project
def set_owner(self, obj_or_param, owner) :
"""
set the owner attribute
"""
this_project = self.get_object(obj_or_param)
this_project.owner = owner
self.store_project(this_project)
return this_project
def add_volume_regex(self, obj_or_param, regex) :
"""
add a volume name regex for this project.
"""
this_project = self.get_object(obj_or_param)
if not regex in this_project.volname_regex :
this_project.volname_regex.append(regex)
self.store_project(this_project)
return this_project
    def remove_volume_regex(self, obj_or_param, regex) :
        """
        remove a volume name regex from this project.

        Raises ValueError if the regex is not registered with the project.
        """
        this_project = self.get_object(obj_or_param)
        this_project.volname_regex.remove(regex)
        self.store_project(this_project)
        return this_project
def add_additional_volume(self, obj_or_param, volname) :
"""
add an extra volume not covered by the regex to be part of this project
"""
this_project = self.get_object(obj_or_param)
if not volname in this_project.additional_volnames :
this_project.additional_volnames.append(volname)
self.store_project(this_project)
return this_project
def remove_additional_volume(self, obj_or_param, volname) :
"""
remove an extra volume not covered by the regex to be part of this project
"""
this_project = self.get_object(obj_or_param)
this_project.additional_volnames.remove(volname)
self.store_project(this_project)
return this_project
def add_excluded_volume(self, obj_or_param, volname) :
"""
add a volume name to be excluded efrom this project ven if it matches a regex.
"""
this_project = self.get_object(obj_or_param)
if not volname in this_project.excluded_volnames :
this_project.excluded_volnames.append(volname)
self.store_project(this_project)
return this_project
    def remove_excluded_volume(self, obj_or_param, volname) :
        """
        remove a volume name from this project's exclusion list, so that
        the volume belongs to the project again if it matches a regex.
        """
        this_project = self.get_object(obj_or_param)
        this_project.excluded_volnames.remove(volname)
        self.store_project(this_project)
        return this_project
#
# methods scanning cell
#
    def get_projects_by_volume_name(self, volname):
        """
        return List of lists of Projects Objs from VolumeName.
        These lists are sorted by parent hierarchy of the projects
        (each inner list starts at a root project and descends to a leaf).
        """
        # collect every project whose regexes match the volume name
        unsorted_list = []
        for p in self.DBManager.get_from_cache(Project, fresh_only=False, must_be_unique=False) :
            for rx in p.volname_regex :
                if re.compile(rx).match(volname) :
                    unsorted_list.append(p)
        # seed one hierarchy chain per root project (parent_db_id == -1)
        hierarchies = []
        for p in unsorted_list :
            if p.parent_db_id == -1 :
                hierarchies.append([p,])
        self.Logger.debug("hierarchies=%s" % hierarchies )
        self.Logger.debug("unsorted_list=%s" % [ (x.db_id, x.parent_db_id) for x in unsorted_list] )
        # repeatedly append children to the chain ending in their parent
        # until no chain grows any more
        loop_again = True
        while loop_again :
            loop_again = False
            for p in unsorted_list :
                self.Logger.debug("project=%s,%s" % (p.db_id, p.parent_db_id))
                for hier in hierarchies :
                    self.Logger.debug("hierarchy=%s" % hier[-1:][0].db_id)
                    # attach p if its parent is the current chain tail and
                    # p is not already part of the chain
                    if p.parent_db_id == hier[-1:][0].db_id and p.db_id not in [ x.db_id for x in hier ] :
                        hier.append(p)
                        self.Logger.debug("adding p %s to hierarchy %s" % ( (p.parent_db_id, p.db_id), [ x.db_id for x in hier ] ))
                        loop_again = True
        self.Logger.debug("hierarchies=%s" % [ (x[0].db_id, x[0].parent_db_id) for x in hierarchies] )
        return hierarchies
    def get_projects_on_server(self, name_or_obj) :
        """
        return dict[Partition] of lists of [(ProjectName, VolType)] for a fileserver

        Accepts either a fileserver hostname (string) or an object with a
        ``hostnames`` attribute (Python 2: isinstance check on basestring).
        """
        # NOTE(review): ProjectList is assigned but never used below
        ProjectList = self.get_project_list()
        if isinstance(name_or_obj, basestring) :
            fs_name = name_or_obj
        else :
            try :
                fs_name = name_or_obj.hostnames[0]
            except :
                raise ProjectServiceError("Name of server (string) or Fileserver-Instance required.")
        FSUUID = afs.LOOKUP_UTIL[self._CFG.cell].get_fsuuid(fs_name)
        res_dict={}
        this_fs = self._FS.get_fileserver(fs_name)
        self.Logger.debug("this_fs=%s" % this_fs.servernames)
        self.Logger.debug("parts=%s" % this_fs.parts)
        # for every partition, look up the cached ProjectSpread entries and
        # translate them back into (project-name, volume-type) pairs
        for part in this_fs.parts :
            p = part.name
            res_dict[p]=[]
            all_prjs = self.DBManager.get_from_cache(ProjectSpread, fresh_only=False, must_be_unique=False, fileserver_uuid=FSUUID, part=p)
            self.Logger.debug("Results from DB: %s" % all_prjs )
            if all_prjs :
                for prj_spr in all_prjs :
                    prj = self.DBManager.get_from_cache(Project, fresh_only=False, must_be_unique=True, db_id=prj_spr.project_id)
                    res_dict[p].append((prj.name, prj_spr.vol_type))
        return res_dict
def get_server_spread(self, obj_or_param):
"""
return dict of lists of ProjectSpread Objects :
[VolType]=[ProjectSpread]
"""
self.Logger.debug("get_server_spread: called with %s" % obj_or_param)
this_project = self.get_object(obj_or_param)
if not this_project : return None
res_dict = {"RW" : [], "RO" : [], "BK" : []}
for vol_type in res_dict :
res_dict[vol_type] = self.DBManager.get_from_cache(ProjectSpread, fresh_only=False, must_be_unique=False, vol_type=vol_type, project_id = this_project.db_id)
return res_dict
def gc_server_spread(self) :
registered_prj_ids = [ p.db_id for p in self.get_project_list() ]
all_project_spreads = self.DBManager.get_from_cache(ProjectSpread, fresh_only=False, must_be_unique=False)
if not all_project_spreads :
return
to_be_removed = []
for prj_sprd in all_project_spreads :
if not prj_sprd.project_id in registered_prj_ids :
to_be_removed.append(prj_sprd)
for prj_sprd in to_be_removed :
self.DBManager.delete_from_cache(ProjectSpread, db_id = prj_sprd.db_id)
return
    def update_server_spread(self, obj_or_param):
        """
        update the server spread from the live cell into the DB.

        Counts, per volume type and (fileserver, partition) pair, how many
        volumes of this project live there, and writes the resulting
        ProjectSpread objects into the DB cache.
        :return: dict {"RW"/"RO"/"BK": [ProjectSpread, ...]},
            None if the project has no volume list
        """
        this_project = self.get_object(obj_or_param)
        self.Logger.debug("update_server_spread called for prj %s" % this_project.name)
        res_dict = {"RW" : [], "RO" : [], "BK" : []}
        VolIDList = self.get_volume_IDs(this_project.name)
        if VolIDList == None : return None
        for v in VolIDList :
            VolGroup = self._VS.get_volume_group(v)
            for vol_type in res_dict :
                if VolGroup[vol_type] == None : continue
                # normalize single objects to a list (Python 2 types module)
                if not isinstance(VolGroup[vol_type],types.ListType) :
                    VolGroup[vol_type] = [VolGroup[vol_type],]
                for vol in VolGroup[vol_type] :
                    FSUUID = vol.fileserver_uuid
                    Part = vol.partition
                    # look for an existing spread entry for this
                    # (server, partition, type) combination
                    thisPrjSPObj = None
                    ResDictIndex = -1
                    for i in range(len(res_dict[vol_type])) :
                        prjspObj = res_dict[vol_type][i]
                        self.Logger.debug("comparing %s,%s,%s with %s,%s,%s" % ( prjspObj.fileserver_uuid, prjspObj.part, prjspObj.vol_type, FSUUID, Part, vol_type) )
                        if prjspObj.fileserver_uuid == FSUUID and prjspObj.part == Part and prjspObj.vol_type == vol_type :
                            thisPrjSPObj=prjspObj
                            ResDictIndex=i
                            break
                    # none found: start a fresh ProjectSpread with count 0
                    if thisPrjSPObj == None :
                        thisPrjSPObj = ProjectSpread()
                        thisPrjSPObj.project_id = this_project.db_id
                        thisPrjSPObj.part = Part
                        thisPrjSPObj.fileserver_uuid = FSUUID
                        thisPrjSPObj.vol_type = vol_type
                        thisPrjSPObj.num_vol = 0
                    thisPrjSPObj.num_vol += 1
                    if ResDictIndex == -1 :
                        res_dict[vol_type].append(thisPrjSPObj)
                    else :
                        res_dict[vol_type][ResDictIndex] = thisPrjSPObj
        # persist all accumulated spread objects into the DB cache
        for vol_type in res_dict :
            for thisPrjSPObj in res_dict[vol_type] :
                self.DBManager.set_into_cache(ProjectSpread, thisPrjSPObj, vol_type=vol_type, project_id=thisPrjSPObj.project_id, fileserver_uuid=thisPrjSPObj.fileserver_uuid, part=thisPrjSPObj.part)
        self.Logger.debug("update_server_spread: returning %s" % res_dict)
        return res_dict
def get_volume_IDs(self, prjname, servers=None) :
"""
return list of Volume IDs part of this project.
servers is an optional list of server_uuids to be used.
"""
self.Logger.debug("Entering with prjname=%s and servers=%s" % (prjname,servers))
this_project = self.get_project_by_name(prjname)
if not this_project :
self.Logger.debug("Invalid project-name %s given.", prjname)
return None
list = self.DBManager.get_from_cache(ExtVolAttr,fresh_only=False, must_be_unique=False)
self.Logger.debug("Results from DB: %s" % list)
list = self.DBManager.get_from_cache_by_list_element(ExtVolAttr, ExtVolAttr.project_ids_js, this_project.db_id, fresh_only=False, must_be_unique=False)
if list == None :
self.Logger.debug("Results from DB: %s" % list )
return []
elif len(list) > 0 :
self.Logger.debug("Results[:10] from DB: %s" % list[:min(10,len(list))] )
else :
self.Logger.debug("Results from DB: %s" % list)
VolIDList=[]
for l in list :
if servers == None :
VolIDList.append(l.vid)
else :
for v in self._VS.get_volume(l.vid, cached=True) :
self.Logger.debug("Comparing '%s' to '%s'" % (v.fileserver_uuid,servers))
if v.fileserver_uuid in servers :
VolIDList.append(l.vid)
self.Logger.debug("Returning VolIDList=%s" % VolIDList)
return VolIDList
def get_storage_usage(self, prjname) :
"""
return dict of storage usage for given project.
"""
this_project=self.get_project_by_name(prjname)
if not this_project : return None
res_dict={}
conn = self._CFG.DB_ENGINE.connect()
transa = conn.begin()
RegEx="\\\[({0}|.*, {0}|{0},.*|.*, {0},.*)\\\]".format(this_project.db_id)
# openafs volumes
# this is not very efficient
# for external RO we need the list of vids
# all vids
rawsql='SELECT E.vid FROM tbl_extvolattr AS E JOIN tbl_volume AS VOL on E.vid = VOL.vid WHERE E.project_ids_js REGEXP "%s";' % (RegEx)
self.Logger.debug("Executing %s" % rawsql)
res = conn.execute(rawsql).fetchall()
self.Logger.debug("got res=%s" % res)
VolIDs=[]
for vid in res :
VolIDs.append(vid[0])
self.Logger.debug("VolIDs=%s" % (VolIDs))
res_dict["diskused"]=0
res_dict["filecount"]=0
for vid in VolIDs :
for field in ["diskused","filecount"] :
rawsql='SELECT %s FROM tbl_volume WHERE vid="%s";' % (field,vid)
self.Logger.debug("Executing %s" % rawsql)
res = conn.execute(rawsql).fetchall()
self.Logger.debug("got res=%s" % res)
try :
res_dict[field] += res[0][0]
except :
res_dict[field] = 0
if res_dict[field] == None : res_dict[field] = 0
for field in ["diskused","filecount"] :
rawsql='SELECT SUM(VOL.%s) FROM tbl_volume AS Vol2 JOIN tbl_volume AS VOL on Vol2.vid = VOL.parentID WHERE Vol2.vid="%s" AND VOL.servername != Vol2.servername;' % (field,vid)
self.Logger.debug("Executing %s" % rawsql)
res = conn.execute(rawsql).fetchall()
self.Logger.debug("got res=%s" % res)
try :
res_dict[field] += res[0][0]
except :
pass
transa.commit()
conn.close()
res_dict["blocks_fs"] += res_dict["diskused"]*1024 # XXX diskused is in Kbytes
res_dict["files_fs"] += res_dict["filecount"]
self.Logger.debug("get_storage_uUsage: returning %s" % res_dict)
return res_dict
def get_new_volume_location(self, obj_or_param, VolObj, reservedSpace={}) :
"""
get a new volume location for a volume in the given project.
Has to be called separately for RW and external RO.
If volume already exists, return alternate location.
For now, only choose on size.
reservedSpace is substracted from the current free space of fileservers,
so that parallel transfers/creations are possible.
must be of form {"fileserver_uuid" : {"partname" : reservedSpace/kB } }
"""
self.Logger.debug("called with Prj=%s, VolObj=%s" % (obj_or_param, VolObj))
this_project = self.get_object(obj_or_param)
if not this_project : return None
if VolObj.type == "RW" :
sps = this_project.rw_serverparts
elif VolObj.type == "RO" :
sps = this_project.ro_serverparts
else :
raise RuntimeError("Invalid Voltype : %s" % VolObj.type)
# get locations of Volume
# check if we're dealing with an existing Volume
## XXX does it work with RO, where the RW has been deleted ??
try :
existing_volume = self._VS.get_volume(VolObj.name, cached=False)
except :
existing_volume = []
if len(existing_volume) > 0 :
existing_volume = self._VS.get_volume(VolObj.name, cached=False)[0]
RWVolLocation = (existing_volume.fileserver_uuid, existing_volume.part)
else :
self.Logger.debug("Volume of name %s doesn't exist" % VolObj.name)
RWVolLocation = ()
if VolObj.type != "RW" :
raise ProjectServiceError("RW-Volume %s does not exist. Cannot create non-RW volumes for that name." % VolObj.name)
ROVolLocations=[]
if VolObj.name[-9:] != ".readonly" :
use_vol_name = "%s.readonly" % VolObj.name
else :
use_vol_name = VolObj.name
try :
existingROVols = self._VS.get_volume(use_vol_name, cached=False)
except :
existingROVols = []
for v in existingROVols :
ROVolLocations.append((v.fileserver_uuid, v.part))
# get PartInfos from livesystem
PartInfos = {}
for fileserver_uuid, thisPart in sps :
this_fs_name = afs.LOOKUP_UTIL[self._CFG.cell].get_hostname_by_fsuuid(fileserver_uuid)
if not fileserver_uuid in PartInfos :
PartInfos[fileserver_uuid] = {}
for p in self._fsLLA.get_partitions(this_fs_name) :
PartInfos[fileserver_uuid][p.name] = p.free
# XXX partinfo contains now all partitions
self.Logger.debug("PartInfos of Prj %s: %s" % (this_project.name, PartInfos))
self.Logger.debug("ServerPartitions of Prj %s: %s" % (this_project.name, sps))
self.Logger.debug("VolumeLocations: RW: %s RO:%s" % (RWVolLocation, ROVolLocations))
# find one with most Free size
# we need to iterate over sps again, since we might have more partitions
# than belonging to this project
maxFree = -1
fs_name = Part = None
for fileserver_uuid, thisPart in sps :
self.Logger.debug("fileserver_uuid =%s, thisPart = %s" % (fileserver_uuid,thisPart))
this_fs_name = afs.LOOKUP_UTIL[self._CFG.CELL_NAME].get_hostname_by_fsuuid(fileserver_uuid)
if not thisPart in PartInfos[fileserver_uuid].keys() :
raise ProjectServiceError("Project %s incorrectly defined. Server %s has no partition %s" % (prjname,this_fs_name,thisPart))
if VolObj.type == "RW" :
# ignore the original SP
if (fileserver_uuid, thisPart) == RWVolLocation : continue
haveVolonOtherPart = False
# if Vol already on dst Server, just consider the corresponding partition
for o_fileserver_uuid,o_part in ROVolLocations :
if o_fileserver_uuid == fileserver_uuid and o_part != thisPart :
haveVolonOtherPart = True
if haveVolonOtherPart : continue
elif VolObj.type == "RO" :
# ignore servers having alread one RO
skip_it = False
for ro_srv_uuid, ro_srv_part in ROVolLocations :
if fileserver_uuid == ro_srv_uuid :
self.Logger.debug("Have already on RO on this server, ignore it.")
skip_it = True
if skip_it :
continue
# if we have a single RW on this SP, ignore other partitions
if fileserver_uuid == RWVolLocation[0] and thisPart != RWVolLocation[1] :
self.Logger.debug("this SP is a different Partition on the RW-Server, ignore it.")
continue
else :
raise ProjectServiceError("Internal Error. Got invalid volume-type %s" % VolObj.type)
# substract reservedSpace
try :
effective_space = PartInfos[fileserver_uuid][thisPart] - reservedSpace[fileserver_uuid][thisPart]
except :
effective_space = PartInfos[fileserver_uuid][thisPart]
# leave at least 100 GB free on destination server
if effective_space > maxFree and effective_space > 1024*1024*100 :
maxFree = PartInfos[fileserver_uuid][thisPart]
fs_name = this_fs_name
Part = thisPart
self.Logger.debug("best bet so far: srv %s, part %s, max_free: %s" % (fs_name,Part,maxFree) )
return fs_name, Part
def update_volume_mappings(self) :
"""
(Re-)scan the entire cell
update all ExtVolAttr to current Project definitions
"""
# cycle through all Projects, collect volumes matching their regex in a dict
RWVols = {}
vname_vid_mapping = {}
Projects = self.get_project_list()
for prj in Projects :
self.Logger.debug("Updating Project %s" % prj.name)
if len(prj.volname_regex) > 0 :
regEXSQL = 'AND ( name REGEXP ("%s")' % prj.volname_regex[0]
if len (prj.volname_regex) > 1 :
for i in range(1,len(prj.volname_regex)) :
regEXSQL += 'OR name REGEXP ("%s") ' % prj.volname_regex[1]
rawSQL = 'SELECT vid, name FROM tbl_volume WHERE type="RW" %s );' % regEXSQL
for vid, vname in self.DBManager.execute_raw(rawSQL).fetchall() :
if vname in prj.excluded_volnames : continue
vname_vid_mapping[vname] = vid
if RWVols.has_key(vname) :
RWVols[vname].append(prj.db_id)
else :
RWVols[vname] = [prj.db_id,]
# additional volumes
for _vname in prj.additional_volnames :
if len(_vname) == 0 : continue
res = self.DBManager.execute_raw('SELECT vid, name FROM tbl_volume WHERE type="RW" and name="%s"' % _vname ).fetchone()
if res == None : raise ProjectServiceError('Project %s corrupted. additional Volume "%s" does not exist.' % (prj.name, _vname))
vid, vname = res
vname_vid_mapping[vname] = vid
if RWVols.has_key(vname) :
if prj.db_id in RWVols[vname] :
self.Logger.warning("Project %s: Volume %s already caught by regexes." % (prj.name,vname))
RWVols[vname].append(prj.db_id)
else :
RWVols[vname] = [prj.db_id,]
# create dict of Projects, prjid is key
Prjs={}
for p in Projects :
Prjs[p.db_id] = p
# clean list of nested projects for one volume.
# a volume should have only leaf projects
for vname in RWVols :
if len(RWVols[vname]) <= 1 : continue
self.Logger.debug("clean up nested projects for RWVols[%s]=%s" % (vname, RWVols[vname]))
prj_hierarchy = self.get_projects_by_volume_name(vname)
#for prjid in RWVols[v] :
# if Prjs[prjid].NestingLevel == 0 : continue
# if Prjs[prjid].NestingLevel > lowestNest :
# self.Logger.debug("Removing prj with id=%s" % prjid)
# removals.append(prjid)
#for r in removals :
# RWVols[v].remove(r)
for vname in RWVols :
vid = vname_vid_mapping[vname]
self.Logger.debug("processing v=%s" % vid)
if vid == None :
self.Logger.warn("got a None in vols=%s" % RWVols)
continue
self.Logger.debug("processing v=%s" % RWVols[vname])
project_ids = RWVols[vname]
self.Logger.debug("project_ids=%s" % project_ids)
this_ext_volume_attributes = self.DBManager.get_from_cache(ExtVolAttr, vid=vid, fresh_only=False, must_be_unique=True)
if this_ext_volume_attributes != None :
this_ext_volume_attributes.project_ids = project_ids
else :
this_ext_volume_attributes = ExtVolAttr()
this_ext_volume_attributes.vid = vid
this_ext_volume_attributes.project_ids = project_ids
this_ext_volume_attributes.num_min_ro = 2
this_ext_volume_attributes.owner = "N/A"
this_ext_volume_attributes.pinned_on_server = 0
this_ext_volume_attributes.update_db_repr()
self.DBManager.set_into_cache(ExtVolAttr, this_ext_volume_attributes, vid=vid )
for p in Projects :
self.update_server_spread(p)
return
| openafs-contrib/afspy | afs/service/ProjectService.py | Python | bsd-2-clause | 30,997 |
from __future__ import absolute_import
__version__ = '0.1.9b1'
import sporco.cdict
import sporco.util
import sporco.plot
import sporco.linalg
import sporco.metric
import sporco.prox
import sporco.admm
| alphacsc/alphacsc | alphacsc/other/sporco/sporco/__init__.py | Python | bsd-3-clause | 203 |
from random import Random
from time import time
import inspyred
import math
# Define an additional "necessary" function for the evaluator
# to see how it must be handled when using pp.
def my_squaring_function(x):
    """Square *x*; a module-level helper so pp can ship it as a dependency."""
    return pow(x, 2)
def generate_rastrigin(random, args):
    """Return a fresh candidate: uniform draws from [-5.12, 5.12]."""
    dimensions = args.get('num_inputs', 10)
    candidate = []
    for _ in range(dimensions):
        candidate.append(random.uniform(-5.12, 5.12))
    return candidate
def evaluate_rastrigin(candidates, args):
    """Rastrigin fitness for each candidate (minimum 0 at all-ones).

    Deliberately raises TypeError when any candidate gets close to the
    optimum, to exercise the evaluator's error handling.
    """
    fitness = []
    for cs in candidates:
        terms = [my_squaring_function(x - 1) -
                 10 * math.cos(2 * math.pi * (x - 1))
                 for x in cs]
        fitness.append(10 * len(cs) + sum(terms))
    if any(f < 9.5 for f in fitness):
        raise TypeError
    return fitness
def main(prng=None, display=False):
    """Run a DEA on Rastrigin with inspyred's parallel-python evaluator.

    prng -- optional pre-seeded Random instance (a time-seeded one is
            created when omitted)
    display -- when True, attach the stats observer and print the best
               solution at the end
    Returns the evolutionary computation object.
    """
    import logging
    if prng is None:
        prng = Random()
        prng.seed(time())
    # Log everything from inspyred.ec to a fresh file for inspection.
    ec_log = logging.getLogger('inspyred.ec')
    ec_log.setLevel(logging.DEBUG)
    handler = logging.FileHandler('inspyred.log', mode='w')
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    ec_log.addHandler(handler)
    ea = inspyred.ec.DEA(prng)
    if display:
        ea.observer = inspyred.ec.observers.stats_observer
    ea.terminator = inspyred.ec.terminators.evaluation_termination
    evolve_kwargs = {
        'generator': generate_rastrigin,
        'evaluator': inspyred.ec.evaluators.parallel_evaluation_pp,
        'pp_evaluator': evaluate_rastrigin,
        'pp_dependencies': (my_squaring_function,),
        'pp_modules': ("math",),
        'pop_size': 8,
        'bounder': inspyred.ec.Bounder(-5.12, 5.12),
        'maximize': False,
        'max_evaluations': 256,
        'num_inputs': 3,
    }
    final_pop = ea.evolve(**evolve_kwargs)
    if display:
        best = max(final_pop)
        print('Best Solution: \n{0}'.format(str(best)))
    return ea
# Allow running this example directly as a script.
if __name__ == '__main__':
    main(display=True)
| hishnash/inspyred | examples/advanced/logging_example.py | Python | mit | 2,170 |
__author__ = 'christianurich'
from dm_sei import *
from dm_wtp_extreme_heat import *
from dm_wtp_recreational_value import *
from dm_wtp_stream_health import *
from dm_wtp_water_restrictions import *
from convert_cscol import *
from mikewhisperer import *
from scale_demand import * | iut-ibk/DynaMind-ToolBox | DynaMind-Performance-Assessment/scripts/DMPerformance/__init__.py | Python | gpl-2.0 | 283 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main browser widgets."""
import html
import functools
from typing import cast
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QUrl, QPoint
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtNetwork import QNetworkReply, QNetworkRequest
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtPrintSupport import QPrintDialog
from PyQt5.QtWebKitWidgets import QWebPage, QWebFrame
from qutebrowser.config import websettings, config
from qutebrowser.browser import pdfjs, shared, downloads, greasemonkey
from qutebrowser.browser.webkit import http
from qutebrowser.browser.webkit.network import networkmanager
from qutebrowser.utils import message, usertypes, log, jinja, objreg
from qutebrowser.qt import sip
class BrowserPage(QWebPage):
    """Our own QWebPage with advanced features.
    Attributes:
        error_occurred: Whether an error occurred while loading.
        _extension_handlers: Mapping of QWebPage extensions to their handlers.
        _networkmanager: The NetworkManager used.
        _win_id: The window ID this BrowserPage is associated with.
        _ignore_load_started: Whether to ignore the next loadStarted signal.
        _is_shutting_down: Whether the page is currently shutting down.
        _tabdata: The TabData object of the tab this page is in.
    Signals:
        shutting_down: Emitted when the page is currently shutting down.
        reloading: Emitted before a web page reloads.
                   arg: The URL which gets reloaded.
        navigation_request: Emitted on acceptNavigationRequest.
    """
    shutting_down = pyqtSignal()
    reloading = pyqtSignal(QUrl)
    navigation_request = pyqtSignal(usertypes.NavigationRequest)
    def __init__(self, win_id, tab_id, tabdata, private, parent=None):
        super().__init__(parent)
        self._win_id = win_id
        self._tabdata = tabdata
        self._is_shutting_down = False
        self._extension_handlers = {
            QWebPage.ErrorPageExtension: self._handle_errorpage,
            QWebPage.ChooseMultipleFilesExtension: self._handle_multiple_files,
        }
        self._ignore_load_started = False
        self.error_occurred = False
        self._networkmanager = networkmanager.NetworkManager(
            win_id=win_id, tab_id=tab_id, private=private, parent=self)
        self.setNetworkAccessManager(self._networkmanager)
        self.setForwardUnsupportedContent(True)
        self.reloading.connect(self._networkmanager.clear_rejected_ssl_errors)
        # type: ignore comments below: the PyQt stubs don't declare these
        # QWebPage signals as attributes.
        self.printRequested.connect(  # type: ignore[attr-defined]
            self.on_print_requested)
        self.downloadRequested.connect(  # type: ignore[attr-defined]
            self.on_download_requested)
        self.unsupportedContent.connect(  # type: ignore[attr-defined]
            self.on_unsupported_content)
        self.loadStarted.connect(  # type: ignore[attr-defined]
            self.on_load_started)
        self.featurePermissionRequested.connect(  # type: ignore[attr-defined]
            self._on_feature_permission_requested)
        self.saveFrameStateRequested.connect(  # type: ignore[attr-defined]
            self.on_save_frame_state_requested)
        self.restoreFrameStateRequested.connect(  # type: ignore[attr-defined]
            self.on_restore_frame_state_requested)
        self.loadFinished.connect(  # type: ignore[attr-defined]
            functools.partial(self._inject_userjs, self.mainFrame()))
        self.frameCreated.connect(  # type: ignore[attr-defined]
            self._connect_userjs_signals)
    @pyqtSlot('QWebFrame*')
    def _connect_userjs_signals(self, frame):
        """Connect userjs related signals to `frame`.
        Connect the signals used as triggers for injecting user
        JavaScripts into the passed QWebFrame.
        """
        log.greasemonkey.debug("Connecting to frame {} ({})"
                               .format(frame, frame.url().toDisplayString()))
        frame.loadFinished.connect(
            functools.partial(self._inject_userjs, frame))
    def javaScriptPrompt(self, frame, js_msg, default):
        """Override javaScriptPrompt to use qutebrowser prompts."""
        if self._is_shutting_down:
            return (False, "")
        try:
            return shared.javascript_prompt(frame.url(), js_msg, default,
                                            abort_on=[self.loadStarted,
                                                      self.shutting_down])
        except shared.CallSuper:
            return super().javaScriptPrompt(frame, js_msg, default)
    def _handle_errorpage(self, info, errpage):
        """Display an error page if needed.
        Loosely based on Helpviewer/HelpBrowserWV.py from eric5
        (line 260 @ 5d937eb378dd)
        Args:
            info: The QWebPage.ErrorPageExtensionOption instance.
            errpage: The QWebPage.ErrorPageExtensionReturn instance, where the
                     error page will get written to.
        Return:
            False if no error page should be displayed, True otherwise.
        """
        ignored_errors = [
            (QWebPage.QtNetwork, QNetworkReply.OperationCanceledError),
            # "Loading is handled by the media engine"
            (QWebPage.WebKit, 203),
            # "Frame load interrupted by policy change"
            (QWebPage.WebKit, 102),
        ]
        errpage.baseUrl = info.url
        urlstr = info.url.toDisplayString()
        if (info.domain, info.error) == (QWebPage.QtNetwork,
                                         QNetworkReply.ProtocolUnknownError):
            # For some reason, we get a segfault when we use
            # QDesktopServices::openUrl with info.url directly - however it
            # works when we construct a copy of it.
            url = QUrl(info.url)
            scheme = url.scheme()
            message.confirm_async(
                title="Open external application for {}-link?".format(scheme),
                text="URL: <b>{}</b>".format(
                    html.escape(url.toDisplayString())),
                yes_action=functools.partial(QDesktopServices.openUrl, url),
                url=info.url.toString(QUrl.RemovePassword | QUrl.FullyEncoded))
            return True
        elif (info.domain, info.error) in ignored_errors:
            log.webview.debug("Ignored error on {}: {} (error domain: {}, "
                              "error code: {})".format(
                                  urlstr, info.errorString, info.domain,
                                  info.error))
            return False
        else:
            error_str = info.errorString
            if error_str == networkmanager.HOSTBLOCK_ERROR_STRING:
                # We don't set error_occurred in this case.
                error_str = "Request blocked by host blocker."
                main_frame = info.frame.page().mainFrame()
                if info.frame != main_frame:
                    # Content in an iframe -> Hide the frame so it doesn't use
                    # any space. We can't hide the frame's documentElement
                    # directly though.
                    for elem in main_frame.documentElement().findAll('iframe'):
                        if QUrl(elem.attribute('src')) == info.url:
                            elem.setAttribute('style', 'display: none')
                    return False
            else:
                self._ignore_load_started = True
                self.error_occurred = True
            log.webview.error("Error while loading {}: {}".format(
                urlstr, error_str))
            log.webview.debug("Error domain: {}, error code: {}".format(
                info.domain, info.error))
            title = "Error loading page: {}".format(urlstr)
            error_html = jinja.render(
                'error.html',
                title=title, url=urlstr, error=error_str)
            errpage.content = error_html.encode('utf-8')
            errpage.encoding = 'utf-8'
            return True
    def chooseFile(self, parent_frame: QWebFrame, suggested_file: str) -> str:
        """Override chooseFile to (optionally) invoke custom file uploader."""
        handler = config.val.fileselect.handler
        if handler == "default":
            return super().chooseFile(parent_frame, suggested_file)
        assert handler == "external", handler
        selected = shared.choose_file(multiple=False)
        if not selected:
            # no file chosen -> empty path means "cancelled" to QtWebKit
            return ''
        else:
            return selected[0]
    def _handle_multiple_files(self, info, files):
        """Handle uploading of multiple files.
        Loosely based on Helpviewer/HelpBrowserWV.py from eric5.
        Args:
            info: The ChooseMultipleFilesExtensionOption instance.
            files: The ChooseMultipleFilesExtensionReturn instance to write
                   return values to.
        Return:
            True on success, the superclass return value on failure.
        """
        handler = config.val.fileselect.handler
        if handler == "default":
            suggested_file = ""
            if info.suggestedFileNames:
                suggested_file = info.suggestedFileNames[0]
            files.fileNames, _ = QFileDialog.getOpenFileNames(
                None, None, suggested_file)  # type: ignore[arg-type]
            return True
        assert handler == "external", handler
        files.fileNames = shared.choose_file(multiple=True)
        return True
    def shutdown(self):
        """Prepare the web page for being deleted."""
        self._is_shutting_down = True
        self.shutting_down.emit()
        download_manager = objreg.get('qtnetwork-download-manager')
        nam = self.networkAccessManager()
        # Keep the NAM alive (reparented to the download manager) while it
        # still has running downloads; otherwise shut it down right away.
        if download_manager.has_downloads_with_nam(nam):
            nam.setParent(download_manager)
        else:
            nam.shutdown()
    def display_content(self, reply, mimetype):
        """Display a QNetworkReply with an explicitly set mimetype."""
        self.mainFrame().setContent(reply.readAll(), mimetype, reply.url())
        reply.deleteLater()
    def on_print_requested(self, frame):
        """Handle printing when requested via javascript."""
        printdiag = QPrintDialog()
        # WA_DeleteOnClose makes Qt clean the dialog up once it's closed.
        printdiag.setAttribute(Qt.WA_DeleteOnClose)
        printdiag.open(lambda: frame.print(printdiag.printer()))
    def on_download_requested(self, request):
        """Called when the user wants to download a link.
        We need to construct a copy of the QNetworkRequest here as the
        download_manager needs it async and we'd get a segfault otherwise as
        soon as the user has entered the filename, as Qt seems to delete it
        after this slot returns.
        """
        req = QNetworkRequest(request)
        download_manager = objreg.get('qtnetwork-download-manager')
        download_manager.get_request(req, qnam=self.networkAccessManager())
    @pyqtSlot('QNetworkReply*')
    def on_unsupported_content(self, reply):
        """Handle an unsupportedContent signal.
        Most likely this will mean we need to download the reply, but we
        correct for some common errors the server do.
        At some point we might want to implement the MIME Sniffing standard
        here: http://mimesniff.spec.whatwg.org/
        """
        inline, suggested_filename = http.parse_content_disposition(reply)
        download_manager = objreg.get('qtnetwork-download-manager')
        if not inline:
            # Content-Disposition: attachment -> force download
            download_manager.fetch(reply,
                                   suggested_filename=suggested_filename)
            return
        mimetype, _rest = http.parse_content_type(reply)
        if mimetype == 'image/jpg':
            # Some servers (e.g. the LinkedIn CDN) send a non-standard
            # image/jpg (instead of image/jpeg, defined in RFC 1341 section
            # 7.5). If this is the case, we force displaying with a corrected
            # mimetype.
            if reply.isFinished():
                self.display_content(reply, 'image/jpeg')
            else:
                reply.finished.connect(functools.partial(
                    self.display_content, reply, 'image/jpeg'))
        elif pdfjs.should_use_pdfjs(mimetype, reply.url()):
            download_manager.fetch(reply,
                                   target=downloads.PDFJSDownloadTarget(),
                                   auto_remove=True)
        else:
            # Unknown mimetype, so download anyways.
            download_manager.fetch(reply,
                                   suggested_filename=suggested_filename)
    @pyqtSlot()
    def on_load_started(self):
        """Reset error_occurred when loading of a new page started."""
        if self._ignore_load_started:
            self._ignore_load_started = False
        else:
            self.error_occurred = False
    def _inject_userjs(self, frame):
        """Inject user JavaScripts into the page.
        Args:
            frame: The QWebFrame to inject the user scripts into.
        """
        if sip.isdeleted(frame):
            log.greasemonkey.debug("_inject_userjs called for deleted frame!")
            return
        url = frame.url()
        if url.isEmpty():
            url = frame.requestedUrl()
        log.greasemonkey.debug("_inject_userjs called for {} ({})"
                               .format(frame, url.toDisplayString()))
        scripts = greasemonkey.gm_manager.scripts_for(url)
        # QtWebKit has trouble providing us with signals representing
        # page load progress at reasonable times, so we just load all
        # scripts on the same event.
        toload = scripts.start + scripts.end + scripts.idle
        if url.isEmpty():
            # This happens during normal usage like with view source but may
            # also indicate a bug.
            log.greasemonkey.debug("Not running scripts for frame with no "
                                   "url: {}".format(frame))
            assert not toload, toload
        for script in toload:
            if frame is self.mainFrame() or script.runs_on_sub_frames:
                log.webview.debug('Running GM script: {}'.format(script.name))
                frame.evaluateJavaScript(script.code())
    @pyqtSlot('QWebFrame*', 'QWebPage::Feature')
    def _on_feature_permission_requested(self, frame, feature):
        """Ask the user for approval for geolocation/notifications."""
        if not isinstance(frame, QWebFrame):  # pragma: no cover
            # This makes no sense whatsoever, but someone reported this being
            # called with a QBuffer...
            log.misc.error("on_feature_permission_requested got called with "
                           "{!r}!".format(frame))
            return
        options = {
            QWebPage.Notifications: 'content.notifications',
            QWebPage.Geolocation: 'content.geolocation',
        }
        messages = {
            QWebPage.Notifications: 'show notifications',
            QWebPage.Geolocation: 'access your location',
        }
        yes_action = functools.partial(
            self.setFeaturePermission, frame, feature,
            QWebPage.PermissionGrantedByUser)
        no_action = functools.partial(
            self.setFeaturePermission, frame, feature,
            QWebPage.PermissionDeniedByUser)
        url = frame.url().adjusted(cast(QUrl.FormattingOptions,
                                       QUrl.RemoveUserInfo |
                                       QUrl.RemovePath |
                                       QUrl.RemoveQuery |
                                       QUrl.RemoveFragment))
        question = shared.feature_permission(
            url=url,
            option=options[feature], msg=messages[feature],
            yes_action=yes_action, no_action=no_action,
            abort_on=[self.shutting_down, self.loadStarted])
        if question is not None:
            self.featurePermissionRequestCanceled.connect(  # type: ignore[attr-defined]
                functools.partial(self._on_feature_permission_cancelled,
                                  question, frame, feature))
    def _on_feature_permission_cancelled(self, question, frame, feature,
                                         cancelled_frame, cancelled_feature):
        """Slot invoked when a feature permission request was cancelled.
        To be used with functools.partial.
        """
        if frame is cancelled_frame and feature == cancelled_feature:
            try:
                question.abort()
            except RuntimeError:
                # The question could already be deleted, e.g. because it was
                # aborted after a loadStarted signal.
                pass
    def on_save_frame_state_requested(self, frame, item):
        """Save scroll position and zoom in history.
        Args:
            frame: The QWebFrame which gets saved.
            item: The QWebHistoryItem to be saved.
        """
        if frame != self.mainFrame():
            return
        data = {
            'zoom': frame.zoomFactor(),
            'scroll-pos': frame.scrollPosition(),
        }
        item.setUserData(data)
    def on_restore_frame_state_requested(self, frame):
        """Restore scroll position and zoom from history.
        Args:
            frame: The QWebFrame which gets restored.
        """
        if frame != self.mainFrame():
            return
        data = self.history().currentItem().userData()
        if data is None:
            return
        if 'zoom' in data:
            frame.page().view().tab.zoom.set_factor(data['zoom'])
        # NOTE(review): scroll position is only restored while the frame is
        # still at (0, 0) - presumably to avoid clobbering a scroll that
        # already happened; confirm before changing.
        if 'scroll-pos' in data and frame.scrollPosition() == QPoint(0, 0):
            frame.setScrollPosition(data['scroll-pos'])
    def userAgentForUrl(self, url):
        """Override QWebPage::userAgentForUrl to customize the user agent."""
        # Pass None for invalid URLs; websettings.user_agent then has no
        # URL-specific override to apply.
        if not url.isValid():
            url = None
        return websettings.user_agent(url)
    def supportsExtension(self, ext):
        """Override QWebPage::supportsExtension to provide error pages.
        Args:
            ext: The extension to check for.
        Return:
            True if the extension can be handled, False otherwise.
        """
        return ext in self._extension_handlers
    def extension(self, ext, opt, out):
        """Override QWebPage::extension to provide error pages.
        Args:
            ext: The extension.
            opt: Extension options instance.
            out: Extension output instance.
        Return:
            Handler return value.
        """
        try:
            handler = self._extension_handlers[ext]
        except KeyError:
            log.webview.warning("Extension {} not supported!".format(ext))
            return super().extension(ext, opt, out)
        return handler(opt, out)
    def javaScriptAlert(self, frame, js_msg):
        """Override javaScriptAlert to use qutebrowser prompts."""
        if self._is_shutting_down:
            return
        try:
            shared.javascript_alert(frame.url(), js_msg,
                                    abort_on=[self.loadStarted,
                                              self.shutting_down])
        except shared.CallSuper:
            super().javaScriptAlert(frame, js_msg)
    def javaScriptConfirm(self, frame, js_msg):
        """Override javaScriptConfirm to use the statusbar."""
        if self._is_shutting_down:
            return False
        try:
            return shared.javascript_confirm(frame.url(), js_msg,
                                             abort_on=[self.loadStarted,
                                                       self.shutting_down])
        except shared.CallSuper:
            return super().javaScriptConfirm(frame, js_msg)
    def javaScriptConsoleMessage(self, msg, line, source):
        """Override javaScriptConsoleMessage to use debug log."""
        # This QtWebKit callback carries no log level, so record "unknown".
        shared.javascript_log_message(usertypes.JsLogLevel.unknown,
                                      source, line, msg)
    def acceptNavigationRequest(self,
                                frame: QWebFrame,
                                request: QNetworkRequest,
                                typ: QWebPage.NavigationType) -> bool:
        """Override acceptNavigationRequest to handle clicked links.
        Setting linkDelegationPolicy to DelegateAllLinks and using a slot bound
        to linkClicked won't work correctly, because when in a frameset, we
        have no idea in which frame the link should be opened.
        Checks if it should open it in a tab (middle-click or control) or not,
        and then conditionally opens the URL here or in another tab/window.
        """
        type_map = {
            QWebPage.NavigationTypeLinkClicked:
                usertypes.NavigationRequest.Type.link_clicked,
            QWebPage.NavigationTypeFormSubmitted:
                usertypes.NavigationRequest.Type.form_submitted,
            QWebPage.NavigationTypeFormResubmitted:
                usertypes.NavigationRequest.Type.form_resubmitted,
            QWebPage.NavigationTypeBackOrForward:
                usertypes.NavigationRequest.Type.back_forward,
            QWebPage.NavigationTypeReload:
                usertypes.NavigationRequest.Type.reloaded,
            QWebPage.NavigationTypeOther:
                usertypes.NavigationRequest.Type.other,
        }
        is_main_frame = frame is self.mainFrame()
        navigation = usertypes.NavigationRequest(url=request.url(),
                                                 navigation_type=type_map[typ],
                                                 is_main_frame=is_main_frame)
        if navigation.navigation_type == navigation.Type.reloaded:
            self.reloading.emit(navigation.url)
        self.navigation_request.emit(navigation)
        return navigation.accepted
| forkbong/qutebrowser | qutebrowser/browser/webkit/webpage.py | Python | gpl-3.0 | 22,693 |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import os
import json
from fx_desktop_build import FxDesktopBuild
from mozharness.base.config import parse_config_file
from mozharness.base.vcs.vcsbase import VCSMixin
from mozharness.mozilla.l10n.locales import GaiaLocalesMixin
class B2GDesktopBuild(FxDesktopBuild, GaiaLocalesMixin, VCSMixin, object):
    """Firefox-desktop-style build that additionally checks out gaia
    and its l10n repositories, as required for B2G desktop builds.
    """
    def _checkout_gaia(self):
        """Clone/update the gaia repo at the revision pinned in
        b2g/config/gaia.json and record it as a buildbot property."""
        # Checkout gaia; read gaia.json
        try:
            dirs = self.query_abs_dirs()
            gaia_json_path = os.path.join(dirs['abs_src_dir'], 'b2g', 'config', 'gaia.json')
            self.info("loading %s" % gaia_json_path)
            gaia_json = json.load(open(gaia_json_path))
            self.debug("got %s" % gaia_json)
            gaia_dir = os.path.join(dirs['abs_src_dir'], 'gaia')
            vcs_checkout_kwargs = {
                'vcs': 'gittool',
                'repo': gaia_json['git']['remote'],
                'revision': gaia_json['git']['git_revision'],
                'dest': gaia_dir,
            }
            gaia_rev = self.vcs_checkout(**vcs_checkout_kwargs)
            self.set_buildbot_property('gaia_revision', gaia_rev, write_to_file=True)
        except Exception:
            self.exception("failed to checkout gaia")
            raise
    def _checkout_gaia_l10n(self):
        """Pull the gaia locale repositories listed in
        b2g/config/desktop/config.json and point the gaia build system at
        them via LOCALE_BASEDIR/LOCALES_FILE."""
        # Checkout gaia l10n
        try:
            dirs = self.query_abs_dirs()
            config_json_path = os.path.join(dirs['abs_src_dir'], 'b2g', 'config', 'desktop', 'config.json')
            self.info("loading %s" % config_json_path)
            config_json = json.load(open(config_json_path))
            self.debug("got %s" % config_json)
            l10n_config = config_json['gaia']['l10n']
            languages_file = os.path.join(dirs['abs_src_dir'], 'gaia/locales/languages_all.json')
            l10n_base_dir = os.path.join(dirs['abs_work_dir'], 'build-gaia-l10n')
            # Setup the environment for the gaia build system to find the locales
            env = self.query_env()
            env['LOCALE_BASEDIR'] = l10n_base_dir
            env['LOCALES_FILE'] = languages_file
            self.pull_gaia_locale_source(l10n_config, parse_config_file(languages_file).keys(), l10n_base_dir)
        except Exception:
            self.exception("failed to clone gaia l10n repos")
            raise
    def _checkout_source(self):
        """Extend the desktop source checkout with gaia and gaia-l10n."""
        super(B2GDesktopBuild, self)._checkout_source()
        self._checkout_gaia()
        self._checkout_gaia_l10n()
# Script entry point: build and exit with the build's status code.
if __name__ == '__main__':
    b2g_desktop_build = B2GDesktopBuild()
    b2g_desktop_build.run_and_exit()
| cstipkovic/spidermonkey-research | testing/mozharness/scripts/b2g_desktop_build.py | Python | mpl-2.0 | 2,835 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_jsonrpc,
start_nodes,
)
class NamedArgumentTest(BitcoinTestFramework):
    """Test named arguments on RPC calls."""
    def __init__(self):
        super().__init__()
        self.num_nodes = 1
        self.setup_clean_chain = False
    def setup_network(self, split=False):
        # A single node is enough; no splitting needed for this test.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        node = self.nodes[0]
        # Named argument accepted by help, unknown names rejected.
        help_text = node.help(command='getinfo')
        assert help_text.startswith('getinfo\n')
        assert_raises_jsonrpc(-8, 'Unknown named parameter',
                              node.help, random='getinfo')
        # Named arguments thread through chained calls.
        genesis_hash = node.getblockhash(height=0)
        node.getblock(blockhash=genesis_hash)
        # echo mirrors its (sparse) named arguments back positionally.
        assert_equal(node.echo(), [])
        assert_equal(node.echo(arg0=0, arg9=9), [0] + [None] * 8 + [9])
        assert_equal(node.echo(arg1=1), [None, 1])
        assert_equal(node.echo(arg9=None), [None] * 10)
        assert_equal(node.echo(arg0=0, arg3=3, arg9=9),
                     [0, None, None, 3] + [None] * 5 + [9])
# Script entry point for running this functional test standalone.
if __name__ == '__main__':
    NamedArgumentTest().main()
| Cocosoft/bitcoin | qa/rpc-tests/rpcnamedargs.py | Python | mit | 1,437 |
# Webservices python tester
| zealhydra/Webservices | websec.py | Python | gpl-2.0 | 29 |
from django import forms
from django.conf.global_settings import LANGUAGES
from liqpay.constants import CURRENCIES, ACTIONS
class ApiForm(forms.Form):
    """Declares the fields of a LiqPay API request payload."""
    # API protocol version.
    version = forms.IntegerField()
    # Merchant public key (5-20 characters).
    public_key = forms.CharField(max_length=20, min_length=5)
    # Payment action, restricted to the ACTIONS choices.
    action = forms.ChoiceField(choices=ACTIONS)
    # Payment amount and its currency.
    amount = forms.FloatField()
    currency = forms.ChoiceField(choices=CURRENCIES)
    # Free-form payment description shown to the customer.
    description = forms.CharField()
    # Merchant-side order identifier.
    order_id = forms.CharField(max_length=255)
    # Redirect target after payment and server callback endpoint.
    result_url = forms.URLField()
    server_url = forms.URLField()
    # UI language for the payment page.
    language = forms.ChoiceField(choices=LANGUAGES)
class CheckoutForm(forms.Form):
    """Hidden-field form POSTed to the checkout endpoint given by *action*."""
    method = 'POST'
    # Signed payload pair submitted as hidden inputs.
    data = forms.CharField(widget=forms.HiddenInput)
    signature = forms.CharField(widget=forms.HiddenInput)
    def __init__(self, action, *args, **kwargs):
        # Remember the target URL before delegating to the base Form.
        self.action = action
        super(CheckoutForm, self).__init__(*args, **kwargs)
| pmaigutyak/mp-shop | liqpay/forms.py | Python | isc | 904 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import time
import signal
from os.path import basename, dirname, join
from random import shuffle
from swift import gettext_ as _
from contextlib import closing
from eventlet import Timeout
from swift.obj import diskfile, replicator
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist,\
DiskFileDeleted, DiskFileExpired, QuarantineRequest
from swift.common.daemon import Daemon
from swift.common.storage_policy import POLICIES
from swift.common.utils import (
config_auto_int_value, dump_recon_cache, get_logger, list_from_csv,
listdir, load_pkg_resource, parse_prefixed_conf, ratelimit_sleep,
readconf, round_robin_iter, unlink_paths_older_than, PrefixLoggerAdapter)
class AuditorWorker(object):
"""Walk through file system to audit objects"""
    def __init__(self, conf, logger, rcache, devices, zero_byte_only_at_fps=0,
                 watcher_defs=None):
        """Set up rate limits, counters, and watchers for one audit pass.

        :param conf: [object-auditor] configuration dict
        :param logger: logger to emit progress/errors to
        :param rcache: path to the recon cache file
        :param devices: base directory containing object devices
        :param zero_byte_only_at_fps: when non-zero, run in ZBF mode at
            this files-per-second limit
        :param watcher_defs: mapping of watcher name -> {'klass', 'conf'}
        """
        if watcher_defs is None:
            watcher_defs = {}
        self.conf = conf
        self.logger = logger
        self.devices = devices
        self.max_files_per_second = float(conf.get('files_per_second', 20))
        self.max_bytes_per_second = float(conf.get('bytes_per_second',
                                                   10000000))
        try:
            # ideally unless ops overrides the rsync_tempfile_timeout in the
            # auditor section we can base our behavior on whatever they
            # configure for their replicator
            replicator_config = readconf(self.conf['__file__'],
                                         'object-replicator')
        except (KeyError, ValueError, IOError):
            # if we can't parse the real config (generally a KeyError on
            # __file__, or ValueError on no object-replicator section, or
            # IOError if reading the file failed) we use
            # a very conservative default for rsync_timeout
            default_rsync_timeout = 86400
        else:
            replicator_rsync_timeout = int(replicator_config.get(
                'rsync_timeout', replicator.DEFAULT_RSYNC_TIMEOUT))
            # Here we can do some light math for ops and use the *replicator's*
            # rsync_timeout (plus 15 mins to avoid deleting local tempfiles
            # before the remote replicator kills it's rsync)
            default_rsync_timeout = replicator_rsync_timeout + 900
            # there's not really a good reason to assume the replicator
            # section's reclaim_age is more appropriate than the reconstructor
            # reclaim_age - but we're already parsing the config so we can set
            # the default value in our config if it's not already set
            if 'reclaim_age' in replicator_config:
                conf.setdefault('reclaim_age',
                                replicator_config['reclaim_age'])
        self.rsync_tempfile_timeout = config_auto_int_value(
            self.conf.get('rsync_tempfile_timeout'), default_rsync_timeout)
        self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger)
        # ZBF mode audits only zero-byte files, at its own fps limit.
        self.auditor_type = 'ALL'
        self.zero_byte_only_at_fps = zero_byte_only_at_fps
        if self.zero_byte_only_at_fps:
            self.max_files_per_second = float(self.zero_byte_only_at_fps)
            self.auditor_type = 'ZBF'
        self.log_time = int(conf.get('log_time', 3600))
        self.last_logged = 0
        # Running-time/counter state consumed by the audit loop and
        # periodic stats logging.
        self.files_running_time = 0
        self.bytes_running_time = 0
        self.bytes_processed = 0
        self.total_bytes_processed = 0
        self.total_files_processed = 0
        self.passes = 0
        self.quarantines = 0
        self.errors = 0
        self.rcache = rcache
        # Histogram bucket edges for object-size stats; 'OVER' catches
        # everything above the largest configured edge.
        self.stats_sizes = sorted(
            [int(s) for s in list_from_csv(conf.get('object_size_stats'))])
        self.stats_buckets = dict(
            [(s, 0) for s in self.stats_sizes + ['OVER']])
        self.watchers = [
            WatcherWrapper(wdef['klass'], name, wdef['conf'], logger)
            for name, wdef in watcher_defs.items()]
        logger.debug("%d audit watcher(s) loaded", len(self.watchers))
def create_recon_nested_dict(self, top_level_key, device_list, item):
    """
    Wrap *item* for the recon cache under *top_level_key*.

    When *device_list* is given, the item is nested one level deeper
    under a stable key built from the sorted device names, so stats for
    different device subsets do not clobber each other.
    """
    if not device_list:
        return {top_level_key: item}
    # Sorting before joining makes the per-device-set key deterministic.
    return {top_level_key: {''.join(sorted(device_list)): item}}
def audit_all_objects(self, mode='once', device_dirs=None):
    """
    Audit every object location yielded by every storage policy.

    :param mode: 'once' or 'forever'; only affects log messages here.
    :param device_dirs: optional list of device directories to restrict
        the audit to; None audits all devices.
    """
    description = ''
    if device_dirs:
        device_dir_str = ','.join(sorted(device_dirs))
        if self.auditor_type == 'ALL':
            description = _(' - parallel, %s') % device_dir_str
        else:
            description = _(' - %s') % device_dir_str
    self.logger.info(_('Begin object audit "%(mode)s" mode (%(audi_type)s'
                       '%(description)s)') %
                     {'mode': mode, 'audi_type': self.auditor_type,
                      'description': description})
    # Tell every registered audit watcher that a pass is starting.
    for watcher in self.watchers:
        watcher.start(self.auditor_type)
    begin = reported = time.time()
    self.total_bytes_processed = 0
    self.total_files_processed = 0
    total_quarantines = 0
    total_errors = 0
    time_auditing = 0
    # get AuditLocations for each policy
    loc_generators = []
    for policy in POLICIES:
        loc_generators.append(
            self.diskfile_router[policy]
            .object_audit_location_generator(
                policy, device_dirs=device_dirs,
                auditor_type=self.auditor_type))
    # Interleave locations from each policy's generator.
    all_locs = round_robin_iter(loc_generators)
    for location in all_locs:
        loop_time = time.time()
        # failsafe_object_audit counts errors instead of raising.
        self.failsafe_object_audit(location)
        self.logger.timing_since('timing', loop_time)
        # Throttle to the configured files-per-second rate.
        self.files_running_time = ratelimit_sleep(
            self.files_running_time, self.max_files_per_second)
        self.total_files_processed += 1
        now = time.time()
        if now - self.last_logged >= self.log_time:
            # Periodic progress report: log, dump to the recon cache,
            # then reset the per-interval counters.
            self.logger.info(_(
                'Object audit (%(type)s). '
                'Since %(start_time)s: Locally: %(passes)d passed, '
                '%(quars)d quarantined, %(errors)d errors, '
                'files/sec: %(frate).2f, bytes/sec: %(brate).2f, '
                'Total time: %(total).2f, Auditing time: %(audit).2f, '
                'Rate: %(audit_rate).2f') % {
                    'type': '%s%s' % (self.auditor_type, description),
                    'start_time': time.ctime(reported),
                    'passes': self.passes, 'quars': self.quarantines,
                    'errors': self.errors,
                    'frate': self.passes / (now - reported),
                    'brate': self.bytes_processed / (now - reported),
                    'total': (now - begin), 'audit': time_auditing,
                    'audit_rate': time_auditing / (now - begin)})
            cache_entry = self.create_recon_nested_dict(
                'object_auditor_stats_%s' % (self.auditor_type),
                device_dirs,
                {'errors': self.errors, 'passes': self.passes,
                 'quarantined': self.quarantines,
                 'bytes_processed': self.bytes_processed,
                 'start_time': reported, 'audit_time': time_auditing})
            dump_recon_cache(cache_entry, self.rcache, self.logger)
            reported = now
            # Interval counters roll up into the pass totals.
            total_quarantines += self.quarantines
            total_errors += self.errors
            self.passes = 0
            self.quarantines = 0
            self.errors = 0
            self.bytes_processed = 0
            self.last_logged = now
        time_auditing += (now - loop_time)
    # Avoid divide by zero during very short runs
    elapsed = (time.time() - begin) or 0.000001
    self.logger.info(_(
        'Object audit (%(type)s) "%(mode)s" mode '
        'completed: %(elapsed).02fs. Total quarantined: %(quars)d, '
        'Total errors: %(errors)d, Total files/sec: %(frate).2f, '
        'Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, '
        'Rate: %(audit_rate).2f') % {
            'type': '%s%s' % (self.auditor_type, description),
            'mode': mode, 'elapsed': elapsed,
            'quars': total_quarantines + self.quarantines,
            'errors': total_errors + self.errors,
            'frate': self.total_files_processed / elapsed,
            'brate': self.total_bytes_processed / elapsed,
            'audit': time_auditing, 'audit_rate': time_auditing / elapsed})
    for watcher in self.watchers:
        watcher.end()
    if self.stats_sizes:
        self.logger.info(
            _('Object audit stats: %s') % json.dumps(self.stats_buckets))
    for policy in POLICIES:
        # Unset remaining partitions to not skip them in the next run
        self.diskfile_router[policy].clear_auditor_status(
            policy,
            self.auditor_type)
def record_stats(self, obj_size):
    """
    Tally *obj_size* into the size histogram configured by the
    ``object_size_stats`` setting.  Each configured value is the upper
    bound (inclusive) of a bucket; sizes above the largest bound are
    counted under "OVER".

    For example with ``object_size_stats = 10, 100, 1024`` and objects
    of 5, 20 and 10000 bytes the resulting log entry is
    ``{"10": 1, "100": 1, "1024": 0, "OVER": 1}``.
    """
    # stats_sizes is kept sorted, so the first bound that fits wins.
    bucket = next((s for s in self.stats_sizes if obj_size <= s), "OVER")
    self.stats_buckets[bucket] += 1
def failsafe_object_audit(self, location):
    """
    Entrypoint to object_audit wrapped in a catch-all handler, so one
    bad object cannot abort the audit pass: failures are counted and
    logged instead of propagating.
    """
    try:
        self.object_audit(location)
    except (Exception, Timeout):
        self.errors += 1
        self.logger.increment('errors')
        self.logger.exception(_('ERROR Trying to audit %s'), location)
def object_audit(self, location):
    """
    Audits the given object location.

    :param location: an audit location
                     (from diskfile.object_audit_location_generator)
    """
    def raise_dfq(msg):
        # Passed to the diskfile reader as its quarantine hook so a
        # corrupt object surfaces as DiskFileQuarantined below.
        raise DiskFileQuarantined(msg)

    diskfile_mgr = self.diskfile_router[location.policy]
    # this method doesn't normally raise errors, even if the audit
    # location does not exist; if this raises an unexpected error it
    # will get logged in failsafe
    df = diskfile_mgr.get_diskfile_from_audit_location(location)
    reader = None
    try:
        with df.open(modernize=True):
            metadata = df.get_metadata()
            obj_size = int(metadata['Content-Length'])
            if self.stats_sizes:
                self.record_stats(obj_size)
            # ZBF auditor only checks metadata, never reads data.
            if obj_size and not self.zero_byte_only_at_fps:
                reader = df.reader(_quarantine_hook=raise_dfq)
        if reader:
            # Reading every chunk lets the diskfile verify checksums;
            # throttled to the configured bytes-per-second rate.
            with closing(reader):
                for chunk in reader:
                    chunk_len = len(chunk)
                    self.bytes_running_time = ratelimit_sleep(
                        self.bytes_running_time,
                        self.max_bytes_per_second,
                        incr_by=chunk_len)
                    self.bytes_processed += chunk_len
                    self.total_bytes_processed += chunk_len
        # Watchers may request quarantine of otherwise-valid objects.
        for watcher in self.watchers:
            try:
                watcher.see_object(
                    metadata,
                    df._ondisk_info['data_file'])
            except QuarantineRequest:
                raise df._quarantine(
                    df._data_file,
                    "Requested by %s" % watcher.watcher_name)
    except DiskFileQuarantined as err:
        self.quarantines += 1
        self.logger.error(_('ERROR Object %(obj)s failed audit and was'
                            ' quarantined: %(err)s'),
                          {'obj': location, 'err': err})
    except DiskFileExpired:
        pass  # ignore expired objects
    except DiskFileDeleted:
        # If there is a reclaimable tombstone, we'll invalidate the hash
        # to trigger the replicator to rehash/cleanup this suffix
        ts = df._ondisk_info['ts_info']['timestamp']
        if (not self.zero_byte_only_at_fps and
                (time.time() - float(ts)) > df.manager.reclaim_age):
            df.manager.invalidate_hash(dirname(df._datadir))
    except DiskFileNotExist:
        pass

    self.passes += 1
    # _ondisk_info attr is initialized to None and filled in by open
    ondisk_info_dict = df._ondisk_info or {}
    if 'unexpected' in ondisk_info_dict:
        # Clean up rsync temp files that outlived the replicator's own
        # rsync_timeout (see rsync_tempfile_timeout default above).
        is_rsync_tempfile = lambda fpath: (
            diskfile.RE_RSYNC_TEMPFILE.match(basename(fpath)))
        rsync_tempfile_paths = filter(is_rsync_tempfile,
                                      ondisk_info_dict['unexpected'])
        mtime = time.time() - self.rsync_tempfile_timeout
        unlink_paths_older_than(rsync_tempfile_paths, mtime)
class ObjectAuditor(Daemon):
    """
    Audit objects.

    The daemon forks one or more AuditorWorker child processes (plus an
    optional zero-byte-file scanner) and coordinates them with os.wait().
    """

    def __init__(self, conf, logger=None, **options):
        self.conf = conf
        self.logger = logger or get_logger(conf, log_route='object-auditor')
        self.devices = conf.get('devices', '/srv/node')
        self.concurrency = int(conf.get('concurrency', 1))
        # Rate for the dedicated zero-byte-file (ZBF) scanner process.
        self.conf_zero_byte_fps = int(
            conf.get('zero_byte_files_per_second', 50))
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, "object.recon")
        self.interval = int(conf.get('interval', 30))
        watcher_names = set(list_from_csv(conf.get('watchers', '')))
        # Normally '__file__' is always in config, but tests neglect it often.
        watcher_configs = \
            parse_prefixed_conf(conf['__file__'], 'object-auditor:watcher:') \
            if '__file__' in conf else {}
        self.watcher_defs = {}
        for name in watcher_names:
            self.logger.debug("Loading entry point '%s'", name)
            # Each watcher gets the daemon conf overlaid with its own
            # [object-auditor:watcher:<name>] section.
            wconf = dict(conf)
            wconf.update(watcher_configs.get(name, {}))
            self.watcher_defs[name] = {
                'conf': wconf,
                'klass': load_pkg_resource("swift.object_audit_watcher", name)}

    def _sleep(self):
        # Pause between audit passes / ZBF scanner restarts.
        time.sleep(self.interval)

    def clear_recon_cache(self, auditor_type):
        """Clear recon cache entries"""
        dump_recon_cache({'object_auditor_stats_%s' % auditor_type: {}},
                         self.rcache, self.logger)

    def run_audit(self, **kwargs):
        """Run the object audit"""
        mode = kwargs.get('mode')
        zero_byte_only_at_fps = kwargs.get('zero_byte_fps', 0)
        device_dirs = kwargs.get('device_dirs')
        worker = AuditorWorker(self.conf, self.logger, self.rcache,
                               self.devices,
                               zero_byte_only_at_fps=zero_byte_only_at_fps,
                               watcher_defs=self.watcher_defs)
        worker.audit_all_objects(mode=mode, device_dirs=device_dirs)

    def fork_child(self, zero_byte_fps=False, sleep_between_zbf_scanner=False,
                   **kwargs):
        """
        Child execution.

        Parent returns the child's pid; the child runs one audit and
        exits (sys.exit in the finally guarantees it never falls back
        into the parent's loop).
        """
        pid = os.fork()
        if pid:
            return pid
        else:
            # Child: restore default SIGTERM so it can be killed cleanly.
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            if zero_byte_fps:
                kwargs['zero_byte_fps'] = self.conf_zero_byte_fps
            if sleep_between_zbf_scanner:
                self._sleep()
            try:
                self.run_audit(**kwargs)
            except Exception as e:
                self.logger.exception(
                    _("ERROR: Unable to run auditing: %s") % e)
            finally:
                sys.exit()

    def audit_loop(self, parent, zbo_fps, override_devices=None, **kwargs):
        """
        Parallel audit loop.

        :param parent: if true, run a single (ZBF-only) audit in-process
            instead of forking children.
        :param zbo_fps: zero-byte-only files-per-second rate for the
            parent mode.
        :param override_devices: optional device list to audit.
        """
        self.clear_recon_cache('ALL')
        self.clear_recon_cache('ZBF')
        once = kwargs.get('mode') == 'once'
        kwargs['device_dirs'] = override_devices
        if parent:
            kwargs['zero_byte_fps'] = zbo_fps
            self.run_audit(**kwargs)
        else:
            pids = set()
            if self.conf_zero_byte_fps:
                # Dedicated ZBF scanner process runs alongside the others.
                zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
                pids.add(zbf_pid)
            if self.concurrency == 1:
                # Audit all devices in 1 process
                pids.add(self.fork_child(**kwargs))
            else:
                # Divide devices amongst parallel processes set by
                # self.concurrency. Total number of parallel processes
                # is self.concurrency + 1 if zero_byte_fps.
                parallel_proc = self.concurrency + 1 if \
                    self.conf_zero_byte_fps else self.concurrency
                device_list = list(override_devices) if override_devices else \
                    listdir(self.devices)
                shuffle(device_list)
                while device_list:
                    pid = None
                    if len(pids) == parallel_proc:
                        # At capacity: wait for any child to finish.
                        pid = os.wait()[0]
                        pids.discard(pid)

                    if self.conf_zero_byte_fps and pid == zbf_pid and once:
                        # If we're only running one pass and the ZBF scanner
                        # finished, don't bother restarting it.
                        zbf_pid = -100
                    elif self.conf_zero_byte_fps and pid == zbf_pid:
                        # When we're running forever, the ZBF scanner must
                        # be restarted as soon as it finishes.
                        kwargs['device_dirs'] = override_devices
                        # sleep between ZBF scanner forks
                        self._sleep()
                        zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
                        pids.add(zbf_pid)
                    else:
                        kwargs['device_dirs'] = [device_list.pop()]
                        pids.add(self.fork_child(**kwargs))
            while pids:
                pid = os.wait()[0]
                # ZBF scanner must be restarted as soon as it finishes
                # unless we're in run-once mode
                if self.conf_zero_byte_fps and pid == zbf_pid and \
                        len(pids) > 1 and not once:
                    kwargs['device_dirs'] = override_devices
                    # sleep between ZBF scanner forks
                    zbf_pid = self.fork_child(zero_byte_fps=True,
                                              sleep_between_zbf_scanner=True,
                                              **kwargs)
                    pids.add(zbf_pid)
                pids.discard(pid)

    def run_forever(self, *args, **kwargs):
        """Run the object audit until stopped."""
        # zero byte only command line option
        zbo_fps = kwargs.get('zero_byte_fps', 0)
        parent = False
        if zbo_fps:
            # only start parent
            parent = True
        kwargs = {'mode': 'forever'}

        while True:
            try:
                self.audit_loop(parent, zbo_fps, **kwargs)
            except (Exception, Timeout) as err:
                self.logger.exception(_('ERROR auditing: %s'), err)
            self._sleep()

    def run_once(self, *args, **kwargs):
        """Run the object audit once"""
        # zero byte only command line option
        zbo_fps = kwargs.get('zero_byte_fps', 0)
        override_devices = list_from_csv(kwargs.get('devices'))
        # Remove bogus entries and duplicates from override_devices
        override_devices = list(
            set(listdir(self.devices)).intersection(set(override_devices)))
        parent = False
        if zbo_fps:
            # only start parent
            parent = True
        kwargs = {'mode': 'once'}

        try:
            self.audit_loop(parent, zbo_fps, override_devices=override_devices,
                            **kwargs)
        except (Exception, Timeout) as err:
            self.logger.exception(_('ERROR auditing: %s'), err)
class WatcherWrapper(object):
    """
    Run the user-supplied watcher.

    Simple and gets the job done. Note that we aren't doing anything
    to isolate ourselves from hangs or file descriptor leaks
    in the plugins.

    Once construction, start() or end() fails, ``watcher_in_error`` is
    set and every later lifecycle call becomes a no-op.  A failure in
    see_object() is only logged: one bad object must not disable the
    watcher for all the others.
    """

    def __init__(self, watcher_class, watcher_name, conf, logger):
        """
        :param watcher_class: plugin class implementing
            start/see_object/end
        :param watcher_name: entry-point name, used as the log prefix
        :param conf: configuration dict handed to the watcher
        :param logger: base logger; wrapped with a per-watcher prefix
        """
        self.watcher_name = watcher_name
        self.watcher_in_error = False
        self.logger = PrefixLoggerAdapter(logger, {})
        self.logger.set_prefix('[audit-watcher %s] ' % watcher_name)
        try:
            self.watcher = watcher_class(conf, self.logger)
        except (Exception, Timeout):
            # Typo fix: message used to read "intializing".
            self.logger.exception('Error initializing watcher')
            self.watcher_in_error = True

    def start(self, audit_type):
        """Signal the watcher that an audit pass ('ALL'/'ZBF') begins."""
        if self.watcher_in_error:
            return  # can't trust the state of the thing; bail
        try:
            self.watcher.start(audit_type=audit_type)
        except (Exception, Timeout):
            self.logger.exception('Error starting watcher')
            self.watcher_in_error = True

    def see_object(self, meta, data_file_path):
        """Feed one audited object (metadata + on-disk path) to the watcher."""
        if self.watcher_in_error:
            return  # can't trust the state of the thing; bail
        kwargs = {'object_metadata': meta,
                  'data_file_path': data_file_path}
        try:
            self.watcher.see_object(**kwargs)
        except QuarantineRequest:
            # Avoid extra logging.
            raise
        except (Exception, Timeout):
            self.logger.exception(
                'Error in see_object(meta=%r, data_file_path=%r)',
                meta, data_file_path)
            # Do *not* flag watcher as being in an error state; a failure
            # to process one object shouldn't impact the ability to process
            # others.

    def end(self):
        """Signal the watcher that the audit pass is complete."""
        if self.watcher_in_error:
            return  # can't trust the state of the thing; bail
        kwargs = {}
        try:
            self.watcher.end(**kwargs)
        except (Exception, Timeout):
            self.logger.exception('Error ending watcher')
            self.watcher_in_error = True
| swiftstack/swift | swift/obj/auditor.py | Python | apache-2.0 | 23,402 |
import squeakspace.common.util as ut
import squeakspace.common.util_http as ht
import squeakspace.proxy.server.db_sqlite3 as db
import squeakspace.common.squeak_ex as ex
import config
def post_handler(environ):
    """Send a message through the proxy on behalf of the logged-in user."""
    query = ht.parse_post_request(environ)
    cookies = ht.parse_cookies(environ)

    # Session credentials travel in cookies; message data in the POST body.
    user_id = ht.get_required_cookie(cookies, 'user_id')
    session_id = ht.get_required_cookie(cookies, 'session_id')
    node_name = ht.get_required(query, 'node_name')
    to_user = ht.get_required(query, 'to_user')
    to_user_key_hash = ht.get_optional(query, 'to_user_key_hash')
    from_user_key_hash = ht.get_optional(query, 'from_user_key_hash')
    message = ht.get_required(query, 'message')
    passphrase = ht.get_optional(query, 'passphrase')
    force_encryption = ht.convert_bool(
            ht.get_optional(query, 'force_encryption'), 'force_encryption')

    connection = db.connect(config.db_path)
    try:
        cursor = db.cursor(connection)
        resp, local_gen = db.send_message(
                cursor, user_id, session_id, node_name, to_user,
                to_user_key_hash, from_user_key_hash, message,
                passphrase, force_encryption)
        (message_id, timestamp, message_hash,
         from_signature, proof_of_work) = local_gen
        db.commit(connection)
        # Responses are delivered by raising; ok_json builds the 200 reply.
        raise ht.ok_json({'status': 'ok',
                          'resp': resp,
                          'message_id': message_id,
                          'timestamp': timestamp,
                          'message_hash': message_hash,
                          'from_signature': from_signature,
                          'proof_of_work': proof_of_work})
    except ex.SqueakException as e:
        raise ht.convert_squeak_exception(e)
    finally:
        db.close(connection)
def get_handler(environ):
    """Fetch (and optionally decrypt/validate) a single stored message."""
    query = ht.parse_get_request(environ)
    cookies = ht.parse_cookies(environ)

    user_id = ht.get_required_cookie(cookies, 'user_id')
    session_id = ht.get_required_cookie(cookies, 'session_id')
    node_name = ht.get_required(query, 'node_name')
    message_id = ht.get_required(query, 'message_id')
    public_key_hash = ht.get_required(query, 'public_key_hash')
    passphrase = ht.get_optional(query, 'passphrase')
    to_key_passphrase = ht.get_optional(query, 'to_key_passphrase')
    decrypt_message = ht.convert_bool(
            ht.get_optional(query, 'decrypt_message'), 'decrypt_message')

    connection = db.connect(config.db_path)
    try:
        cursor = db.cursor(connection)
        resp, validation = db.read_message(
                cursor, user_id, session_id, node_name, message_id,
                public_key_hash, passphrase, to_key_passphrase,
                decrypt_message)
        # Read-only operation: no commit needed before responding.
        raise ht.ok_json({'status': 'ok',
                          'resp': resp,
                          'validation': validation})
    except ex.SqueakException as e:
        raise ht.convert_squeak_exception(e)
    finally:
        db.close(connection)
def delete_handler(environ):
    """Delete a stored message identified by node, id and key hash."""
    query = ht.parse_post_request(environ)
    cookies = ht.parse_cookies(environ)

    user_id = ht.get_required_cookie(cookies, 'user_id')
    session_id = ht.get_required_cookie(cookies, 'session_id')
    node_name = ht.get_required(query, 'node_name')
    message_id = ht.get_required(query, 'message_id')
    public_key_hash = ht.get_required(query, 'public_key_hash')
    passphrase = ht.get_optional(query, 'passphrase')

    connection = db.connect(config.db_path)
    try:
        cursor = db.cursor(connection)
        resp = db.delete_message(cursor, user_id, session_id, node_name,
                                 message_id, public_key_hash, passphrase)
        db.commit(connection)
        raise ht.ok_json({'status': 'ok', 'resp': resp})
    except ex.SqueakException as e:
        raise ht.convert_squeak_exception(e)
    finally:
        db.close(connection)
def main_handler(environ):
    """Route the request to the handler matching its HTTP method."""
    handlers = {'POST': post_handler,
                'GET': get_handler,
                'DELETE': delete_handler}
    ht.dispatch_on_method(environ, handlers)
def application(environ, start_response):
    """WSGI entry point: run main_handler with HTTP error translation."""
    return ht.respond_with_handler(environ, start_response, main_handler)
| eek6/squeakspace | www/proxy/scripts/proxy/message.py | Python | gpl-3.0 | 4,132 |
import httplib
import json
import urlparse
import error
from six import text_type
"""Implements HTTP transport for the WebDriver wire protocol."""
class Response(object):
    """
    Describes an HTTP response received from a remote end whose
    body has been read and parsed as appropriate.
    """

    def __init__(self, status, body):
        self.status = status  # integer HTTP status code
        self.body = body      # decoded JSON body

    def __repr__(self):
        cls_name = self.__class__.__name__
        if self.error:
            return "<%s status=%s error=%s>" % (cls_name, self.status, repr(self.error))
        # BUG FIX: this format string used to be "<% status=%s body=%s>".
        # The missing 's' made "% s" parse as a space-flagged conversion,
        # so the format expected three arguments but got two and raised
        # TypeError whenever a successful response was repr()'d.
        return "<%s status=%s body=%s>" % (cls_name, self.status, json.dumps(self.body))

    def __str__(self):
        return json.dumps(self.body, indent=2)

    @property
    def error(self):
        """Return a WebDriver error for non-200 responses, else None."""
        if self.status != 200:
            return error.from_response(self)
        return None

    @classmethod
    def from_http(cls, http_response, decoder=json.JSONDecoder, **kwargs):
        """
        Build a Response from a raw HTTP response, decoding its body
        as JSON with the given *decoder*.

        :raises ValueError: if the body is not valid JSON.
        """
        try:
            body = json.load(http_response, cls=decoder, **kwargs)
        except ValueError:
            # NOTE(review): json.load may already have consumed the
            # stream, so read() here can come back empty -- confirm
            # before relying on this message carrying the raw body.
            raise ValueError("Failed to decode response body as JSON:\n" +
                             http_response.read())
        return cls(http_response.status, body)
class HTTPWireProtocol(object):
    """
    Transports messages (commands and responses) over the WebDriver
    wire protocol.

    Complex objects, such as ``webdriver.Element``, ``webdriver.Frame``,
    and ``webdriver.Window`` are by default not marshaled to enable
    use of `session.transport.send` in WPT tests::

        session = webdriver.Session("127.0.0.1", 4444)
        response = transport.send("GET", "element/active", None)
        print response.body["value"]
        # => {u'element-6066-11e4-a52e-4f735466cecf': u'<uuid>'}

    Automatic marshaling is provided by ``webdriver.protocol.Encoder``
    and ``webdriver.protocol.Decoder``, which can be passed in to
    ``HTTPWireProtocol.send`` along with a reference to the current
    ``webdriver.Session``::

        session = webdriver.Session("127.0.0.1", 4444)
        response = transport.send("GET", "element/active", None,
            encoder=protocol.Encoder, decoder=protocol.Decoder,
            session=session)
        print response.body["value"]
        # => webdriver.Element
    """

    def __init__(self, host, port, url_prefix="/", timeout=None):
        """
        Construct interface for communicating with the remote server.

        :param host: Hostname of the remote WebDriver server.
        :param port: TCP port the remote server listens on.
        :param url_prefix: Path prefix prepended to command endpoints.
        :param timeout: Optional connection timeout in seconds.
        """
        self.host = host
        self.port = port
        self.url_prefix = url_prefix
        self._timeout = timeout

    def url(self, suffix):
        """Resolve the endpoint *suffix* against the URL prefix."""
        return urlparse.urljoin(self.url_prefix, suffix)

    def send(self,
             method,
             uri,
             body=None,
             headers=None,
             encoder=json.JSONEncoder,
             decoder=json.JSONDecoder,
             **codec_kwargs):
        """
        Send a command to the remote.

        The request `body` must be JSON serialisable unless a
        custom `encoder` has been provided. This means complex
        objects such as ``webdriver.Element``, ``webdriver.Frame``,
        and `webdriver.Window`` are not automatically made
        into JSON. This behaviour is, however, provided by
        ``webdriver.protocol.Encoder``, should you want it.

        Similarly, the response body is returned au natural
        as plain JSON unless a `decoder` that converts web
        element references to ``webdriver.Element`` is provided.
        Use ``webdriver.protocol.Decoder`` to achieve this behaviour.

        :param method: `GET`, `POST`, or `DELETE`.
        :param uri: Relative endpoint of the requests URL path.
        :param body: Body of the request. Defaults to an empty
            dictionary if ``method`` is `POST`.
        :param headers: Additional dictionary of headers to include
            in the request.
        :param encoder: JSON encoder class, which defaults to
            ``json.JSONEncoder`` unless specified.
        :param decoder: JSON decoder class, which defaults to
            ``json.JSONDecoder`` unless specified.
        :param codec_kwargs: Surplus arguments passed on to `encoder`
            and `decoder` on construction.

        :return: Instance of ``webdriver.transport.Response``
            describing the HTTP response received from the remote end.

        :raises ValueError: If `body` or the response body are not
            JSON serialisable.
        """
        if body is None and method == "POST":
            body = {}

        payload = None
        if body is not None:
            try:
                payload = json.dumps(body, cls=encoder, **codec_kwargs)
            except ValueError:
                raise ValueError("Failed to encode request body as JSON:\n"
                                 "%s" % json.dumps(body, indent=2))

            # BUG FIX: previously encoded ``body`` (usually a dict, which
            # has no .encode) instead of the serialised ``payload`` string,
            # raising AttributeError on every text payload.
            if isinstance(payload, text_type):
                payload = payload.encode("utf-8")

        if headers is None:
            headers = {}
        headers.update({'Connection': 'keep-alive'})

        url = self.url(uri)

        conn_kwargs = {}
        if self._timeout is not None:
            conn_kwargs["timeout"] = self._timeout

        conn = httplib.HTTPConnection(
            self.host, self.port, strict=True, **conn_kwargs)
        conn.request(method, url, payload, headers)

        try:
            response = conn.getresponse()
            return Response.from_http(
                response, decoder=decoder, **codec_kwargs)
        finally:
            conn.close()
| danlrobertson/servo | tests/wpt/web-platform-tests/tools/webdriver/webdriver/transport.py | Python | mpl-2.0 | 5,652 |
import state as st
import yaml
class Graph:
    '''
    A graph represented by a list of states (its nodes) and the alphabet
    (edge letters) it is built from.  States carry their own outedges,
    so the state list fully describes the graph.  Graphs can be saved
    to and loaded from YAML files.
    '''

    def __init__(self, states=None, alphabet=None, path=''):
        '''
        Build a graph either from a saved file (when *path* is given) or
        from explicit *states* and *alphabet* lists.

        BUG FIX: the defaults used to be mutable lists ([]), which Python
        evaluates once at definition time, so every Graph() constructed
        without arguments silently shared the same state and alphabet
        lists.  None sentinels give each instance fresh lists.
        '''
        if path:
            self.open_graph_file(path)
        else:
            self.states = states if states is not None else []
            self.alphabet = alphabet if alphabet is not None else []

    def save_graph_file(self, path):
        '''
        Save the graph as a YAML file holding [serialized states,
        alphabet].  Each state is serialized by State.serialize(), which
        replaces the destination state object in every outedge by the
        state's name; the object is restored by open_graph_file().
        '''
        savestates = [s.serialize() for s in self.states]
        save_graph = [savestates, self.alphabet]
        with open(path, 'w') as file_:
            yaml.dump(save_graph, file_)
        return

    def open_graph_file(self, path):
        '''
        Load a graph saved in the YAML format described above.  Outedges
        come back carrying destination *names*; reassign_dest_edges()
        swaps the real state objects back in.
        '''
        with open(path, 'r') as file_:
            # NOTE(review): yaml.load() uses the full (unsafe) loader;
            # do not open graph files from untrusted sources.
            savedstates, alph = yaml.load(file_)
        self.alphabet = alph
        states = []
        for saved in savedstates:
            edges = [tuple(edge) for edge in saved[1]]
            states.append(st.State(saved[0], edges))
        self.states = self.reassign_dest_edges(states)
        return

    @staticmethod
    def reassign_dest_edges(states):
        '''
        Replace the destination-state *name* stored at index 1 of each
        outedge tuple with the actual state object from *states* (or
        None when no state with that name exists).  Returns the patched
        list.
        '''
        for s in states:
            new_outedges = []
            for e in s.outedges:
                matches = [x for x in states if x.name == e[1]]
                e_dest = matches[0] if matches else None
                new_e = tuple(e_dest if i == 1 else element
                              for i, element in enumerate(e))
                new_outedges.append(new_e)
            s.outedges = new_outedges
        return states

    def root(self):
        '''
        Return the state labeled 'e', treated as the root of a rooted
        tree with probabilities, or an empty placeholder state.
        '''
        # NOTE(review): the else is attached to the if, so this returns
        # on the *first* iteration: either the first state (when named
        # 'e') or an empty placeholder.  A state named 'e' later in the
        # list is never found -- confirm whether a for/else search over
        # the whole list was intended.
        for s in self.states:
            if s.name == 'e':
                return s
            else:
                return st.State("", [])

    def state_named(self, state_name):
        '''Return the first state whose name is *state_name*, or None.'''
        for s in self.states:
            if s.name == state_name:
                return s
        return None

    def remove_unreachable_states(self):
        '''
        Return a new Graph keeping only states that appear as the
        destination of some outedge, repeating until the state count
        stops shrinking.  The alphabet is rebuilt from the labels still
        in use.

        NOTE(review): by this definition of "reachable", states with no
        incoming edge -- including any root -- are always dropped.
        '''
        old_size = len(self.states)
        reachable_states = []  # names of states that are some edge's target
        for s in self.states:
            for outedge in s.outedges:
                if outedge[1]:
                    if outedge[1].name not in reachable_states:
                        reachable_states.append(outedge[1].name)
        new_states = [x for x in self.states if x.name in reachable_states]
        # Rebuild the alphabet from the surviving edges' labels.
        new_alphabet = []
        for s in new_states:
            for outedge in s.outedges:
                if outedge[0] not in new_alphabet:
                    new_alphabet.append(outedge[0])
        reduced_graph = Graph(new_states, new_alphabet)
        if old_size != len(reduced_graph.states):
            # Dropping states may make others unreachable; iterate.
            reduced_graph = reduced_graph.remove_unreachable_states()
        return reduced_graph

    def __str__(self):
        '''Print every state (side effect) and return a summary line.'''
        for s in self.states:
            print(s)
        r = '****************************************\n'
        r += 'Number of states: ' + str(len(self.states)) + '\n'
        return r

    def print_state_named(self, n):
        '''Print the state named *n* (prints None when absent).'''
        print(self.state_named(n))
| franchenstein/master_project | graph.py | Python | mit | 7,296 |
"""Base material for signature backends."""
from django.urls import reverse
class SignatureBackend(object):
    """Encapsulate signature workflow and integration with vendor backend.

    Here is a typical workflow:

    * :class:`~django_anysign.models.SignatureType` instance is created. It
      encapsulates the backend type and its configuration.

    * A :class:`~django_anysign.models.Signature` instance is created.
      The signature instance has a signature type attribute, hence a backend.

    * Signers are notified, by email, text or whatever. They get an hyperlink
      to the "signer view". The URL may vary depending on the signature
      backend.

    * A signer goes to the backend's "signer view" entry point: typically a
      view that integrates backend specific form to sign a document.

    * Most backends have a "notification view", for the third-party service to
      signal updates.

    * Most backends have a "signer return view", where the signer is redirected
      when he ends the signature process (whatever signature status).

    * The backend's specific workflow can be made of several views. At the
      beginning, there is a Signature instance which carries data (typically a
      document). At the end, Signature is done.
    """

    def __init__(self, name, code, url_namespace='anysign', **kwargs):
        """Store backend identity and the URL namespace used for reversal."""
        self.name = name  # Human-readable name.
        self.code = code  # Machine-readable (lowercase, PEP-8) identifier.
        self.url_namespace = url_namespace  # Namespace for URL resolution.

    def send_signature(self, signature):
        """Initiate the signature process for a configured *signature*.

        Typical implementation consists in sending the signer URL to the
        first signer.  Backends that do not support this must leave the
        ``NotImplementedError`` in place.
        """
        raise NotImplementedError()

    def get_signer_url(self, signer):
        """Return URL where *signer* signs the document.

        Reverses :meth:`get_signer_url_name` with ``signer.pk``.  Raise
        ``NotImplementedError`` in subclasses lacking a "signer view".
        """
        return reverse(self.get_signer_url_name(), args=[signer.pk])

    def get_signer_url_name(self):
        """Return the URL name of the "signer view" (``<ns>:signer``)."""
        return '%s:signer' % self.url_namespace

    def get_signer_return_url(self, signer):
        """Return absolute URL where *signer* lands after signing.

        Must be absolute: the external signature service redirects the
        signer back to this Django site once signing ends.  Reverses
        :meth:`get_signer_return_url_name` with ``signer.pk``.
        """
        return reverse(
            self.get_signer_return_url_name(),
            args=[signer.pk])

    def get_signer_return_url_name(self):
        """Return the URL name of the "signer return view"
        (``<ns>:signer_return``)."""
        return '%s:signer_return' % self.url_namespace

    def get_signature_callback_url(self, signature):
        """Return URL where the vendor backend posts signature notifications.

        Reverses :meth:`get_signature_callback_url_name` with
        ``signature.pk``.
        """
        return reverse(
            self.get_signature_callback_url_name(),
            args=[signature.pk])

    def get_signature_callback_url_name(self):
        """Return the URL name of the notification endpoint
        (``<ns>:signature_callback``)."""
        return '%s:signature_callback' % self.url_namespace

    def create_signature(self, signature):
        """Register *signature* in the backend and return the updated object.

        Called by views that create Signature instances.  Backends that
        store a remote object should update ``signature_backend_id``;
        this base implementation returns the object unchanged.
        """
        return signature
| novafloss/django-anysign | django_anysign/backend.py | Python | bsd-3-clause | 5,255 |
'''
Created on Jul 14, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import argparse
from chimerascan.lib.seq import get_qual_conversion_func
from chimerascan.lib.base import parse_lines, open_compressed
import chimerascan.lib.config as config
def process_input_reads(fastq_files, output_prefix, quals, trim5, trim3):
    """
    uncompresses reads, renames reads using sequential fragment numbers,
    trims 5'/3' bases, and converts quality scores to 'sanger' format

    fastq_files: list of (possibly compressed) input FASTQ files
    output_prefix: prefix for renamed output FASTQ files and the read-name
        database text file
    quals: input quality encoding ('sanger', 'solexa' or 'illumina')
    trim5/trim3: number of bases trimmed from the 5'/3' end of each read

    Returns config.JOB_SUCCESS, or config.JOB_ERROR after removing any
    partially written output files.
    """
    # setup file iterators for input fastq files
    infhs = [open_compressed(f) for f in fastq_files]
    fqiters = [parse_lines(f, numlines=4) for f in infhs]
    # setup output files
    output_files = [(output_prefix + "_%d.fq" % (x+1))
                    for x in xrange(len(fastq_files))]
    outfhs = [open(f, "w") for f in output_files]
    read_name_file = output_prefix + ".txt"
    read_name_fh = open(read_name_file, 'w')
    # get quality score conversion function
    qual_func = get_qual_conversion_func(quals)
    linenum = 1
    try:
        while True:
            pelines = [it.next() for it in fqiters]
            # get read1 first line of fq record, and remove "@" symbol
            read1_name = pelines[0][0][1:]
            # remove whitespace and/or read number tags /1 or /2
            read1_name = read1_name.split()[0].split("/")[0]
            # write to read name database
            print >>read_name_fh, read1_name
            # convert reads
            for i,lines in enumerate(pelines):
                # rename read using line number
                lines[0] = "@%d/%d" % (linenum,i+1)
                # ignore redundant header
                lines[2] = "+"
                # trim read (keep at least one base: pos3p >= trim5+1)
                total_length = len(lines[1])
                pos3p = max(trim5+1, total_length - trim3)
                lines[1] = lines[1][trim5:pos3p]
                lines[3] = lines[3][trim5:pos3p]
                # convert quality score to sanger
                lines[3] = qual_func(lines[3])
                print >>outfhs[i], '\n'.join(lines)
            linenum += 1
    except StopIteration:
        # normal termination: one of the input files is exhausted
        pass
    except Exception:
        # BUG FIX: was a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit, and the input handles were leaked
        # on this path (only outputs were closed).
        logging.error("Unexpected error during FASTQ file processing")
        for fh in infhs:
            fh.close()
        for fh in outfhs:
            fh.close()
        read_name_fh.close()
        # remove partial outputs so downstream steps don't see them
        for f in output_files:
            if os.path.exists(f):
                os.remove(f)
        if os.path.exists(read_name_file):
            os.remove(read_name_file)
        return config.JOB_ERROR
    # cleanup
    for fh in infhs:
        fh.close()
    for fh in outfhs:
        fh.close()
    read_name_fh.close()
    logging.debug("Inspected %d fragments" % (linenum))
    return config.JOB_SUCCESS
def main():
    """Command-line entry point: parse arguments and preprocess FASTQ files."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = argparse.ArgumentParser()
    parser.add_argument("--quals", dest="quals",
                        choices=["sanger", "solexa", "illumina"],
                        default="sanger")
    parser.add_argument("--trim5", dest="trim5", type=int, default=0)
    parser.add_argument("--trim3", dest="trim3", type=int, default=0)
    parser.add_argument("output_prefix")
    parser.add_argument("fastq_files", nargs="+")
    args = parser.parse_args()
    process_input_reads(args.fastq_files, args.output_prefix,
                        args.quals, args.trim5, args.trim3)
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| madhavsuresh/chimerascan | chimerascan/pipeline/process_input_reads.py | Python | gpl-3.0 | 4,077 |
import pygame
import random

# Window dimensions
width = 640
height = 400

# BUG FIX: initialize pygame before creating the display window.
pygame.init()
screen = pygame.display.set_mode((width, height))
clock = pygame.time.Clock()
running = True
while running:
    # Paint one randomly placed pixel in a random RGB color per frame.
    x = random.randint(0, width-1)
    y = random.randint(0, height-1)
    red = random.randint(0, 255)
    green = random.randint(0, 255)
    blue = random.randint(0, 255)
    screen.set_at((x, y), (red, green, blue))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    pygame.display.flip()
    clock.tick(240)  # cap the loop at 240 iterations per second

# BUG FIX: release the display and other pygame resources on exit.
pygame.quit()
| vicente-gonzalez-ruiz/YAPT | multimedia/Pygame/test_examples/random_pixel.py | Python | cc0-1.0 | 553 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import nowdate, cstr, flt, cint, now, getdate
from frappe import throw, _
from frappe.utils import formatdate
# imported to enable erpnext.accounts.utils.get_account_currency
from erpnext.accounts.doctype.account.account import get_account_currency
import frappe.defaults
from erpnext.accounts.report.financial_statements import sort_root_accounts
# Raised when a date/name cannot be matched to any active Fiscal Year.
class FiscalYearError(frappe.ValidationError): pass
@frappe.whitelist()
def get_fiscal_year(date=None, fiscal_year=None, label="Date", verbose=1, company=None, as_dict=False):
	"""Return the single most recent Fiscal Year matching *date* / *fiscal_year*.

	Thin wrapper around :func:`get_fiscal_years` that returns the first
	(latest-starting) match. Raises :class:`FiscalYearError` when nothing
	matches.
	"""
	matches = get_fiscal_years(date, fiscal_year, label, verbose, company, as_dict=as_dict)
	return matches[0]
def get_fiscal_years(transaction_date=None, fiscal_year=None, label="Date", verbose=1, company=None, as_dict=False):
	"""Return all active Fiscal Years matching *transaction_date* or *fiscal_year*,
	ordered by year start date (most recent first).

	Raises :class:`FiscalYearError` when no active Fiscal Year matches.
	"""
	# if year start date is 2012-04-01, year end date should be 2013-03-31 (hence subdate)
	cond = " disabled = 0"
	if fiscal_year:
		cond += " and fy.name = %(fiscal_year)s"
	else:
		# match fiscal years whose [start, end] range covers the date
		cond += " and %(transaction_date)s >= fy.year_start_date and %(transaction_date)s <= fy.year_end_date"
	if company:
		# a fiscal year applies to a company when it has no company links at
		# all (global) or is explicitly linked to the given company
		cond += """ and (not exists(select name from `tabFiscal Year Company` fyc where fyc.parent = fy.name)
			or exists(select company from `tabFiscal Year Company` fyc where fyc.parent = fy.name and fyc.company=%(company)s ))"""
	fy = frappe.db.sql("""select fy.name, fy.year_start_date, fy.year_end_date from `tabFiscal Year` fy
		where %s order by fy.year_start_date desc""" % cond, {
			"fiscal_year": fiscal_year,
			"transaction_date": transaction_date,
			"company": company
		}, as_dict=as_dict)
	if not fy:
		error_msg = _("""{0} {1} not in any active Fiscal Year. For more details check {2}.""").format(label, formatdate(transaction_date), "https://frappe.github.io/erpnext/user/manual/en/accounts/articles/fiscal-year-error")
		if verbose==1: frappe.msgprint(error_msg)
		# Python-2 style raise (this module targets Python 2)
		raise FiscalYearError, error_msg
	return fy
def validate_fiscal_year(date, fiscal_year, label=_("Date"), doc=None):
	"""Ensure *fiscal_year* actually covers *date*.

	When it does not: if *doc* is given, silently correct ``doc.fiscal_year``
	to the most recent valid year; otherwise raise a validation error.
	"""
	valid_years = [fy[0] for fy in get_fiscal_years(date, label=label)]
	if fiscal_year in valid_years:
		return
	if doc:
		doc.fiscal_year = valid_years[0]
	else:
		throw(_("{0} '{1}' not in Fiscal Year {2}").format(label, formatdate(date), fiscal_year))
@frappe.whitelist()
def get_balance_on(account=None, date=None, party_type=None, party=None, company=None, in_account_currency=True):
	"""Return the GL balance (debit - credit, sign per party type) of
	*account* and/or *party* as on *date*.

	Missing arguments fall back to the current request's form dict so the
	function can be called directly over HTTP (it is whitelisted).
	Returns 0.0 for dates older than any existing fiscal year.
	"""
	if not account and frappe.form_dict.get("account"):
		account = frappe.form_dict.get("account")
	if not date and frappe.form_dict.get("date"):
		date = frappe.form_dict.get("date")
	if not party_type and frappe.form_dict.get("party_type"):
		party_type = frappe.form_dict.get("party_type")
	if not party and frappe.form_dict.get("party"):
		party = frappe.form_dict.get("party")
	cond = []
	if date:
		cond.append("posting_date <= '%s'" % frappe.db.escape(cstr(date)))
	else:
		# get balance of all entries that exist
		date = nowdate()
	try:
		year_start_date = get_fiscal_year(date, verbose=0)[1]
	except FiscalYearError:
		if getdate(date) > getdate(nowdate()):
			# if fiscal year not found and the date is greater than today
			# get fiscal year for today's date and its corresponding year start date
			year_start_date = get_fiscal_year(nowdate(), verbose=1)[1]
		else:
			# this indicates that it is a date older than any existing fiscal year.
			# hence, assuming balance as 0.0
			return 0.0
	if account:
		acc = frappe.get_doc("Account", account)
		if not frappe.flags.ignore_account_permission:
			acc.check_permission("read")
		# for pl accounts, get balance within a fiscal year
		if acc.report_type == 'Profit and Loss':
			cond.append("posting_date >= '%s' and voucher_type != 'Period Closing Voucher'" \
				% year_start_date)
		# different filter for group and ledger - improved performance
		if acc.is_group:
			# group account: include every ledger account inside its
			# nested-set (lft/rgt) interval
			cond.append("""exists (
				select name from `tabAccount` ac where ac.name = gle.account
				and ac.lft >= %s and ac.rgt <= %s
			)""" % (acc.lft, acc.rgt))
			# If group and currency same as company,
			# always return balance based on debit and credit in company currency
			if acc.account_currency == frappe.db.get_value("Company", acc.company, "default_currency"):
				in_account_currency = False
		else:
			cond.append("""gle.account = "%s" """ % (frappe.db.escape(account, percent=False), ))
	if party_type and party:
		cond.append("""gle.party_type = "%s" and gle.party = "%s" """ %
			(frappe.db.escape(party_type), frappe.db.escape(party, percent=False)))
	if company:
		cond.append("""gle.company = "%s" """ % (frappe.db.escape(company, percent=False)))
	if account or (party_type and party):
		if in_account_currency:
			select_field = "sum(debit_in_account_currency) - sum(credit_in_account_currency)"
		else:
			select_field = "sum(debit) - sum(credit)"
		bal = frappe.db.sql("""
			SELECT {0}
			FROM `tabGL Entry` gle
			WHERE {1}""".format(select_field, " and ".join(cond)))[0][0]
		# if bal is None, return 0
		return flt(bal)
def get_count_on(account, fieldname, date):
	"""Count GL Entries against *account* as on *date*.

	For fieldname 'invoiced_amount' or 'payables' only entries that are
	still outstanding (allocated payments subtracted, remainder above the
	currency precision threshold) are counted; any other fieldname counts
	all matching entries. Returns 0.0 for dates older than any fiscal year.
	"""
	cond = []
	if date:
		cond.append("posting_date <= '%s'" % frappe.db.escape(cstr(date)))
	else:
		# get balance of all entries that exist
		date = nowdate()
	try:
		year_start_date = get_fiscal_year(date, verbose=0)[1]
	except FiscalYearError:
		if getdate(date) > getdate(nowdate()):
			# if fiscal year not found and the date is greater than today
			# get fiscal year for today's date and its corresponding year start date
			year_start_date = get_fiscal_year(nowdate(), verbose=1)[1]
		else:
			# this indicates that it is a date older than any existing fiscal year.
			# hence, assuming balance as 0.0
			return 0.0
	if account:
		acc = frappe.get_doc("Account", account)
		if not frappe.flags.ignore_account_permission:
			acc.check_permission("read")
		# for pl accounts, get balance within a fiscal year
		if acc.report_type == 'Profit and Loss':
			cond.append("posting_date >= '%s' and voucher_type != 'Period Closing Voucher'" \
				% year_start_date)
		# different filter for group and ledger - improved performance
		if acc.is_group:
			cond.append("""exists (
				select name from `tabAccount` ac where ac.name = gle.account
				and ac.lft >= %s and ac.rgt <= %s
			)""" % (acc.lft, acc.rgt))
			# If group and currency same as company,
			# always return balance based on debit and credit in company currency
			if acc.account_currency == frappe.db.get_value("Company", acc.company, "default_currency"):
				# NOTE(review): this flag is never read again in this
				# function; looks copied from get_balance_on -- confirm.
				in_account_currency = False
		else:
			cond.append("""gle.account = "%s" """ % (frappe.db.escape(account, percent=False), ))
	entries = frappe.db.sql("""
		SELECT name, posting_date, account, party_type, party,debit,credit,
			voucher_type, voucher_no, against_voucher_type, against_voucher
		FROM `tabGL Entry` gle
		WHERE {0}""".format(" and ".join(cond)), as_dict=True)
	count = 0
	for gle in entries:
		if fieldname not in ('invoiced_amount','payables'):
			count += 1
		else:
			# receivable rows accumulate on the debit side, payable rows on
			# the credit side
			dr_or_cr = "debit" if fieldname == "invoiced_amount" else "credit"
			cr_or_dr = "credit" if fieldname == "invoiced_amount" else "debit"
			select_fields = "ifnull(sum(credit-debit),0)" if fieldname == "invoiced_amount" else "ifnull(sum(debit-credit),0)"
			if ((not gle.against_voucher) or (gle.against_voucher_type in ["Sales Order", "Purchase Order"]) or
				(gle.against_voucher==gle.voucher_no and gle.get(dr_or_cr) > 0)):
				# sum payments allocated against this voucher up to *date*
				payment_amount = frappe.db.sql("""
					SELECT {0}
					FROM `tabGL Entry` gle
					WHERE docstatus < 2 and posting_date <= %(date)s and against_voucher = %(voucher_no)s
					and party = %(party)s and name != %(name)s""".format(select_fields),
					{"date": date, "voucher_no": gle.voucher_no, "party": gle.party, "name": gle.name})[0][0]
				outstanding_amount = flt(gle.get(dr_or_cr)) - flt(gle.get(cr_or_dr)) - payment_amount
				currency_precision = get_currency_precision() or 2
				# count only if the remainder exceeds rounding noise
				if abs(flt(outstanding_amount)) > 0.1/10**currency_precision:
					count += 1
	return count
@frappe.whitelist()
def add_ac(args=None):
	"""Create a new Account document from *args* (or the current request's
	form dict) and return its name.

	Honours the optional ``ignore_permissions``, ``parent`` and ``is_root``
	keys in *args*.
	"""
	if not args:
		args = frappe.local.form_dict
		args.pop("cmd")
	account = frappe.new_doc("Account")
	if args.get("ignore_permissions"):
		account.flags.ignore_permissions = True
		args.pop("ignore_permissions")
	account.update(args)
	if not account.parent_account:
		account.parent_account = args.get("parent")
	account.old_parent = ""
	account.freeze_account = "No"
	if cint(account.get("is_root")):
		# root accounts have no parent; skip mandatory-parent validation
		account.parent_account = None
		account.flags.ignore_mandatory = True
	account.insert()
	return account.name
@frappe.whitelist()
def add_cc(args=None):
	"""Create a new Cost Center from *args* (or the current request's form
	dict) and return its name."""
	if not args:
		args = frappe.local.form_dict
		args.pop("cmd")
	cost_center = frappe.new_doc("Cost Center")
	cost_center.update(args)
	if not cost_center.parent_cost_center:
		cost_center.parent_cost_center = args.get("parent")
	cost_center.old_parent = ""
	cost_center.insert()
	return cost_center.name
def reconcile_against_document(args):
	"""
	Reconcile advance payment entries against their target documents.

	For each row in *args*: verify the advance entry is unmodified, cancel
	its GL entries, re-point its reference at the target document (splitting
	the row when only partially allocated), then re-post the GL entries.
	"""
	for d in args:
		check_if_advance_entry_modified(d)
		validate_allocated_amount(d)
		# cancel advance entry
		doc = frappe.get_doc(d.voucher_type, d.voucher_no)
		doc.make_gl_entries(cancel=1, adv_adj=1)
		# update ref in advance entry
		if d.voucher_type == "Journal Entry":
			update_reference_in_journal_entry(d, doc)
		else:
			update_reference_in_payment_entry(d, doc)
		# re-submit advance entry (reload: the update above changed it)
		doc = frappe.get_doc(d.voucher_type, d.voucher_no)
		doc.make_gl_entries(cancel = 0, adv_adj =1)
def check_if_advance_entry_modified(args):
	"""
	Verify that the advance entry referenced by *args* was not modified
	after it was pulled into the reconciliation screen:
	- the referenced row must still exist with the same account/party
	- the amount must be unchanged
	- the voucher must still be submitted (docstatus 1)
	Throws a validation error if any check fails.
	"""
	ret = None
	if args.voucher_type == "Journal Entry":
		ret = frappe.db.sql("""
			select t2.{dr_or_cr} from `tabJournal Entry` t1, `tabJournal Entry Account` t2
			where t1.name = t2.parent and t2.account = %(account)s
			and t2.party_type = %(party_type)s and t2.party = %(party)s
			and (t2.reference_type is null or t2.reference_type in ("", "Sales Order", "Purchase Order"))
			and t1.name = %(voucher_no)s and t2.name = %(voucher_detail_no)s
			and t1.docstatus=1 """.format(dr_or_cr = args.get("dr_or_cr")), args)
	else:
		# Payment Entry: the party account column depends on the party type
		party_account_field = "paid_from" if args.party_type == "Customer" else "paid_to"
		if args.voucher_detail_no:
			# a specific reference row was pulled; it must still match
			ret = frappe.db.sql("""select t1.name
				from `tabPayment Entry` t1, `tabPayment Entry Reference` t2
				where
				t1.name = t2.parent and t1.docstatus = 1
				and t1.name = %(voucher_no)s and t2.name = %(voucher_detail_no)s
				and t1.party_type = %(party_type)s and t1.party = %(party)s and t1.{0} = %(account)s
				and t2.reference_doctype in ("", "Sales Order", "Purchase Order")
				and t2.allocated_amount = %(unadjusted_amount)s
			""".format(party_account_field), args)
		else:
			# no reference row: the unallocated amount must be unchanged
			ret = frappe.db.sql("""select name from `tabPayment Entry`
				where
				name = %(voucher_no)s and docstatus = 1
				and party_type = %(party_type)s and party = %(party)s and {0} = %(account)s
				and unallocated_amount = %(unadjusted_amount)s
			""".format(party_account_field), args)
	if not ret:
		throw(_("""Payment Entry has been modified after you pulled it. Please pull it again."""))
def validate_allocated_amount(args):
	"""Validate that the allocated amount is non-negative and does not
	exceed the unadjusted (available) amount."""
	allocated = args.get("allocated_amount")
	if allocated < 0:
		throw(_("Allocated amount can not be negative"))
	elif allocated > args.get("unadjusted_amount"):
		throw(_("Allocated amount can not greater than unadjusted amount"))
def update_reference_in_journal_entry(d, jv_obj):
	"""
	Update the against-document reference on a Journal Entry row.

	If only part of the row's amount is allocated, the row is split: the
	pulled row keeps the allocated amount and the new reference, and a new
	row is appended carrying the remaining balance with the original
	reference.
	"""
	jv_detail = jv_obj.get("accounts", {"name": d["voucher_detail_no"]})[0]
	# set allocated amount in both account currency and company currency
	jv_detail.set(d["dr_or_cr"], d["allocated_amount"])
	jv_detail.set('debit' if d['dr_or_cr']=='debit_in_account_currency' else 'credit',
		d["allocated_amount"]*flt(jv_detail.exchange_rate))
	original_reference_type = jv_detail.reference_type
	original_reference_name = jv_detail.reference_name
	jv_detail.set("reference_type", d["against_voucher_type"])
	jv_detail.set("reference_name", d["against_voucher"])
	if d['allocated_amount'] < d['unadjusted_amount']:
		# partial allocation: split off the remainder into a new row
		jvd = frappe.db.sql("""
			select cost_center, balance, against_account, is_advance,
				account_type, exchange_rate, account_currency
			from `tabJournal Entry Account` where name = %s
		""", d['voucher_detail_no'], as_dict=True)
		amount_in_account_currency = flt(d['unadjusted_amount']) - flt(d['allocated_amount'])
		amount_in_company_currency = amount_in_account_currency * flt(jvd[0]['exchange_rate'])
		# new entry with balance amount
		ch = jv_obj.append("accounts")
		ch.account = d['account']
		ch.account_type = jvd[0]['account_type']
		ch.account_currency = jvd[0]['account_currency']
		ch.exchange_rate = jvd[0]['exchange_rate']
		ch.party_type = d["party_type"]
		ch.party = d["party"]
		ch.cost_center = cstr(jvd[0]["cost_center"])
		ch.balance = flt(jvd[0]["balance"])
		# remainder goes on the same side; the opposite side is zeroed
		ch.set(d['dr_or_cr'], amount_in_account_currency)
		ch.set('debit' if d['dr_or_cr']=='debit_in_account_currency' else 'credit', amount_in_company_currency)
		ch.set('credit_in_account_currency' if d['dr_or_cr']== 'debit_in_account_currency'
			else 'debit_in_account_currency', 0)
		ch.set('credit' if d['dr_or_cr']== 'debit_in_account_currency' else 'debit', 0)
		ch.against_account = cstr(jvd[0]["against_account"])
		# the remainder keeps the reference the row had before this update
		ch.reference_type = original_reference_type
		ch.reference_name = original_reference_name
		ch.is_advance = cstr(jvd[0]["is_advance"])
		ch.docstatus = 1
	# will work as update after submit
	jv_obj.flags.ignore_validate_update_after_submit = True
	jv_obj.save(ignore_permissions=True)
def update_reference_in_payment_entry(d, payment_entry):
	"""
	Point a Payment Entry reference row at the allocated document.

	When the pulled row is only partially allocated, a new reference row is
	appended carrying the remaining (unallocated) portion of the original
	reference; when no row was pulled, a fresh reference row is added.
	"""
	reference_details = {
		"reference_doctype": d.against_voucher_type,
		"reference_name": d.against_voucher,
		"total_amount": d.grand_total,
		"outstanding_amount": d.outstanding_amount,
		"allocated_amount": d.allocated_amount,
		"exchange_rate": d.exchange_rate
	}
	if d.voucher_detail_no:
		existing_row = payment_entry.get("references", {"name": d["voucher_detail_no"]})[0]
		# keep a copy so a split row can inherit the original reference
		original_row = existing_row.as_dict().copy()
		existing_row.update(reference_details)
		if d.allocated_amount < original_row.allocated_amount:
			new_row = payment_entry.append("references")
			new_row.docstatus = 1
			for field in reference_details.keys():
				new_row.set(field, original_row[field])
			new_row.allocated_amount = original_row.allocated_amount - d.allocated_amount
	else:
		new_row = payment_entry.append("references")
		new_row.docstatus = 1
		new_row.update(reference_details)
	# recompute totals and save despite the document being submitted
	payment_entry.flags.ignore_validate_update_after_submit = True
	payment_entry.setup_party_account_field()
	payment_entry.set_missing_values()
	payment_entry.set_amounts()
	payment_entry.save(ignore_permissions=True)
def unlink_ref_doc_from_payment_entries(ref_type, ref_no):
	"""Remove every payment-side link to a document being cancelled.

	Clears references from Journal Entry rows, Payment Entry rows and GL
	Entries so the document (*ref_type*/*ref_no*) can be cancelled/amended.
	"""
	remove_ref_doc_link_from_jv(ref_type, ref_no)
	remove_ref_doc_link_from_pe(ref_type, ref_no)
	# clear against_voucher on GL Entries of *other* vouchers only
	frappe.db.sql("""update `tabGL Entry`
		set against_voucher_type=null, against_voucher=null,
		modified=%s, modified_by=%s
		where against_voucher_type=%s and against_voucher=%s
		and voucher_no != ifnull(against_voucher, '')""",
		(now(), frappe.session.user, ref_type, ref_no))
def remove_ref_doc_link_from_jv(ref_type, ref_no):
	"""Clear references to the document *ref_type*/*ref_no* from all
	Journal Entry Account rows and notify the user which Journal Entries
	were affected."""
	linked_jv = frappe.db.sql_list("""select parent from `tabJournal Entry Account`
		where reference_type=%s and reference_name=%s and docstatus < 2""", (ref_type, ref_no))
	if linked_jv:
		frappe.db.sql("""update `tabJournal Entry Account`
			set reference_type=null, reference_name = null,
			modified=%s, modified_by=%s
			where reference_type=%s and reference_name=%s
			and docstatus < 2""", (now(), frappe.session.user, ref_type, ref_no))
		# BUG FIX: format() must be applied to the *translated* template;
		# the original translated the already-formatted string, so the
		# lookup could never match a translation entry.
		frappe.msgprint(_("Journal Entries {0} are un-linked").format("\n".join(linked_jv)))
def remove_ref_doc_link_from_pe(ref_type, ref_no):
	"""Zero out allocations against the document *ref_type*/*ref_no* in all
	Payment Entry references and recompute each affected Payment Entry's
	allocation totals."""
	linked_pe = frappe.db.sql_list("""select parent from `tabPayment Entry Reference`
		where reference_doctype=%s and reference_name=%s and docstatus < 2""", (ref_type, ref_no))
	if linked_pe:
		frappe.db.sql("""update `tabPayment Entry Reference`
			set allocated_amount=0, modified=%s, modified_by=%s
			where reference_doctype=%s and reference_name=%s
			and docstatus < 2""", (now(), frappe.session.user, ref_type, ref_no))
		for pe in linked_pe:
			# recompute totals in memory, then persist them with raw SQL so
			# the submitted document is not re-validated
			pe_doc = frappe.get_doc("Payment Entry", pe)
			pe_doc.set_total_allocated_amount()
			pe_doc.set_unallocated_amount()
			pe_doc.clear_unallocated_reference_document_rows()
			frappe.db.sql("""update `tabPayment Entry` set total_allocated_amount=%s,
				base_total_allocated_amount=%s, unallocated_amount=%s, modified=%s, modified_by=%s
				where name=%s""", (pe_doc.total_allocated_amount, pe_doc.base_total_allocated_amount,
					pe_doc.unallocated_amount, now(), frappe.session.user, pe))
		# BUG FIX: format() must be applied to the *translated* template;
		# the original translated the already-formatted string, so the
		# lookup could never match a translation entry.
		frappe.msgprint(_("Payment Entries {0} are un-linked").format("\n".join(linked_pe)))
@frappe.whitelist()
def get_company_default(company, fieldname):
	"""Return the value of *fieldname* from the given Company, raising a
	user-facing error when the default has not been set."""
	value = frappe.db.get_value("Company", company, fieldname)
	if value:
		return value
	throw(_("Please set default {0} in Company {1}").format(frappe.get_meta("Company").get_label(fieldname), company))
def fix_total_debit_credit():
	"""Patch utility: rebalance vouchers whose GL debit and credit totals
	differ, by adding the difference to one existing GL Entry row (credit
	side for Sales Invoices, debit side otherwise)."""
	vouchers = frappe.db.sql("""select voucher_type, voucher_no,
		sum(debit) - sum(credit) as diff
		from `tabGL Entry`
		group by voucher_type, voucher_no
		having sum(debit) != sum(credit)""", as_dict=1)
	for d in vouchers:
		if abs(d.diff) > 0:
			# py2-era and/or idiom: pick the side to adjust
			dr_or_cr = d.voucher_type == "Sales Invoice" and "credit" or "debit"
			# column names are interpolated via %, values via placeholders
			frappe.db.sql("""update `tabGL Entry` set %s = %s + %s
				where voucher_type = %s and voucher_no = %s and %s > 0 limit 1""" %
				(dr_or_cr, dr_or_cr, '%s', '%s', '%s', dr_or_cr),
				(d.diff, d.voucher_type, d.voucher_no))
def get_stock_and_account_difference(account_list=None, posting_date=None):
	"""Return ``{account: stock_value - gl_balance}`` for the given stock
	accounts whose warehouse valuation differs from the GL balance by more
	than 0.005 as on *posting_date* (defaults to today)."""
	from erpnext.stock.utils import get_stock_value_on
	if not posting_date: posting_date = nowdate()
	difference = {}
	# map each warehouse-linked stock ledger account to its warehouse
	# NOTE(review): an empty *account_list* would render "in ()" in the SQL
	# below and fail -- callers presumably always pass a non-empty list.
	account_warehouse = dict(frappe.db.sql("""select name, warehouse from tabAccount
		where account_type = 'Stock' and (warehouse is not null and warehouse != '') and is_group=0
		and name in (%s)""" % ', '.join(['%s']*len(account_list)), account_list))
	for account, warehouse in account_warehouse.items():
		account_balance = get_balance_on(account, posting_date, in_account_currency=False)
		stock_value = get_stock_value_on(warehouse, posting_date)
		if abs(flt(stock_value) - flt(account_balance)) > 0.005:
			difference.setdefault(account, flt(stock_value) - flt(account_balance))
	return difference
def get_currency_precision(currency=None):
	"""Return the number of decimal places configured for *currency*,
	falling back to the default company's currency when not given."""
	from frappe.utils import get_number_format_info
	if not currency:
		currency = frappe.db.get_value("Company",
			frappe.db.get_default("Company"), "default_currency", cache=True)
	number_format = frappe.db.get_value("Currency", currency, "number_format", cache=True)
	# get_number_format_info returns (decimal_str, comma_str, precision)
	return get_number_format_info(number_format)[2]
def get_stock_rbnb_difference(posting_date, company):
	"""Return the amount that should be credited to the company's
	'Stock Received But Not Billed' account as on *posting_date*:
	(receipt valuation - invoice valuation) plus the current GL balance."""
	# NOTE(review): if the company has no Stock Ledger Entries, stock_items
	# is empty and the "in (%s)" clauses below render "in ()" -- confirm
	# callers only run this for companies with stock transactions.
	stock_items = frappe.db.sql_list("""select distinct item_code
		from `tabStock Ledger Entry` where company=%s""", company)
	pr_valuation_amount = frappe.db.sql("""
		select sum(pr_item.valuation_rate * pr_item.qty * pr_item.conversion_factor)
		from `tabPurchase Receipt Item` pr_item, `tabPurchase Receipt` pr
		where pr.name = pr_item.parent and pr.docstatus=1 and pr.company=%s
		and pr.posting_date <= %s and pr_item.item_code in (%s)""" %
		('%s', '%s', ', '.join(['%s']*len(stock_items))), tuple([company, posting_date] + stock_items))[0][0]
	pi_valuation_amount = frappe.db.sql("""
		select sum(pi_item.valuation_rate * pi_item.qty * pi_item.conversion_factor)
		from `tabPurchase Invoice Item` pi_item, `tabPurchase Invoice` pi
		where pi.name = pi_item.parent and pi.docstatus=1 and pi.company=%s
		and pi.posting_date <= %s and pi_item.item_code in (%s)""" %
		('%s', '%s', ', '.join(['%s']*len(stock_items))), tuple([company, posting_date] + stock_items))[0][0]
	# Balance should be
	stock_rbnb = flt(pr_valuation_amount, 2) - flt(pi_valuation_amount, 2)
	# Balance as per system
	stock_rbnb_account = "Stock Received But Not Billed - " + frappe.db.get_value("Company", company, "abbr")
	sys_bal = get_balance_on(stock_rbnb_account, posting_date, in_account_currency=False)
	# Amount should be credited
	return flt(stock_rbnb) + flt(sys_bal)
def get_outstanding_invoices(party_type, party, account, condition=None):
	"""Return a list of outstanding invoice dicts for *party* on *account*.

	Each dict contains voucher identifiers, invoice/payment/outstanding
	amounts and the due date; the list is sorted by due date. *condition*
	is an optional extra SQL fragment appended to the WHERE clause.
	"""
	outstanding_invoices = []
	precision = frappe.get_precision("Sales Invoice", "outstanding_amount")
	# invoices grow the receivable (debit) side for Customers and the
	# payable (credit) side for Suppliers; payments do the opposite
	if party_type=="Customer":
		dr_or_cr = "debit_in_account_currency - credit_in_account_currency"
		payment_dr_or_cr = "payment_gl_entry.credit_in_account_currency - payment_gl_entry.debit_in_account_currency"
	else:
		dr_or_cr = "credit_in_account_currency - debit_in_account_currency"
		payment_dr_or_cr = "payment_gl_entry.debit_in_account_currency - payment_gl_entry.credit_in_account_currency"
	invoice_list = frappe.db.sql("""
		select
			voucher_no, voucher_type, posting_date, ifnull(sum({dr_or_cr}), 0) as invoice_amount,
			(
				select ifnull(sum({payment_dr_or_cr}), 0)
				from `tabGL Entry` payment_gl_entry
				where payment_gl_entry.against_voucher_type = invoice_gl_entry.voucher_type
					and payment_gl_entry.against_voucher = invoice_gl_entry.voucher_no
					and payment_gl_entry.party_type = invoice_gl_entry.party_type
					and payment_gl_entry.party = invoice_gl_entry.party
					and payment_gl_entry.account = invoice_gl_entry.account
					and {payment_dr_or_cr} > 0
			) as payment_amount
		from
			`tabGL Entry` invoice_gl_entry
		where
			party_type = %(party_type)s and party = %(party)s
			and account = %(account)s and {dr_or_cr} > 0
			{condition}
			and ((voucher_type = 'Journal Entry'
					and (against_voucher = '' or against_voucher is null))
				or (voucher_type not in ('Journal Entry', 'Payment Entry')))
		group by voucher_type, voucher_no
		having (invoice_amount - payment_amount) > 0.005
		order by posting_date, name""".format(
			dr_or_cr = dr_or_cr,
			payment_dr_or_cr = payment_dr_or_cr,
			condition = condition or ""
		), {
			"party_type": party_type,
			"party": party,
			"account": account,
		}, as_dict=True)
	for d in invoice_list:
		outstanding_invoices.append(frappe._dict({
			'voucher_no': d.voucher_no,
			'voucher_type': d.voucher_type,
			'posting_date': d.posting_date,
			'invoice_amount': flt(d.invoice_amount),
			'payment_amount': flt(d.payment_amount),
			'outstanding_amount': flt(d.invoice_amount - d.payment_amount, precision),
			'due_date': frappe.db.get_value(d.voucher_type, d.voucher_no, "due_date"),
		}))
	# vouchers without a due date sort as due today
	outstanding_invoices = sorted(outstanding_invoices, key=lambda k: k['due_date'] or getdate(nowdate()))
	return outstanding_invoices
def get_account_name(account_type=None, root_type=None, is_group=None, account_currency=None, company=None):
	"""Return the name of the first Account matching the given attributes,
	falling back to the user's default currency and company."""
	defaults = frappe.defaults.get_defaults()
	filters = {
		"account_type": account_type or '',
		"root_type": root_type or '',
		"is_group": is_group or 0,
		"account_currency": account_currency or defaults.currency,
		"company": company or defaults.company
	}
	return frappe.db.get_value("Account", filters, "name")
@frappe.whitelist()
def get_companies():
	"""Return the names of all Companies visible to the current user,
	sorted by name."""
	companies = frappe.get_list("Company", fields=["name"], order_by="name")
	return [company.name for company in companies]
@frappe.whitelist()
def get_children():
	"""Return child nodes for the Chart of Accounts / Cost Center tree view.

	Reads ``doctype``, ``company`` and ``parent`` from the request's form
	dict. For Account nodes, also attaches the company currency and the
	current balance(s) to each row.
	"""
	args = frappe.local.form_dict
	doctype, company = args['doctype'], args['company']
	# parent link field is e.g. parent_account / parent_cost_center
	fieldname = frappe.db.escape(doctype.lower().replace(' ','_'))
	doctype = frappe.db.escape(doctype)
	# root
	if args['parent'] in ("Accounts", "Cost Centers"):
		fields = ", root_type, report_type, account_currency" if doctype=="Account" else ""
		acc = frappe.db.sql(""" select
			name as value, is_group as expandable {fields}
			from `tab{doctype}`
			where ifnull(`parent_{fieldname}`,'') = ''
			and `company` = %s	and docstatus<2
			order by name""".format(fields=fields, fieldname = fieldname, doctype=doctype),
				company, as_dict=1)
		if args["parent"]=="Accounts":
			sort_root_accounts(acc)
	else:
		# other
		fields = ", account_currency" if doctype=="Account" else ""
		acc = frappe.db.sql("""select
			name as value, is_group as expandable, parent_{fieldname} as parent {fields}
			from `tab{doctype}`
			where ifnull(`parent_{fieldname}`,'') = %s
			and docstatus<2
			order by name""".format(fields=fields, fieldname=fieldname, doctype=doctype),
				args['parent'], as_dict=1)
	if doctype == 'Account':
		company_currency = frappe.db.get_value("Company", company, "default_currency")
		for each in acc:
			each["company_currency"] = company_currency
			each["balance"] = flt(get_balance_on(each.get("value"), in_account_currency=False))
			if each.account_currency != company_currency:
				# foreign-currency accounts also expose their own-currency balance
				each["balance_in_account_currency"] = flt(get_balance_on(each.get("value")))
	return acc
def create_payment_gateway_and_account(gateway):
	"""Ensure both the Payment Gateway record and its Payment Gateway
	Account exist for *gateway*."""
	create_payment_gateway(gateway)
	create_payment_gateway_account(gateway)
def create_payment_gateway(gateway):
	"""Create the Payment Gateway record for *gateway* if it is missing."""
	# NOTE: we don't translate Payment Gateway name because it is an internal doctype
	if frappe.db.exists("Payment Gateway", gateway):
		return
	payment_gateway = frappe.get_doc({
		"doctype": "Payment Gateway",
		"gateway": gateway
	})
	payment_gateway.insert(ignore_permissions=True)
def create_payment_gateway_account(gateway):
	"""Create a default Payment Gateway Account for *gateway*, finding or
	creating a bank account named after the gateway.

	Silently returns when there is no default company, when no bank account
	could be created, or when an account for the gateway/currency pair
	already exists.
	"""
	from erpnext.setup.setup_wizard.setup_wizard import create_bank_account
	company = frappe.db.get_value("Global Defaults", None, "default_company")
	if not company:
		return
	# NOTE: we translate Payment Gateway account name because that is going to be used by the end user
	bank_account = frappe.db.get_value("Account", {"account_name": _(gateway), "company": company},
		["name", 'account_currency'], as_dict=1)
	if not bank_account:
		# check for untranslated one
		bank_account = frappe.db.get_value("Account", {"account_name": gateway, "company": company},
			["name", 'account_currency'], as_dict=1)
	if not bank_account:
		# try creating one
		bank_account = create_bank_account({"company_name": company, "bank_account": _(gateway)})
	if not bank_account:
		frappe.msgprint(_("Payment Gateway Account not created, please create one manually."))
		return
	# if payment gateway account exists, return
	if frappe.db.exists("Payment Gateway Account",
		{"payment_gateway": gateway, "currency": bank_account.account_currency}):
		return
	try:
		frappe.get_doc({
			"doctype": "Payment Gateway Account",
			"is_default": 1,
			"payment_gateway": gateway,
			"payment_account": bank_account.name,
			"currency": bank_account.account_currency
		}).insert(ignore_permissions=True)
	except frappe.DuplicateEntryError:
		# already exists, due to a reinstall?
		pass
# -*- coding: utf-8 -*-
"""
Photo Catalog v 1.0 (photocat)
Copyright (c) Karol Będkowski, 2004-2007
This file is part of Photo Catalog
"""
__author__ = 'Karol Będkowski'
__copyright__ = 'Copyright (C) Karol Będkowski 2006'
__revision__ = '$Id$'
class TreeItem(object):
    """Base class for items displayed in the catalog tree.

    Subclasses override :attr:`caption` and :attr:`childs` to provide a
    display label and child items.
    """

    def __init__(self):
        # Handle to the GUI tree node; assigned externally after creation.
        self.tree_node = None

    def __del__(self):
        del self.tree_node

    @property
    def caption(self):
        """Label shown for this item; the base class has none."""
        return ''

    @property
    def childs(self):
        """Child items of this node; ``None`` means no children."""
        return None
# vim: encoding=utf8: ff=unix:
| KarolBedkowski/photocat | photocat/model/gui.py | Python | gpl-2.0 | 488 |
# -*- coding: utf-8 -*-
import os, time, pygame
from pygame.locals import *
class ResManager:
    """Loads game resources from a data directory tree laid out as
    <data_dir>/<image_dir|sound_dir|music_dir>/<filename>."""
    def __init__(self,
                 data_dir = 'data',
                 image_dir= 'image',
                 sound_dir= 'sound',
                 music_dir= 'music'):
        # Root data directory and per-resource-type sub-directory names.
        self.data_dir = data_dir
        self.image_dir = image_dir
        self.sound_dir = sound_dir
        self.music_dir = music_dir
    def get_image(self,name):
        """Load <data_dir>/<image_dir>/<name> and return a pygame Surface
        converted for fast blitting; exits the program on load failure."""
        fullname = os.path.join(self.data_dir, os.path.join(self.image_dir,name))
        try:
            image = pygame.image.load(fullname).convert()
        except pygame.error, message:
            print('Cannot load image; {0}'.format(name))
            raise SystemExit, message
        else:
            # NOTE(review): the surface was already convert()-ed above and is
            # re-converted here with per-pixel alpha; the first convert()
            # looks redundant -- confirm before changing.
            image = image.convert_alpha()
        return image
| azon1272/War-for-cookies-v2 | Base view/ResManager.py | Python | bsd-3-clause | 802 |
'''
.. module:: retrieve
High level API to retrieve cleaned files
.. moduleauthor:: Christopher Phillippi <c_phillippi@mfe.berkeley.edu>
'''
from itertools import chain
from os import listdir
import cleaner.settings as settings
import cleaner.schema as schema
import csv
import datetime
import os.path
def _getPath( store, filename ):
path = os.path.abspath( store )
return os.path.join( path, filename )
def adjustedClosesFilepath( empiricalStore = settings.EMPIRICAL_STORE,
                            filename = settings.ADJUSTED_CLOSE_FILENAME ):
    """Return the absolute path of *filename* inside *empiricalStore*
    (defaults to the configured adjusted-closes CSV)."""
    return _getPath( empiricalStore, filename )
def benchmarkFilepath():
    """Return the absolute path of the benchmarks CSV in the empirical store."""
    return adjustedClosesFilepath( empiricalStore = settings.EMPIRICAL_STORE,
                                   filename = 'benchmarks.csv' )
def getArticles( fileList ):
    """Lazily yield the full text contents of each file in *fileList*."""
    def readWholeFile( path ):
        with open( path, 'r' ) as fileObj:
            return fileObj.read()
    return ( readWholeFile( path ) for path in fileList )
def getCleanArticles( cleanStore = settings.CLEAN_STORE ):
    """Returns iterable of the text of all cleaned articles

    :param cleanStore: Absolute path to clean store
    """
    # NOTE(review): getCleanFileList is defined elsewhere in this module.
    return getArticles( getCleanFileList( cleanStore ) )
def getDailyArticles( date, cleanStore = settings.CLEAN_STORE ):
    """Return an iterable of the article texts filed under *date*."""
    return getArticles( getDailyFileList( date, cleanStore ) )
def getDailyFileList( date, store = settings.CLEAN_STORE, mergeWeekendsWithMonday = False ):
    """Return an iterable of article files for the given calendar *date*.

    :param date: a :py:class:`datetime.date`
    :param store: root of the file store to search
    :param mergeWeekendsWithMonday: when True and *date* is a Monday,
        recursively chain in the previous days' files (Sunday, which in turn
        chains Saturday only if Sunday were a Monday -- it is not, so the
        recursion stops after one step).
    """
    fileList = getFilteredFileList( store = store,
                                    includes = { schema.YEAR : [ str( date.year ) ],
                                                 schema.DAY : [ str( date.day ) ],
                                                 schema.MONTH : [ date.strftime( '%B' ) ],
                                                 } )
    # BUG FIX: the original tested "date.weekday == 0", comparing the bound
    # method object itself to 0 -- always False -- so weekend files were
    # never merged.  weekday() must be called; Monday is 0.
    if ( mergeWeekendsWithMonday and date.weekday() == 0 ):
        return chain( fileList,
                      getDailyFileList( date - datetime.timedelta( days = 1 ),
                                        store,
                                        mergeWeekendsWithMonday ) )
    else:
        return fileList
def getFilteredFileList( includes = None,
                         excludes = None,
                         store = settings.CLEAN_STORE ):
    """Returns a list of files, based on includes and excludes filters
    :param includes: Dictionary of values to be included by schema category
    :type includes: :py:class:`dict`
    :param excludes: Dictionary of values to be excluded by schema category
    :type excludes: :py:class:`dict`
    Example Usage: (Assume you want all February articles in 2011 that are not in the NYT)
    >>> import cleaner.retrieve as retrieve
    >>> retrieve.getFilteredFileList( includes = { \'month\' : [ \'February\' ], \'year\' : [ \'2011\' ] }, excludes = { \'paper\' = [ \'New York Times\' ] } )
    ~\\AFPCorpus\\LexisNexis\\2011\\February\\1\\New York Times\\40_UNDER_FORTY_Financial_advi.txt
    ~\\AFPCorpus\\LexisNexis\\2011\\February\\1\\New York Times\\Walker_in_three_way_battle_for.txt
    ~\\AFPCorpus\\LexisNexis\\2011\\February\\2\\New York Times\\Clinton_to_Grace_This_Day_Awar.txt
    ...
    """
    # Recursive generator: each directory level maps to one entry of
    # schema.STORE_ORDER (year/month/day/paper/...); directories beyond the
    # schema's depth are tagged "other".
    def getFileListAtDepth( root, depth ):
        try:
            storeTag = schema.STORE_ORDER[ depth ]
        except IndexError:
            storeTag = "other"
        for f in os.listdir( root ):
            path = os.path.join( root, f )
            if os.path.isfile( path ):
                yield path
            else:
                # Filtering is exception-driven on purpose: a missing key
                # (KeyError) or a None filter dict (TypeError) both mean
                # "no restriction at this level".
                try:
                    if f not in includes[ storeTag ]:
                        continue
                except KeyError: # No specification for folder, assume included
                    pass
                except TypeError: # includes is None
                    pass
                try:
                    if f in excludes[ storeTag ]:
                        continue
                except KeyError: # No specification, assume included
                    pass
                except TypeError: # excludes is None
                    pass
                for subFile in getFileListAtDepth( path, depth + 1 ):
                    yield subFile
    return getFileListAtDepth( store , 0 )
def getCleanFileList( cleanStore = settings.CLEAN_STORE ):
    """Recursively yield the path of every file under *cleanStore*."""
    entries = ( os.path.join( cleanStore, name ) for name in listdir( cleanStore ) )
    for entry in entries:
        if os.path.isfile( entry ):
            yield entry
        else:
            # entry is a directory: recurse into it.
            for nested in getCleanFileList( entry ):
                yield nested
def getEmpiricalTable( tickerList,
                       fromDate,
                       toDate,
                       csvFile = adjustedClosesFilepath() ):
    """Returns a table in structure of structure format ( Ticker By Date )
    :param tickerList: A list of the tickers to be added into the table
    :param fromDate: Time from which to begin the table
    :type fromDate: :py:class:`datetime.date`
    :param toDate: TIme from which to end the table
    :type toDate: :py:class:`datetime.date`
    :param empiricalStore: The location of the Empirical file store
    :param filename: The name of the file within the Empirical file store
    """
    # Dates are compared as strings: str(datetime.date) is ISO yyyy-mm-dd,
    # which orders lexicographically -- assumes the CSV 'Date' column uses
    # the same ISO format (TODO confirm against the data file).
    begin = str( fromDate )
    end = str( toDate )
    with open( csvFile, 'r' ) as csvFile:
        csvReader = csv.DictReader( csvFile )
        # {date: {ticker: value}} for rows inside [begin, end].
        empiricalTable = dict( ( row[ 'Date' ],
                                 dict( ( ( ticker, row[ ticker ] )
                                     for ticker in tickerList ) ) )
                               for row in csvReader
                               if row[ 'Date' ] >= begin and row[ 'Date' ] <= end )
    return empiricalTable
if __name__ == "__main__":
    # Smoke test: list files for Monday 2012-01-30, merging the weekend.
    files = getDailyFileList( datetime.date( 2012, 1, 30 ),
                              mergeWeekendsWithMonday = True )
    for f in files:
        print f
| ccphillippi/AFP | afp/cleaner/retrieve.py | Python | mit | 6,187 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Daniel Carvalho <idnael@pegada.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk, Gdk,GObject, WebKit
import io, urllib, subprocess, os, time, re
from gettext import gettext as _
import etc, about
class WebkitWindow(Gtk.Window):
    """Popup window embedding a WebKit view to display search results."""
    # After this time, I will try to download again a favicon from a site
    FAVICON_EXPIRE_DAYS = 14
    def __init__(self, actions):
        Gtk.Window.__init__(self)
        # Action registry used to build the "Other actions" submenu.
        self.actions = actions
        # uses the same icon as the floating window
        self.set_icon_from_file(os.path.join(etc.DATA_DIR,"app_icon.png"))
        self.webview = WebKit.WebView()
        self.webview.connect("key-press-event",self.webpage_key_press)
        self.webview.connect_after("populate-popup", self.webpage_populate_popup)
        self.connect("delete-event", self.webpage_window_delete)
        # WebView must live inside a scrolled window to get scrollbars.
        sw = Gtk.ScrolledWindow()
        sw.add(self.webview)
        self.add(sw)
    def webpage_window_delete(self, win, event):
        # just hide the window:
        self.hide()
        # this will prevent the default action, which is to destroy the window
        return True
    def webpage_key_press(self,widget, event, data=None):
        # NOTE(review): hardware keycode 9 is Escape on typical X11 keymaps --
        # keycodes are layout/driver dependent; a keyval check would be safer.
        if event.hardware_keycode==9:
            # ESCAPE key closes the window!
            self.hide()
    # Adds my menu items to the webkit standard context menu!
    def webpage_populate_popup(self,view, menu):
        # Based on
        # http://pywebkitgtk.googlecode.com/svn-history/r159/trunk/demos/browser.py
        menu.append(Gtk.SeparatorMenuItem())
        menuitem = Gtk.MenuItem(_("Open in default browser"))
        menuitem.connect('activate', self.on_webpage_open_default_browser)
        menu.append(menuitem)
        menuitem = Gtk.MenuItem(_("Edit CtxSearch actions"))
        menuitem.connect('activate', self.on_edit_actions)
        menu.append(menuitem)
        menuitem = Gtk.MenuItem(_("About CtxSearch"))
        menuitem.connect('activate', self.on_about)
        menu.append(menuitem)
        menuitem = Gtk.MenuItem(_("Manual"))
        menuitem.connect('activate', self.on_help)
        menu.append(menuitem)
        # Add the itens from the actions menu!
        actions_menu = self.actions.actions_menu(self.ctx)
        menuitem = Gtk.MenuItem(_("Other actions"))
        menuitem.set_submenu(actions_menu)
        menu.append(menuitem)
        menu.show_all()
        return False
    def on_webpage_open_default_browser(self, menuitem):
        url = self.webview.get_property("uri")
        # Opens the standard browser!
        # TODO: should do the same here as is done in actions...
        command = etc.property("browser", "xdg-open")
        etc.open_url(command, url)
        self.hide()
    def on_edit_actions(self, menuitem):
        # Open the config file in whatever editor the desktop associates.
        subprocess.Popen(["xdg-open", etc.CONFIG_FILE])
    def on_about(self, menuitem):
        about.About()
    def on_help(self, menuitem):
        # TODO: could ship an HTML version of the help!
        import urlparse, urllib
        url = urlparse.urljoin('file:', urllib.pathname2url(os.path.abspath(etc.HELP_FILE)))
        self.webview.open(url)
| idnael/ctxsearch | ctxsearch/old/old_web.py | Python | gpl-2.0 | 3,446 |
from piliko import *
# Example 5: rational-trigonometry vector demos (Python 2 script).
# Exercises vector arithmetic, perpendicular/parallel tests, quadrance
# (squared length), Pythagoras, the triple-quad formula, and spreads.
print
print 'example 5'
v1,v2 = vector(3,0),vector(0,4)
print 'vectors v1, v2:', v1, v2
print ' v1 + v2, v1 - v2: ', v1 + v2, v1 - v2
print ' v1 * 5/4:', v1 * Fraction(5,4)
print ' v1 perpendicular v1? ', v1.perpendicular( v1 )
print ' v1 perpendicular v2? ', v1.perpendicular( v2 )
print ' v2 perpendicular v1? ', perpendicular( v2, v1 )
print ' v1 perpendicular v1+v2? ', perpendicular( v1, v1+v2 )
print ' v1 parallel v1? ', v1.parallel( v1 )
print ' v1 parallel v2? ', v1.parallel( v2 )
print ' v1 parallel 5*v1? ', parallel( v1, 5*v1 )
print ' v1 parallel v1+v2? ', parallel( v1, v1+v2 )
# v1, v2, v3 form a right triangle: check quadrance-form Pythagoras.
v3 = v2 - v1
print 'vector v3 = v2-v1: ', v3
lhs = quadrance( v1 ) + quadrance( v2 )
rhs = quadrance( v3 )
print 'v1 dot v2, v2 dot v3, v1 dot 5*v1:', v1.dot(v2), v2.dot(v3), v1.dot(5*v1)
print 'v1 dot (v2+v3), (v1 dot v2)+(v1 dot v3):', v1.dot(v2+v3), v1.dot(v2) + v1.dot(v3)
print ' pythagoras: Q(v1)+Q(v2)=Q(v3)?: lhs:', lhs, 'rhs:',rhs
# v4, v5, v6 are collinear: the triple-quad formula should hold.
v4 = vector( -5, 0 )
v5 = 3 * v4
v6 = v5 - v4
print 'vector v4, v5, and v6=v5-v4:', v4, v5, v6
lhs = sqr( quadrance( v4 ) + quadrance( v5 ) + quadrance( v6 ) )
rhs = 2*(sqr(quadrance(v4))+sqr(quadrance(v5))+sqr(quadrance(v6)))
print ' triplequad for v4,v5,v6 : lhs:', lhs, 'rhs:',rhs
# Spreads (rational analogue of angle) are scale-invariant and symmetric.
print 'spread( v1, v1 ):', spread( v1, v1 )
print 'spread( v2, v1 ):', spread( v2, v1 )
print 'spread( v2, 5*v1 ):', spread( v2, 5*v1 )
print 'spread( v1, v2 ):', spread( v1, v2 )
print 'spread( v1, v3 ):', spread( v1, v3 )
print 'spread( v1, 5*v3 ):', spread( v1, 5*v3 )
print 'spread( v2, v3 ):', spread( v2, v3 )
print 'spread( 100*v2, -20*v2 ):', spread( 100*v2, -20*v2 )
print 'quadrance v1 == v1 dot v1?', quadrance(v1), '=?=', v1.dot(v1)
| donbright/piliko | examples/example05.py | Python | bsd-3-clause | 1,688 |
from __future__ import absolute_import
from celery import Celery
# Celery application for the alignment pipeline; tasks live in align.align.
app = Celery('align',
             broker='amqp://idigphylo:idppass9@elk.acis.ufl.edu//',
             include=['align.align'])
# Optional configuration, see the application user guide.
app.conf.update(
    CELERY_TASK_RESULT_EXPIRES=3600,
    # Route pipeline tasks onto the dedicated "align" queue.
    CELERY_ROUTES = {"align.align.pipeline": {"queue": "align"}}
)
if __name__ == '__main__':
    app.start()
| mjcollin/idigphylo | workers/align/celery.py | Python | mit | 419 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import abc
import contextlib
import datetime
import functools
import hashlib
import inspect
import logging as py_logging
import os
import pyclbr
import random
import re
import shutil
import socket
import stat
import sys
import tempfile
import time
import types
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from xml.sax import saxutils
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
import retrying
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
VALID_TRACE_FLAGS = {'method', 'api'}
TRACE_METHOD = False
TRACE_API = False
synchronized = lockutils.synchronized_with_prefix('cinder-')
def find_config(config_path):
    """Find a configuration file using the given hint.

    :param config_path: Full or relative path to the config.
    :returns: Full path of the config, if it exists.
    :raises: `cinder.exception.ConfigNotFound`
    """
    candidates = (
        config_path,
        os.path.join(CONF.state_path, "etc", "cinder", config_path),
        os.path.join(CONF.state_path, "etc", config_path),
        os.path.join(CONF.state_path, config_path),
        "/etc/cinder/%s" % config_path,
    )
    # First candidate that exists on disk wins.
    found = next((loc for loc in candidates if os.path.exists(loc)), None)
    if found is None:
        raise exception.ConfigNotFound(path=os.path.abspath(config_path))
    return os.path.abspath(found)
def as_int(obj, quiet=True):
    """Best-effort conversion of *obj* to an int.

    Tries int(obj), then int(float(obj)).  On failure returns *obj*
    unchanged, or raises TypeError when quiet=False.
    """
    for cast in (int, lambda value: int(float(value))):
        try:
            return cast(obj)
        except (ValueError, TypeError):
            continue
    if not quiet:
        raise TypeError(_("Can not translate %s to integer.") % (obj))
    return obj
def is_int_like(val):
    """Check if a value looks like an int (round-trips through int())."""
    try:
        converted = str(int(val))
        original = str(val)
    except Exception:
        return False
    else:
        return converted == original
def check_exclusive_options(**kwargs):
    """Checks that only one of the provided options is actually not-none.

    Iterates over all the kwargs passed in and checks that only one of said
    arguments is not-none, if more than one is not-none then an exception will
    be raised with the names of those arguments who were not-none.
    """
    if not kwargs:
        return
    pretty_keys = kwargs.pop("pretty_keys", True)
    exclusive_options = dict((name, True)
                             for (name, value) in kwargs.items()
                             if value is not None)
    if len(exclusive_options) <= 1:
        return
    # More than one option set: report every candidate option name,
    # optionally prettified ('the_key' -> 'the key').
    if pretty_keys:
        names = [name.replace('_', ' ') for name in kwargs.keys()]
    else:
        names = kwargs.keys()
    names = ", ".join(sorted(names))
    msg = (_("May specify only one of %s") % (names))
    raise exception.InvalidInput(reason=msg)
def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method."""
    # Supply a default root_helper only when the caller asked for
    # run_as_root and did not provide a helper themselves.
    needs_helper = 'run_as_root' in kwargs and 'root_helper' not in kwargs
    if needs_helper:
        kwargs['root_helper'] = get_root_helper()
    return processutils.execute(*cmd, **kwargs)
def check_ssh_injection(cmd_list):
    """Scan an SSH argument list for shell-injection attempts.

    :param cmd_list: list of command arguments to be sent over SSH
    :raises SSHInjectionThreat: if any argument looks like an injection.
    """
    ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
                             '<']

    # Check whether injection attacks exist
    for arg in cmd_list:
        arg = arg.strip()

        # Check for matching quotes on the ends
        is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
        if is_quoted:
            # Check for unescaped quotes within the quoted argument
            quoted = is_quoted.group('quoted')
            if quoted:
                if (re.match('[\'"]', quoted) or
                        re.search('[^\\\\][\'"]', quoted)):
                    raise exception.SSHInjectionThreat(command=cmd_list)
        else:
            # We only allow spaces within quoted arguments, and that
            # is the only special character allowed within quotes
            if len(arg.split()) > 1:
                raise exception.SSHInjectionThreat(command=cmd_list)

        # Second, check whether danger character in command. So the shell
        # special operator must be a single argument.
        for c in ssh_injection_pattern:
            if c not in arg:
                continue
            result = arg.find(c)
            if not result == -1:
                # A special character is only legal when it is escaped
                # with a backslash or stands at position 0 -- wait, a
                # leading special char (result == 0) raises as well.
                if result == 0 or not arg[result - 1] == '\\':
                    raise exception.SSHInjectionThreat(command=cmd_list)
def create_channel(client, width, height):
    """Invoke an interactive shell session on server.

    :param client: connected SSH client exposing invoke_shell()
        (paramiko-style API -- assumed, not enforced here).
    :param width: terminal width for the PTY
    :param height: terminal height for the PTY
    :returns: the opened channel
    """
    channel = client.invoke_shell()
    channel.resize_pty(width, height)
    return channel
def cinderdir():
    # Root directory containing the installed cinder package: strip the
    # trailing 'cinder/__init__.py' off the module's absolute file path.
    import cinder
    return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
def last_completed_audit_period(unit=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            units: string, one of 'hour', 'day', 'month', 'year'
                    Periods normally begin at the beginning (UTC) of the
                    period unit (So a 'day' period begins at midnight UTC,
                    a 'month' unit on the 1st, a 'year' on Jan, 1)
                    unit string may be appended with an optional offset
                    like so:  'day@18'  This will begin the period at 18:00
                    UTC.  'month@15' starts a monthly period on the 15th,
                    and year@3 begins a yearly one on March 1st.
    returns:  2 tuple of datetimes (begin, end)
              The begin timestamp of this audit period is the same as the
              end of the previous.
    """
    if not unit:
        unit = CONF.volume_usage_audit_period

    # Parse an optional '@offset' suffix, e.g. 'day@18'.
    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    rightnow = timeutils.utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        if offset == 0:
            offset = 1
        # Candidate end: the offset-th day of the current month; if that is
        # still in the future, step back one month (wrapping past January).
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset,
                                    month=month,
                                    year=year)
        # begin is exactly one month before end (with the same wrap logic).
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)

    elif unit == 'year':
        if offset == 0:
            offset = 1
        # Candidate end: offset-th month of this year; step back a year if
        # the period has not completed yet.
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)

    elif unit == 'day':
        # offset is the starting hour of the daily period.
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)

    elif unit == 'hour':
        # offset is the starting minute of the hourly period.
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)
def list_of_dicts_to_dict(seq, key):
    """Convert list of dicts to a indexted dict.

    Takes a list of dicts, and converts it a nested dict
    indexed by <key>.  Each value is a shallow copy of the source dict
    with an extra 'index' entry holding the key value.

    :param seq: list of dicts
    :parm key: key in dicts to index by

    example:
      lst = [{'id': 1, ...}, {'id': 2, ...}...]
      key = 'id'
      returns {1: {'id': 1, ..., 'index': 1}, 2: {'id': 2, ..., 'index': 2}}
    """
    # The original iterated with enumerate() but never used the counter;
    # iterate the sequence directly.
    return {d[key]: dict(d, index=d[key]) for d in seq}
class ProtectedExpatParser(expatreader.ExpatParser):
    """An expat parser which disables DTD's and entities by default.

    Raises ValueError on any DTD or entity declaration, guarding against
    entity-expansion ("billion laughs") and external-entity attacks.
    """
    def __init__(self, forbid_dtd=True, forbid_entities=True,
                 *args, **kwargs):
        # Python 2.x old style class
        expatreader.ExpatParser.__init__(self, *args, **kwargs)
        self.forbid_dtd = forbid_dtd
        self.forbid_entities = forbid_entities

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        raise ValueError("Inline DTD forbidden")

    def entity_decl(self, entityName, is_parameter_entity, value, base,
                    systemId, publicId, notationName):
        raise ValueError("<!ENTITY> forbidden")

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise ValueError("<!ENTITY> forbidden")

    def reset(self):
        # Re-install the forbidding handlers on every reset(), because the
        # base class recreates the underlying expat parser there.
        expatreader.ExpatParser.reset(self)
        if self.forbid_dtd:
            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
        if self.forbid_entities:
            self._parser.EntityDeclHandler = self.entity_decl
            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
def safe_minidom_parse_string(xml_string):
    """Parse an XML string using minidom safely.

    Uses ProtectedExpatParser so DTDs and entity declarations are rejected;
    SAX parse failures are normalized to expat.ExpatError.
    """
    try:
        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
    except sax.SAXParseException:
        raise expat.ExpatError()
def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML.

    In addition to the standard &, < and > escapes, double and single
    quotes are replaced so the result is safe inside attribute values.
    """
    extra_entities = {'"': '&quot;', "'": '&apos;'}
    return saxutils.escape(value, extra_entities)
def get_from_path(items, path):
    """Returns a list of items matching the specified path.

    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
    in items, looks up items[prop1][prop2][prop3].  Like XPath, if any of the
    intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions will be ignored, this function
    will not throw because of None (anywhere) in items.  The returned list
    will contain no None values.
    """
    if path is None:
        raise exception.Error('Invalid mini_xpath')

    # Split off the first path component; remainder is handled recursively.
    (first_token, sep, remainder) = path.partition('/')

    if first_token == '':
        raise exception.Error('Invalid mini_xpath')

    results = []

    if items is None:
        return results

    if not isinstance(items, list):
        # Wrap single objects in a list
        items = [items]

    for item in items:
        if item is None:
            continue
        # Only dict-like objects (anything with .get) participate.
        get_method = getattr(item, 'get', None)
        if get_method is None:
            continue
        child = get_method(first_token)
        if child is None:
            continue
        if isinstance(child, list):
            # Flatten intermediate lists
            for x in child:
                results.append(x)
        else:
            results.append(child)

    if not sep:
        # No more tokens
        return results
    else:
        # Recurse into the flattened children with the rest of the path.
        return get_from_path(results, remainder)
def is_valid_boolstr(val):
    """Check if the provided string is a valid bool string or not."""
    accepted = ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0')
    return str(val).lower() in accepted
def is_none_string(val):
    """Check if a string represents a None value."""
    if isinstance(val, six.string_types):
        return val.lower() == 'none'
    # Non-strings (including None itself) never represent "none".
    return False
def monkey_patch():
    """Patches decorators for all functions in a specified module.

    If the CONF.monkey_patch set as True,
    this function patches a decorator
    for all functions in specified modules.

    You can set decorators for each modules
    using CONF.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'cinder.api.ec2.cloud:' \
     cinder.openstack.common.notifier.api.notify_decorator'

    Parameters of the decorator is as follows.
    (See cinder.openstack.common.notifier.api.notify_decorator)

    :param name: name of the function
    :param function: object of the function
    """
    # If CONF.monkey_patch is not True, this function do nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                # Wrap every method of the class with the decorator.
                for method, func in inspect.getmembers(clz, inspect.ismethod):
                    setattr(
                        clz, method,
                        decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
def make_dev_path(dev, partition=None, base='/dev'):
    """Return a path to a particular device.

    >>> make_dev_path('xvdc')
    /dev/xvdc

    >>> make_dev_path('xvdc', 1)
    /dev/xvdc1
    """
    # A falsy partition (None or 0) contributes no suffix, matching the
    # historical behavior of this helper.
    suffix = str(partition) if partition else ''
    return os.path.join(base, dev) + suffix
def sanitize_hostname(hostname):
    """Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
    # Normalize to a native str, dropping characters outside latin-1.
    if six.PY3:
        hostname = hostname.encode('latin-1', 'ignore').decode('latin-1')
    elif isinstance(hostname, six.text_type):
        hostname = hostname.encode('latin-1', 'ignore')

    hostname = re.sub('[ _]', '-', hostname)
    hostname = re.sub('[^\w.-]+', '', hostname)
    return hostname.lower().strip('.-')
def hash_file(file_like_object):
    """Generate a SHA-1 hex digest for the contents of a file."""
    digest = hashlib.sha1()
    # Read in 32 KiB chunks until EOF (read() returns b'').
    for chunk in iter(lambda: file_like_object.read(32768), b''):
        digest.update(chunk)
    return digest.hexdigest()
def service_is_up(service):
    """Check whether a service is up based on last heartbeat."""
    heartbeat = service['updated_at'] or service['created_at']
    # Timestamps in DB are UTC; compare against an aware utcnow.
    seconds_since = (timeutils.utcnow(with_timezone=True) -
                     heartbeat).total_seconds()
    return abs(seconds_since) <= CONF.service_down_time
def read_file_as_root(file_path):
    """Secure helper to read file as root.

    :returns: file contents as a string (stdout of `cat`)
    :raises FileNotFound: if `cat` fails, e.g. the file does not exist.
    """
    try:
        out, _err = execute('cat', file_path, run_as_root=True)
        return out
    except processutils.ProcessExecutionError:
        raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.

    :params owner_uid: UID of temporary owner (defaults to current user)
    """
    if owner_uid is None:
        owner_uid = os.getuid()

    orig_uid = os.stat(path).st_uid

    # Only chown when ownership actually needs changing; the finally
    # block guarantees the original owner is restored even if the
    # with-body raises.
    if orig_uid != owner_uid:
        execute('chown', owner_uid, path, run_as_root=True)
    try:
        yield
    finally:
        if orig_uid != owner_uid:
            execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
    """Create a scratch directory, yield it, and best-effort-remove it."""
    scratch = tempfile.mkdtemp(**kwargs)
    try:
        yield scratch
    finally:
        try:
            shutil.rmtree(scratch)
        except OSError as e:
            # Cleanup is best-effort; just log the failure.
            LOG.debug('Could not remove tmpdir: %s',
                      six.text_type(e))
def walk_class_hierarchy(clazz, encountered=None):
    """Walk class hierarchy, yielding most derived classes first."""
    seen = encountered or []
    for child in clazz.__subclasses__():
        if child in seen:
            continue
        seen.append(child)
        # Depth-first: emit all descendants before the child itself.
        for descendant in walk_class_hierarchy(child, seen):
            yield descendant
        yield child
def get_root_helper():
    # Command prefix used to escalate privileges via cinder-rootwrap.
    return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
    """Wrapper to automatically set root_helper in brick calls.

    :param multipath: A boolean indicating whether the connector can
                      support multipath.
    :param enforce_multipath: If True, it raises exception when multipath=True
                              is specified but multipathd is not running.
                              If False, it falls back to multipath=False
                              when multipathd is not running.
    """
    root_helper = get_root_helper()
    # CONF.my_ip identifies this host to os-brick.
    return connector.get_connector_properties(root_helper,
                                              CONF.my_ip,
                                              multipath,
                                              enforce_multipath)
def brick_get_connector(protocol, driver=None,
                        execute=processutils.execute,
                        use_multipath=False,
                        device_scan_attempts=3,
                        *args, **kwargs):
    """Wrapper to get a brick connector object.

    This automatically populates the required protocol as well
    as the root_helper needed to execute commands.
    """
    root_helper = get_root_helper()
    return connector.InitiatorConnector.factory(protocol, root_helper,
                                                driver=driver,
                                                execute=execute,
                                                use_multipath=use_multipath,
                                                device_scan_attempts=
                                                device_scan_attempts,
                                                *args, **kwargs)
def require_driver_initialized(driver):
    """Verifies if `driver` is initialized

    If the driver is not initialized, an exception will be raised.

    :params driver: The driver instance.
    :raises: `exception.DriverNotInitialized`
    """
    # we can't do anything if the driver didn't init
    if not driver.initialized:
        driver_name = driver.__class__.__name__
        LOG.error(_LE("Volume driver %s not initialized"), driver_name)
        raise exception.DriverNotInitialized()
def get_file_mode(path):
    """Return the permission bits of *path*.

    This primarily exists to make unit testing easier.
    """
    file_stat = os.stat(path)
    return stat.S_IMODE(file_stat.st_mode)
def get_file_gid(path):
    """Return the group id owning *path*.

    This primarily exists to make unit testing easier.
    """
    file_stat = os.stat(path)
    return file_stat.st_gid
def get_file_size(path):
    """Returns the file size in bytes."""
    file_stat = os.stat(path)
    return file_stat.st_size
def _get_disk_of_partition(devpath, st=None):
    """Gets a disk device path and status from partition path.

    Returns a disk device path from a partition device path, and stat for
    the device. If devpath is not a partition, devpath is returned as it is.
    For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is
    for '/dev/disk1p1' ('p' is prepended to the partition number if the disk
    name ends with numbers).
    """
    # Strip a trailing partition number; the optional 'p' is only removed
    # when it follows a digit (e.g. 'disk1p1' -> 'disk1').
    diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath)
    if diskpath != devpath:
        try:
            st_disk = os.stat(diskpath)
            # Only accept the stripped path if it really is a block device.
            if stat.S_ISBLK(st_disk.st_mode):
                return (diskpath, st_disk)
        except OSError:
            pass
    # devpath is not a partition
    if st is None:
        st = os.stat(devpath)
    return (devpath, st)
def get_bool_param(param_string, params):
    """Return params[param_string] as a strict boolean (default False)."""
    # Missing key is treated as False; non-boolean-ish values are rejected.
    param = params.get(param_string, False)
    if not is_valid_boolstr(param):
        msg = _('Value %(param)s for %(param_string)s is not a '
                'boolean.') % {'param': param, 'param_string': param_string}
        raise exception.InvalidParameterValue(err=msg)

    return strutils.bool_from_string(param, strict=True)
def get_blkdev_major_minor(path, lookup_for_file=True):
    """Get 'major:minor' number of block device.

    Get the device's 'major:minor' number of a block device to control
    I/O ratelimit of the specified path.
    If lookup_for_file is True and the path is a regular file, lookup a disk
    device which the file lies on and returns the result for the device.

    :returns: 'major:minor' string, or None for character devices and files
        living on network file systems.
    :raises Error: when no block device can be determined for the path.
    """
    st = os.stat(path)
    if stat.S_ISBLK(st.st_mode):
        path, st = _get_disk_of_partition(path, st)
        return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
    elif stat.S_ISCHR(st.st_mode):
        # No I/O ratelimit control is provided for character devices
        return None
    elif lookup_for_file:
        # lookup the mounted disk which the file lies on
        out, _err = execute('df', path)
        devpath = out.split("\n")[1].split()[0]
        # BUG FIX: the original used "devpath[0] is not '/'", an identity
        # comparison with a string literal that only works by accident of
        # CPython's small-string interning (and emits a SyntaxWarning on
        # Python >= 3.8).  Equality is what is meant.
        if devpath[0] != '/':
            # the file is on a network file system
            return None
        return get_blkdev_major_minor(devpath, False)
    else:
        msg = _("Unable to get a block device for file \'%s\'") % path
        raise exception.Error(msg)
def check_string_length(value, name, min_length=0, max_length=None):
    """Check the length of specified string.

    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    :raises InvalidInput: on a type or length violation
    """
    if not isinstance(value, six.string_types):
        raise exception.InvalidInput(
            message=_("%s is not a string or unicode") % name)

    if len(value) < min_length:
        raise exception.InvalidInput(
            message=_("%(name)s has a minimum character requirement of "
                      "%(min_length)s.") % {'name': name,
                                            'min_length': min_length})

    # max_length of None (or 0) disables the upper bound check.
    if max_length and len(value) > max_length:
        raise exception.InvalidInput(
            message=_("%(name)s has more than %(max_length)s "
                      "characters.") % {'name': name,
                                        'max_length': max_length})
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def add_visible_admin_metadata(volume):
    """Add user-visible admin metadata to regular metadata.

    Extracts the admin metadata keys that are to be made visible to
    non-administrators, and adds them to the regular metadata structure for the
    passed-in volume.
    """
    visible_admin_meta = {}

    # Admin metadata may arrive either as a plain dict or as a list of
    # {'key': ..., 'value': ...} rows (DB model objects).
    if volume.get('volume_admin_metadata'):
        if isinstance(volume['volume_admin_metadata'], dict):
            volume_admin_metadata = volume['volume_admin_metadata']
            for key in volume_admin_metadata:
                if key in _visible_admin_metadata_keys:
                    visible_admin_meta[key] = volume_admin_metadata[key]
        else:
            for item in volume['volume_admin_metadata']:
                if item['key'] in _visible_admin_metadata_keys:
                    visible_admin_meta[item['key']] = item['value']
    # avoid circular ref when volume is a Volume instance
    elif (volume.get('admin_metadata') and
            isinstance(volume.get('admin_metadata'), dict)):
        for key in _visible_admin_metadata_keys:
            if key in volume['admin_metadata'].keys():
                visible_admin_meta[key] = volume['admin_metadata'][key]

    if not visible_admin_meta:
        return

    # NOTE(zhiyan): update visible administration metadata to
    # volume metadata, administration metadata will rewrite existing key.
    if volume.get('volume_metadata'):
        orig_meta = list(volume.get('volume_metadata'))
        for item in orig_meta:
            if item['key'] in visible_admin_meta.keys():
                item['value'] = visible_admin_meta.pop(item['key'])
        # Keys not already present are appended as new rows.
        for key, value in visible_admin_meta.items():
            orig_meta.append({'key': key, 'value': value})
        volume['volume_metadata'] = orig_meta
    # avoid circular ref when vol is a Volume instance
    elif (volume.get('metadata') and
            isinstance(volume.get('metadata'), dict)):
        volume['metadata'].update(visible_admin_meta)
    else:
        volume['metadata'] = visible_admin_meta
def remove_invalid_filter_options(context, filters,
                                  allowed_search_options):
    """Remove search options that are not valid for non-admin API/context."""
    if context.is_admin:
        # Allow all options
        return
    # Otherwise, strip out all unknown options
    unknown = [option for option in filters
               if option not in allowed_search_options]
    LOG.debug("Removing options '%s' from query.", ", ".join(unknown))
    for option in unknown:
        del filters[option]
def is_blk_device(dev):
    """Return True if *dev* is a block device, False otherwise."""
    try:
        mode = os.stat(dev).st_mode
    except Exception:
        # Missing or unreadable path counts as "not a block device".
        LOG.debug('Path %s not found in is_blk_device check', dev)
        return False
    return True if stat.S_ISBLK(mode) else False
def retry(exceptions, interval=1, retries=3, backoff_rate=2,
          wait_random=False):
    """Decorator factory retrying the wrapped call on given exceptions.

    :param exceptions: exception class or tuple of classes that trigger retry
    :param interval: base sleep between attempts, in seconds
    :param retries: maximum number of attempts (must be >= 1)
    :param backoff_rate: exponential backoff multiplier per attempt
    :param wait_random: randomize the sleep within [interval, backoff wait]
    """

    def _retry_on_exception(e):
        return isinstance(e, exceptions)

    def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
        # Exponential backoff: interval * backoff_rate ** attempt.
        exp = backoff_rate ** previous_attempt_number
        wait_for = interval * exp

        if wait_random:
            random.seed()
            # NOTE(review): random.randrange requires integer-valued
            # arguments on Python 3; non-integral interval values would
            # raise here -- confirm callers only pass integral intervals,
            # or switch to random.uniform.
            wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
        else:
            wait_val = wait_for * 1000.0

        LOG.debug("Sleeping for %s seconds", (wait_val / 1000.0))

        return wait_val

    def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
        delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
        LOG.debug("Failed attempt %s", previous_attempt_number)
        LOG.debug("Have been at this for %s seconds",
                  delay_since_first_attempt)
        return previous_attempt_number == retries

    if retries < 1:
        raise ValueError('Retries must be greater than or '
                         'equal to 1 (received: %s). ' % retries)

    def _decorator(f):

        @six.wraps(f)
        def _wrapper(*args, **kwargs):
            r = retrying.Retrying(retry_on_exception=_retry_on_exception,
                                  wait_func=_backoff_sleep,
                                  stop_func=_print_stop)
            return r.call(f, *args, **kwargs)

        return _wrapper

    return _decorator
def convert_version_to_int(version):
    """Convert a version string or tuple to a packed int: 1.2.3 -> 1002003.

    Each component occupies three decimal digits.
    :raises CinderException: if the version cannot be parsed.
    """
    try:
        if isinstance(version, six.string_types):
            version = convert_version_to_tuple(version)
        if isinstance(version, tuple):
            return six.moves.reduce(lambda x, y: (x * 1000) + y, version)
        # NOTE(review): a non-string, non-tuple input falls through and
        # returns None -- confirm callers expect that.
    except Exception:
        msg = _("Version %s is invalid.") % version
        raise exception.CinderException(msg)
def convert_version_to_str(version_int):
    """Convert a packed version integer back to a dotted string.

    Inverse of convert_version_to_int: ``1002003`` -> ``'1.2.3'``.
    Note that ``0`` yields an empty string (unchanged historical behavior).

    The original implementation converted each component twice
    (``six.text_type`` and then ``map(str, ...)``); a single ``str``
    conversion with ``divmod`` is equivalent and simpler.
    """
    factor = 1000
    version_numbers = []
    while version_int != 0:
        # divmod peels off the least-significant 3-digit component.
        version_int, component = divmod(version_int, factor)
        version_numbers.insert(0, str(component))
    return '.'.join(version_numbers)
def convert_version_to_tuple(version_str):
    """Split a dotted version string into a tuple of ints, e.g. '1.2.3' -> (1, 2, 3)."""
    return tuple(map(int, version_str.split('.')))
def convert_str(text):
    """Convert *text* to the interpreter's native string type.

    On Python 2 the native string is bytes, so Unicode input is encoded
    via encodeutils.safe_encode(). On Python 3 the native string is
    Unicode, so bytes input is decoded from UTF-8; anything else is
    returned unchanged.
    """
    if six.PY2:
        return encodeutils.safe_encode(text)
    if isinstance(text, bytes):
        return text.decode('utf-8')
    return text
def trace_method(f):
    """Wrap *f* so its calls are traced only while TRACE_METHOD is set.

    The flag is consulted on every call, so tracing can be toggled at
    runtime via setup_tracing().
    """
    @functools.wraps(f)
    def trace_method_logging_wrapper(*args, **kwargs):
        target = trace(f) if TRACE_METHOD else f
        return target(*args, **kwargs)
    return trace_method_logging_wrapper
def trace_api(f):
    """Wrap *f* so its calls are traced only while TRACE_API is set.

    The flag is consulted on every call, so tracing can be toggled at
    runtime via setup_tracing().
    """
    @functools.wraps(f)
    def trace_api_logging_wrapper(*args, **kwargs):
        target = trace(f) if TRACE_API else f
        return target(*args, **kwargs)
    return trace_api_logging_wrapper
def trace(f):
    """Trace calls to the decorated function.

    This decorator should always be defined as the outermost decorator so it
    is defined last. This is important so it does not interfere
    with other decorators.

    Using this decorator on a function will cause its execution to be logged
    at `DEBUG` level with arguments, return values, and exceptions.

    :returns: a function decorator
    """
    func_name = f.__name__

    @functools.wraps(f)
    def trace_logging_wrapper(*args, **kwargs):
        # Prefer the module of the bound instance (methods) so trace
        # records show up under the caller's logger, else fall back to
        # this module's LOG.
        if len(args) > 0:
            maybe_self = args[0]
        else:
            maybe_self = kwargs.get('self', None)

        if maybe_self and hasattr(maybe_self, '__module__'):
            logger = logging.getLogger(maybe_self.__module__)
        else:
            logger = LOG

        # NOTE(ameade): Don't bother going any further if DEBUG log level
        # is not enabled for the logger.
        if not logger.isEnabledFor(py_logging.DEBUG):
            return f(*args, **kwargs)

        all_args = inspect.getcallargs(f, *args, **kwargs)
        logger.debug('==> %(func)s: call %(all_args)r',
                     {'func': func_name, 'all_args': all_args})

        # Fix: round the start timestamp the same way as the end
        # timestamps below, so total_time is a consistent integer delta
        # (previously start_time was an unrounded float).
        start_time = int(round(time.time() * 1000))
        try:
            result = f(*args, **kwargs)
        except Exception as exc:
            total_time = int(round(time.time() * 1000)) - start_time
            logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r',
                         {'func': func_name,
                          'time': total_time,
                          'exc': exc})
            raise

        total_time = int(round(time.time() * 1000)) - start_time
        logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
                     {'func': func_name,
                      'time': total_time,
                      'result': result})
        return result
    return trace_logging_wrapper
class TraceWrapperMetaclass(type):
    """Metaclass that wraps every plain function of a class with trace_method.

    Each function defined on the class body is replaced by a traced
    wrapper; non-function attributes pass through untouched.

    To use the metaclass you define a class like so:
    @six.add_metaclass(utils.TraceWrapperMetaclass)
    class MyClass(object):
    """
    def __new__(meta, classname, bases, classDict):
        def _maybe_wrap(attribute):
            # Only plain functions get the tracing decorator.
            if not isinstance(attribute, types.FunctionType):
                return attribute
            return functools.update_wrapper(trace_method(attribute),
                                            attribute)

        newClassDict = {name: _maybe_wrap(attr)
                        for name, attr in classDict.items()}
        return type.__new__(meta, classname, bases, newClassDict)
class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass):
    """Metaclass that wraps all methods of a class with trace.

    Combines ABCMeta with TraceWrapperMetaclass so abstract base classes
    can also get per-method tracing.
    """
    pass
def setup_tracing(trace_flags):
    """Initialize the module-level tracing flags from a list of strings.

    Updates the globals TRACE_METHOD and TRACE_API, which control whether
    method and api traces are emitted. Unknown flags are logged and
    ignored; a None (or otherwise non-iterable) argument disables both.

    :param trace_flags: a list of strings
    """
    global TRACE_METHOD
    global TRACE_API

    try:
        flags = [flag.strip() for flag in trace_flags]
    except TypeError:  # Handle when trace_flags is None or a test mock
        flags = []

    for invalid_flag in set(flags) - VALID_TRACE_FLAGS:
        LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)

    TRACE_METHOD = 'method' in flags
    TRACE_API = 'api' in flags
def resolve_hostname(hostname):
    """Resolve a host name to an IP address.

    Resolves a host name (my.data.point.com) to an IP address
    (10.12.143.11). If *hostname* is already an IP address, that same
    address is returned.

    :param hostname: Host name to resolve.
    :return: IP address for the host name.
    """
    family, socktype, proto, canonname, sockaddr = socket.getaddrinfo(
        hostname, None)[0]
    ip_address = sockaddr[0]
    LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.',
              {'host': hostname, 'ip': ip_address})
    return ip_address
| nikesh-mahalka/cinder | cinder/utils.py | Python | apache-2.0 | 34,822 |
#!/usr/bin/env python3
import boto3
import logging
import logging.handlers
import time
import watchtower
import requests
from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher
from redo import retriable #https://github.com/mozilla-releng/redo
logger = logging.getLogger('Orchestrator') # Scheduler logger; handlers are attached in initLogging().
auditlogger = logging.getLogger("audit_logger") # Separate audit logger; its records are shipped to CloudWatch (see initLogging()).
class InstanceMetaData(object):
    """Helpers for reading this EC2 instance's identity, tags, and credentials.

    Uses the instance-metadata HTTP endpoint plus an EC2
    DescribeInstances call to recover the tags of the host the
    scheduler is running on.
    """
    def __init__(self):
        # EC2 client used for the DescribeInstances lookup.
        self.ec2 = boto3.client('ec2')
    def getInstanceID(self): # returns instance tag name from scheduler instance to log to cloudwatch
        # 169.254.169.254 is the EC2 instance-metadata service endpoint.
        response = requests.get('http://169.254.169.254/latest/meta-data/instance-id')
        id = response.text  # NOTE(review): shadows the builtin id()
        # Cache the full DescribeInstances response on the object as well
        # as returning it.
        self.response = self.ec2.describe_instances(InstanceIds=[id])
        return self.response
    def getInstanceEnvTag(self,response):
        """Return the 'Environment' tag value from a DescribeInstances response.

        NOTE(review): returns from inside the first iteration, so only
        the first instance of the first reservation is ever inspected --
        presumably fine since the query is for a single instance id.
        """
        for i in response['Reservations']: # access reservations list of dict
            for t in i['Instances']: # access reservations list of dicts
                return ', '.join([x['Value'] for x in t['Tags'] if x['Key'] == 'Environment'])
    def getInstanceNameTag(self,response):
        """Return the 'Name' tag value from a DescribeInstances response.

        Same first-reservation/first-instance behavior as
        getInstanceEnvTag() above.
        """
        for i in response['Reservations']: # access reservations list of dict
            for t in i['Instances']: # access reservations list of dicts
                return ', '.join([x['Value'] for x in t['Tags'] if x['Key'] == 'Name'])
    def getCredentials(self):
        """Load IAM-role credentials from instance metadata and return the access key."""
        provider = InstanceMetadataProvider(iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
        creds = provider.load()
        access_key = creds.access_key
        return(access_key)
def initLogging(loglevel,partitionTargetValue,LogStreamName):
    """Configure the scheduler and audit loggers.

    Attaches a rotating local file handler and a CloudWatch handler to
    the module-level ``logger``, and a CloudWatch 'Audit' stream handler
    to ``auditlogger``.

    :param loglevel: textual level ('critical', 'error', 'warning',
        'info', 'debug', 'notset'); anything else falls back to INFO.
    :param partitionTargetValue: embedded in the local log file name.
    :param LogStreamName: CloudWatch stream name for scheduler logs.
    """
    # Map the textual level onto the stdlib constant. This replaces the
    # old duplicated if/elif chain; unknown values still default to INFO.
    level_by_name = {
        'critical': logging.CRITICAL,
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
        'notset': logging.NOTSET,
    }
    loggingLevelSelected = level_by_name.get(loglevel, logging.INFO)

    filenameVal='Orchestrator_' + partitionTargetValue + '.log'
    log_formatter = logging.Formatter('[%(asctime)s][P:%(process)d][%(levelname)s][%(module)s:%(funcName)s()][%(lineno)d]%(message)s')

    # Rotating local log file for the scheduler itself.
    handler = logging.handlers.RotatingFileHandler(
        filename=filenameVal,
        mode='a',
        maxBytes=128 * 1024,
        backupCount=10)
    handler.setFormatter(log_formatter)
    logger.addHandler(handler)
    logger.setLevel(loggingLevelSelected)

    # Handler for Audit, responsible for dispatch of audit info to CloudWatch.
    auditlogger.addHandler(watchtower.CloudWatchLogHandler(log_group='Scheduler',stream_name='Audit'))
    # The audit stream is never less verbose than INFO: severities above
    # INFO (or NOTSET) are clamped to INFO before being dispatched.
    if (loggingLevelSelected > logging.INFO or loggingLevelSelected == logging.NOTSET ):
        auditlogger.setLevel(logging.INFO)
    else:
        auditlogger.setLevel(loggingLevelSelected)

    # Scheduler log handler: ships scheduler messages to CloudWatch under
    # LogStreamName so the originating scheduler can be identified.
    cloud_handler = watchtower.CloudWatchLogHandler(log_group='Scheduler',stream_name=LogStreamName)
    logger.addHandler(cloud_handler)
class SnsNotifier(object):
    """Publishes scheduler notifications to an SNS topic, with retries."""
    def __init__(self, topic,workload):
        self.topic = topic  # SNS topic name to publish to
        self.workload = workload  # workload identifier included in every message
    @retriable(attempts=5, sleeptime=0, jitter=0)
    def sendSns(self,subject,message):
        """Publish *message* under *subject*; retried up to 5 times via redo.retriable."""
        client =boto3.resource('sns')
        topic = client.create_topic(Name=self.topic) # This action is idempotent.
        topic.publish(Subject=subject,Message=str(" Workload : " + self.workload + '\n') + str( " Exception : " + message ))
| tangerinedream/AWS_EC2_Scheduler | Utils.py | Python | gpl-3.0 | 4,356 |
from . import views
from django.conf.urls import include, url
from django.contrib.auth.views import login
# URL namespace used when reversing, e.g. reverse('store:product_list').
app_name = 'store'

# Routes for the store app: auth (login/logout/register) plus CRUD views
# for products.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^login/$', login, name='login'),
    url(r'^logout/$', views.logout.as_view(), name='logout'),
    url(r'^register/$', views.UserCreate.as_view(), name='user_add'),
    url(r'^product/$', views.ProductList.as_view(), name='product_list'),
    url(r'^product/add/$', views.ProductCreate.as_view(), name='product_add'),
    url(r'^product/(?P<pk>[0-9]+)/$', views.ProductDetail.as_view(), name='product_detail'),
    url(r'^product/(?P<pk>[0-9]+)/update/$', views.ProductUpdate.as_view(), name='product_update'),
    url(r'^product/(?P<pk>[0-9]+)/delete/$', views.ProductDelete.as_view(), name='product_delete'),
]
| dcabalas/UNI | SN/Django/MySocks/store/urls.py | Python | gpl-3.0 | 809 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import warnings
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from invenio.modules.upgrader.api import op
depends_on = []
def info():
    """Return the one-line, human-readable summary of this upgrade."""
    return "Initial creation of tables for pidstore module."
def do_upgrade():
    """Create the pidSTORE and pidLOG tables (skipping any that exist).

    Each table is only created when absent; otherwise a warning is
    emitted and the table is left untouched (see pre_upgrade()).
    """
    # Persistent-identifier store: one row per (pid_type, pid_value).
    if not op.has_table('pidSTORE'):
        op.create_table(
            'pidSTORE',
            sa.Column('id', mysql.INTEGER(display_width=15), nullable=False),
            sa.Column('pid_type', sa.String(length=6), nullable=False),
            sa.Column('pid_value', sa.String(length=255), nullable=False),
            sa.Column('pid_provider', sa.String(length=255), nullable=False),
            sa.Column('status', sa.CHAR(length=1), nullable=False, index=True),
            sa.Column('object_type', sa.String(length=3), nullable=True),
            sa.Column('object_value', sa.String(length=255), nullable=True),
            sa.Column('created', sa.DateTime(), nullable=False),
            sa.Column('last_modified', sa.DateTime(), nullable=False),
            sa.PrimaryKeyConstraint('id'),
            sa.Index('idx_object', 'object_type', 'object_value', unique=False),
            sa.Index('uidx_type_pid', 'pid_type', 'pid_value', unique=True),
        )
    else:
        warnings.warn("*** Creation of 'pidSTORE' table skipped! ***")
    # Audit log of actions performed on persistent identifiers.
    if not op.has_table('pidLOG'):
        op.create_table(
            'pidLOG',
            sa.Column('id', mysql.INTEGER(display_width=15), nullable=False),
            sa.Column('id_pid', mysql.INTEGER(display_width=15), nullable=True),
            sa.Column('timestamp', sa.DateTime(), nullable=False),
            sa.Column('action', sa.String(length=10), nullable=False, index=True),
            sa.Column('message', sa.Text(), nullable=False),
            sa.ForeignKeyConstraint(['id_pid'], ['pidSTORE.id'], ),
            sa.PrimaryKeyConstraint('id')
        )
    else:
        warnings.warn("*** Creation of 'pidLOG' table skipped! ***")
def estimate():
    """Return the estimated running time of this upgrade, in seconds."""
    return 1
def pre_upgrade():
    """Warn for each target table that already exists.

    do_upgrade() will skip creating any pre-existing table, so surface
    that up front.
    """
    for table in ("pidSTORE", "pidLOG"):
        if not op.has_table(table):
            continue
        warnings.warn(
            "*** Table {0} already exists! *** "
            "This upgrade will *NOT* create the new table.".format(table)
        )
def post_upgrade():
    """Run post-upgrade checks (optional)."""
    # No post-upgrade verification is needed for this migration.
    pass
| lnielsen/invenio | invenio/modules/pidstore/upgrades/pidstore_2014_08_22_initial.py | Python | gpl-2.0 | 3,270 |
__author__ = 'pankaj'
def getPallindromeCount(string):
    """Find the longest palindromic substring and how often it occurs.

    Prints and returns ``(longest_length, counter)`` where
    ``longest_length`` is the length of the longest palindromic substring
    of *string* and ``counter`` is the number of (positionally distinct)
    palindromic substrings of that length.

    Bug fixes versus the original center-expansion code:
    - The expansion loop used to ``break`` whenever the current span was
      shorter than the best span found so far, even though the characters
      still matched, so longer palindromes discovered after an early best
      were never reached.
    - The reported value was ``left - right`` (length - 1) rather than the
      actual palindrome length, and nothing was returned.
    """
    longest_length = 0
    counter = 0
    n = len(string)
    for mid in range(n):
        # Expand around an odd-length center (mid, mid) and an
        # even-length center (mid + 1, mid).
        for left, right in ((mid, mid), (mid + 1, mid)):
            while right >= 0 and left < n and string[right] == string[left]:
                length = left - right + 1
                if length > longest_length:
                    longest_length = length
                    counter = 1
                elif length == longest_length:
                    counter += 1
                left += 1
                right -= 1
    print(longest_length)
    print(counter)
    return longest_length, counter
# Demo invocation used during development.
getPallindromeCount('ababbacababbad')
# Commented-out driver for reading multiple test cases from stdin:
# tests=int(raw_input())
#
# while tests > 0:
#     t=raw_input()
#
#     tests-=1
| pankajanand18/python-tests | trees/pallindrome_count.py | Python | mit | 1,211 |
"""
Module to define url helpers functions
"""
from urllib import urlencode
from django.urls import reverse
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.search import navigation_index, path_to_location
def get_redirect_url(course_key, usage_key):
    """ Returns the redirect url back to courseware

    Args:
        course_id(str): Course Id string
        location(str): The location id of course component

    Raises:
        ItemNotFoundError if no data at the location or NoPathToItem if location not in any class

    Returns:
        Redirect url string
    """
    (
        course_key, chapter, section, vertical_unused,
        position, final_target_id
    ) = path_to_location(modulestore(), usage_key)

    # Pick the deepest courseware view for which path_to_location gave us
    # coordinates. Rely on index to do all error handling and access
    # control.
    course_id = unicode(course_key)
    if chapter is None:
        redirect_url = reverse('courseware', args=(course_id, ))
    elif section is None:
        redirect_url = reverse('courseware_chapter', args=(course_id, chapter))
    elif position is None:
        redirect_url = reverse(
            'courseware_section',
            args=(course_id, chapter, section)
        )
    else:
        # Here we use the navigation_index from the position returned from
        # path_to_location - we can only navigate to the topmost vertical
        # at the moment.
        redirect_url = reverse(
            'courseware_position',
            args=(course_id, chapter, section, navigation_index(position))
        )

    query = urlencode({'activate_block_id': unicode(final_target_id)})
    return "{}?{}".format(redirect_url, query)
| a-parhom/edx-platform | lms/djangoapps/courseware/url_helpers.py | Python | agpl-3.0 | 1,773 |
#!/usr/bin/python
# The purpose of this script is to email users when an automated build breaks.
# Caveats:
# - it has a few hardcoded spots that require the build user to be cndrauto.
# - it only works for builds. I think tests are too flaky and would generate too much mail
#
# Written by Scot Kronenfeld 2011-06
# The current list of wranglers:
# - All failures will go to them, even if they are not in the author list.
# - Error emails will go to them and nobody else.
#WRANGLERS = ["kronenfe@cs.wisc.edu", "gthain@cs.wisc.edu", "johnkn@cs.wisc.edu"]
WRANGLERS = ["kronenfe@cs.wisc.edu", "johnkn@cs.wisc.edu"]  # Always notified; sole recipients of error mail.
EMAIL_FROM_ADDRESS = 'noreply@cs.wisc.edu'  # From: address on all generated mail.
###############
import re
import os
import sys
import time
import socket
# For connecting to the DB
import MySQLdb
# For sending email
import smtplib
from email.MIMEText import MIMEText
#################################################
# Helper functions that implement all the logic #
#################################################
def validate_input():
    """Validate the command line and return the NMI Run ID argument.

    Calls die_nice() (which emails the wranglers and exits) when the
    argument count is wrong or the argument contains a non-digit.
    """
    if len(sys.argv) != 2:
        die_nice("Must pass in exactly one argument, the NMI Run ID that failed")
    runid = sys.argv[1]
    if re.search(r"\D", runid):
        die_nice("Argument supplied does not look like an NMI Run ID. It contains a non-digit: %s" % runid)
    return runid
def load_config(file):
    """Load a config file of ``key = value`` lines into a dict.

    Blank lines and lines whose first non-space character is ``#`` are
    skipped; values are stripped of surrounding whitespace.

    Fix: the file handle was previously opened and never closed; use a
    ``with`` block so it is released even on error.
    """
    config = {}
    with open(file, 'r') as f:
        for line in f:
            if re.match(r"\s*$", line) or re.match(r"\s*\#", line):
                continue
            m = re.match(r"\s*(\S+)\s*=\s*(.+)$", line)
            if m:
                config[m.group(1)] = m.group(2).strip()
    return config
def check_runid(config, runid):
    """ Connect to MySQL and determine the SHA1 value for the run and the previous run

    Looks up the failed run and the most recent earlier cndrauto run with
    the same description. If the previous run passed, emails the commit
    authors in between; if it is still pending, die_nice(); if it also
    failed, do nothing (no repeat mail for consecutive failures).

    :param config: dict from load_config() with DB host/credentials.
    :param runid: NMI Run ID string, already validated as all digits.
    """
    # Connect to MySQL using credentials from config file
    conn = MySQLdb.connect(host = config["WEB_DB_HOST"], user = config["DB_READER_USER"], passwd = config["DB_READER_PASS"], db = "nmi_history")
    cursor = conn.cursor()
    # To get the last two runs we will first get details about the RunID that we know, then use that info to constrain the second query
    # NOTE(review): runid/description are interpolated directly into SQL.
    # runid is digit-checked by validate_input(), and description comes
    # from the DB itself -- confirm before reusing this pattern elsewhere.
    cursor.execute("SELECT description FROM Run WHERE runid = " + runid + ";")
    description = cursor.fetchone()[0]
    # If this is a continuous build then we can leave the description alone.
    # But if it is a branch build we need to strip the date off the end of it.
    if re.search("-2\d\d\d-\d\d?-\d\d?$", description):
        description = re.sub("-2\d\d\d-\d\d?-\d\d?$", "", description)
    cursor.execute("SELECT result,project_version FROM Run WHERE user = \"cndrauto\" AND runid <= " + runid + " AND description LIKE \"" + description + "%\" ORDER BY runid DESC LIMIT 2;")
    # First fetch is the failed run itself, second is its predecessor.
    (this_result, this_sha1) = cursor.fetchone()
    (prev_result, prev_sha1) = cursor.fetchone()
    cursor.close()
    conn.close()
    print "Current result: %s - Current SHA1: %s" % (this_result, this_sha1)
    print "Previous result: %s - Previous SHA1: %s" % (prev_result, prev_sha1)
    # Check the status of the previous run.  NMI status reference:
    #   Pass - 0
    #   Fail - >0
    #   Pending - NULL
    if this_result == 0:
        die_nice("This script was called for a run that succeeded", runid)
    elif prev_result == None:
        die_nice("The previous run is not complete yet", runid)
    elif prev_result == 0:
        # We have a failure and the previous run succeeded.  Now is our time to shine!
        authors = get_authors(this_sha1, prev_sha1)
        msg = create_email(this_sha1, prev_sha1, description, runid, authors)
        send_mail(authors.values(), msg, runid, description)
    else:
        # We don't need to do anything if there are two failures in a row.
        # Maybe someday we will change this behavior.
        print "Previous run failed - no need to send email"
    return
def get_authors(new_sha1, old_sha1):
    """ Given two sha1 hashes, get the list of Git authors and emails in between

    Returns a dict mapping author name -> author email, built from
    ``git log old..new`` on the local CONDOR_SRC.git mirror.

    NOTE(review): the hashes are interpolated into a shell command
    unescaped. They come from the NMI database, so this assumes that
    source is trusted -- confirm before reusing this elsewhere.
    """
    cmd = 'cd /space/git/CONDOR_SRC.git && git log --pretty=format:\'%an || %ae\' ' + old_sha1 + '..' + new_sha1 + ' 2>&1 | sort | uniq -c | sort -nr'
    authors = {}
    # Each line looks like: "  <count> Author Name || author@example.com"
    for line in os.popen(cmd).read().split("\n"):
        m = re.match("\s*(.+) \|\| (.+)$", line)
        if m:
            authors[m.group(1)] = m.group(2).strip()
    return authors
def create_email(this_sha1, prev_sha1, description, runid, authors):
    """Build the failure-notification email body.

    Includes the failing/previous SHA1s, the committers in between,
    dashboard and git-log links, and the standard script-info footer.
    """
    parts = [
        "A build of type '%s' failed. The previous build of this type succeeded.\n\n" % description,
        "Build SHA1 - %s\n" % this_sha1,
        "Previous SHA1 - %s\n\n\n" % prev_sha1,
        "Committers in between:\n",
    ]
    for author in authors.keys():
        parts.append("%s <%s>\n" % (author, authors[author]))
    parts.append("\n")
    parts.append("Link to dashboard:\n")
    parts.append("http://nmi-s006.cs.wisc.edu/results/Run-condor-details.php?runid=%s&type=build&user=cndrauto\n\n" % runid)
    parts.append("Log of commits between these two builds:\n")
    parts.append("http://condor-git.cs.wisc.edu/?p=condor.git;a=log;h=%s;hp=%s\n\n" % (this_sha1, prev_sha1))
    parts.append(message_footer())
    return "".join(parts)
def message_footer():
    """Return the diagnostic footer appended to every outgoing message."""
    details = [
        "\n\n---------- Script Info -------------\n",
        "Script that sent this mail: %s\n" % sys.argv[0],
        "CWD: %s\n" % os.getcwd(),
        "Current host: %s\n" % socket.gethostname(),
        "Current time: %s\n" % time.strftime("%X %x %Z"),
    ]
    return "".join(details)
def send_mail(emails, msg, runid, description):
    """ Send the email to the supplied people

    Sends via the local SMTP server. Currently only the WRANGLERS
    receive mail; *emails* (the commit authors) is unused until the
    commented-out 'go live' line below is enabled.
    """
    msg = MIMEText(msg)
    msg['Subject'] = "Build error - Run ID %s - Description '%s'" % (runid, description)
    msg['From'] = EMAIL_FROM_ADDRESS
    msg['To'] = str.join(",", WRANGLERS)
    # Uncomment this line to go live.  During testing we will just send to wranglers
    #msg['To'] = str.join(",", emails) + "," + str.join(",", WRANGLERS)
    # connect() with no arguments targets localhost:25.
    s = smtplib.SMTP()
    s.connect()
    s.sendmail(EMAIL_FROM_ADDRESS, msg['To'], msg.as_string())
    s.quit()
def die_nice(msg, runid="?"):
    """ Email out an error message to peeps

    Mails the wranglers (send_mail with no extra addresses), echoes the
    message to stdout so it appears in the NMI build output, and exits
    with status 1.
    """
    # We will send email with no extra addresses.  This will just go to the wranglers.
    send_mail({}, msg, runid, "Error occurred")
    # Also print this to the screen so it shows up in the NMI build output
    print msg
    print "Exiting with error from %s" % sys.argv[0]
    sys.exit(1)
################
# Control flow #
################
print "Beginning %s" % sys.argv[0]
try:
    # Validate input RunID
    nmi_runid = validate_input()
    # Get the DB connection info from the config file
    config = load_config("/nmi/etc/nmi.conf")
    # Get the last runs (and email authors if this failure follows a pass)
    check_runid(config, nmi_runid)
except:
    # Broad except is deliberate: any unexpected error should still let
    # the script exit 0 so the NMI build itself is not marked broken.
    import traceback
    # TODO: it would be nice to email this error message, but traceback.format_exc is missing in Python 2.3 :(
    # So for now I'll just print it and then figure out a workaround at some unspecified point in the future.
    traceback.print_exc()
print "Exiting successfully from %s" % sys.argv[0]
sys.exit(0)
| djw8605/htcondor | nmi_tools/glue/build/email_on_failure.py | Python | apache-2.0 | 7,280 |
# -*- coding: utf-8 -*-
"""Deletes all feedback that have a description that's an empty
string.
"""
from __future__ import unicode_literals
import sys
from django.db import models, migrations
def delete_feedback(apps, schema_editor):
"""Delete all feedback with an empty description"""
Response = apps.get_model('feedback', 'Response')
qs = Response.objects.filter(description='')
count = qs.count()
if count > 0:
qs.delete()
if 'test' not in sys.argv:
print 'Deleted {0} responses'.format(count)
def noop(apps, schema_editor):
    # Reverse migration is intentionally a no-op: deleted feedback
    # cannot be restored.
    pass
class Migration(migrations.Migration):
    # Data migration: purge feedback responses whose description is
    # empty (reversing is a no-op).

    dependencies = [
        ('feedback', '0002_make_products'),
    ]

    operations = [
        migrations.RunPython(delete_feedback, noop)
    ]
| staranjeet/fjord | fjord/feedback/migrations/0003_delete_empty_desc_feedback.py | Python | bsd-3-clause | 783 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Jeremy Emerson'
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rule_domain
from core.domain import stats_domain
from core.domain import stats_services
import feconf
import test_utils
# TODO(sfederwisch): Move off of old models. Old models use string
# versions of the rules, while the new ones take in the whole rule.
# This will require moving off of DEFAULT_RULESPEC_STR in those cases.
class EventHandlerUnitTests(test_utils.GenericTestBase):
    """Test the event handler methods."""

    DEFAULT_RULESPEC_STR = exp_domain.DEFAULT_RULESPEC_STR
    # Fix: removed a stray trailing semicolon from this assignment.
    DEFAULT_RULESPEC = exp_domain.RuleSpec.get_default_rule_spec(
        'sid', 'NormalizedString')
    SUBMIT_HANDLER = stats_services.SUBMIT_HANDLER_NAME

    def test_record_state_hit(self):
        # First hit counts as a first entry with no answer yet.
        stats_services.EventHandler.record_state_hit('eid', 'sname', True)

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEquals(counter.first_entry_count, 1)
        self.assertEquals(counter.subsequent_entries_count, 0)
        self.assertEquals(counter.resolved_answer_count, 0)
        self.assertEquals(counter.active_answer_count, 0)
        self.assertEquals(counter.total_entry_count, 1)
        self.assertEquals(counter.no_answer_count, 1)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(answer_log.answers, {})

        # A non-first hit increments the subsequent-entries counter.
        stats_services.EventHandler.record_state_hit('eid', 'sname', False)

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEquals(counter.first_entry_count, 1)
        self.assertEquals(counter.subsequent_entries_count, 1)
        self.assertEquals(counter.resolved_answer_count, 0)
        self.assertEquals(counter.active_answer_count, 0)
        self.assertEquals(counter.total_entry_count, 2)
        self.assertEquals(counter.no_answer_count, 2)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(answer_log.answers, {})

    def test_record_answer_submitted(self):
        # Hit plus answer: the answer becomes active, no_answer drops to 0.
        stats_services.EventHandler.record_state_hit('eid', 'sname', True)
        stats_services.EventHandler.record_answer_submitted(
            'eid', 1, 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC, 'answer')

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEquals(counter.first_entry_count, 1)
        self.assertEquals(counter.subsequent_entries_count, 0)
        self.assertEquals(counter.total_entry_count, 1)
        self.assertEquals(counter.resolved_answer_count, 0)
        self.assertEquals(counter.active_answer_count, 1)
        self.assertEquals(counter.no_answer_count, 0)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(answer_log.answers, {'answer': 1})

        # Submitting the same answer again bumps its count to 2.
        stats_services.EventHandler.record_state_hit('eid', 'sname', False)
        stats_services.EventHandler.record_answer_submitted(
            'eid', 1, 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC, 'answer')

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEquals(counter.first_entry_count, 1)
        self.assertEquals(counter.subsequent_entries_count, 1)
        self.assertEquals(counter.total_entry_count, 2)
        self.assertEquals(counter.resolved_answer_count, 0)
        self.assertEquals(counter.active_answer_count, 2)
        self.assertEquals(counter.no_answer_count, 0)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(answer_log.answers, {'answer': 2})

        # A hit with no answer only affects the entry/no-answer counters.
        stats_services.EventHandler.record_state_hit('eid', 'sname', False)

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEquals(counter.first_entry_count, 1)
        self.assertEquals(counter.subsequent_entries_count, 2)
        self.assertEquals(counter.total_entry_count, 3)
        self.assertEquals(counter.resolved_answer_count, 0)
        self.assertEquals(counter.active_answer_count, 2)
        self.assertEquals(counter.no_answer_count, 1)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(answer_log.answers, {'answer': 2})

    def test_resolve_answers_for_default_rule(self):
        stats_services.EventHandler.record_state_hit('eid', 'sname', True)

        # Submit three answers.
        stats_services.EventHandler.record_answer_submitted(
            'eid', 1, 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC,
            'a1')
        stats_services.EventHandler.record_answer_submitted(
            'eid', 1, 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC,
            'a2')
        stats_services.EventHandler.record_answer_submitted(
            'eid', 1, 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC,
            'a3')

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEquals(counter.resolved_answer_count, 0)
        self.assertEquals(counter.active_answer_count, 3)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(
            answer_log.answers, {'a1': 1, 'a2': 1, 'a3': 1})

        # Nothing changes if you try to resolve an invalid answer.
        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', 'sname', self.SUBMIT_HANDLER, ['fake_answer'])
        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(
            answer_log.answers, {'a1': 1, 'a2': 1, 'a3': 1})

        # Resolve two answers.
        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', 'sname', self.SUBMIT_HANDLER, ['a1', 'a2'])

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEquals(counter.resolved_answer_count, 2)
        self.assertEquals(counter.active_answer_count, 1)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(answer_log.answers, {'a3': 1})

        # Nothing changes if you try to resolve an answer that has already
        # been resolved.
        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', 'sname', self.SUBMIT_HANDLER, ['a1'])
        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEquals(answer_log.answers, {'a3': 1})

        # Resolve the last answer.
        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', 'sname', self.SUBMIT_HANDLER, ['a3'])

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEquals(counter.resolved_answer_count, 3)
        self.assertEquals(counter.active_answer_count, 0)

        # NOTE(review): this lookup passes the literal string 'Rule'
        # rather than self.DEFAULT_RULESPEC_STR, so it always fetches an
        # empty log -- confirm which rule string was intended.
        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, 'Rule')
        self.assertEquals(answer_log.answers, {})
class StatsServicesUnitTests(test_utils.GenericTestBase):
    """Test the statistics services."""

    DEFAULT_RULESPEC_STR = exp_domain.DEFAULT_RULESPEC_STR
    SUBMIT_HANDLER = stats_services.SUBMIT_HANDLER_NAME

    def test_get_user_stats(self):
        """get_user_stats() should return exactly the feedback left by that user."""
        # Create two explorations owned by uid0.
        exp0 = exp_domain.Exploration.create_default_exploration(
            'eid0', 'title0', 'category')
        exp_services.save_new_exploration('uid0', exp0)
        exp1 = exp_domain.Exploration.create_default_exploration(
            'eid1', 'title1', 'category')
        exp_services.save_new_exploration('uid0', exp1)

        # One state-level and one exploration-level feedback item.
        f0 = stats_domain.FeedbackItem.create_feedback_for_state(
            'eid0', 'welcome', 'my feedback', None, 'uid0')
        f1 = stats_domain.FeedbackItem.create_feedback_for_exploration(
            'eid1', 'another feedback', None, 'uid0')

        # This should return the both the feedback.
        self.assertEquals(stats_services.get_user_stats('uid0'), {'feedback': {
            f0.id: {
                'content': 'my feedback',
                'exp_id': 'eid0',
                'exp_title': 'title0',
                'state_name': 'welcome',
                'status': 'new',
                'target_id': 'state:eid0.welcome',
            },
            f1.id: {
                'content': 'another feedback',
                'exp_id': 'eid1',
                'exp_title': 'title1',
                'state_name': None,
                'status': 'new',
                'target_id': 'exploration:eid1',
            },
        }})

        # uid1 does not have any feedbacks.
        self.assertEquals(stats_services.get_user_stats('uid1'),
                          {'feedback':{}})
class TopImprovableStatesUnitTests(test_utils.GenericTestBase):
"""Test the get_top_improvable_states() function."""
DEFAULT_RULESPEC_STR = exp_domain.DEFAULT_RULESPEC_STR
DEFAULT_RULESPEC = exp_domain.RuleSpec.get_default_rule_spec('sid', 'NormalizedString');
SUBMIT_HANDLER = stats_services.SUBMIT_HANDLER_NAME
    def test_get_top_improvable_states(self):
        """Seven default-rule hits should rank the init state as improvable."""
        exp = exp_domain.Exploration.create_default_exploration(
            'eid', 'A title', 'A category')
        exp_services.save_new_exploration('fake@user.com', exp)
        state_name = exp.init_state_name

        # Five submissions of '1' and two of '2', all hitting the
        # default rule.
        for _ in range(5):
            stats_services.EventHandler.record_state_hit(
                'eid', state_name, True)
            stats_services.EventHandler.record_answer_submitted(
                'eid', 1, state_name, self.SUBMIT_HANDLER,
                self.DEFAULT_RULESPEC, '1')
        for _ in range(2):
            stats_services.EventHandler.record_answer_submitted(
                'eid', 1, state_name, self.SUBMIT_HANDLER,
                self.DEFAULT_RULESPEC, '2')

        # rank 3 = number of distinct-answer submissions counted against
        # the default rule (5x'1' collapses with 2x'2' per the ranking).
        expected_top_state = {
            'exp_id': 'eid', 'type': 'default', 'rank': 3,
            'state_name': exp.init_state_name
        }

        states = stats_services.get_top_improvable_states(['eid'], 10)
        self.assertEquals(len(states), 1)
        self.assertDictContainsSubset(expected_top_state, states[0])
def test_single_default_rule_hit(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
state_name = exp.init_state_name
stats_services.EventHandler.record_state_hit('eid', state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', 1, state_name, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC, '1')
expected_top_state = {
'exp_id': 'eid', 'type': 'default', 'rank': 1,
'state_name': exp.init_state_name
}
states = stats_services.get_top_improvable_states(['eid'], 2)
self.assertEquals(len(states), 1)
self.assertDictContainsSubset(expected_top_state, states[0])
def test_no_improvement_flag_hit(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
not_default_rule_spec = exp_domain.RuleSpec({
'rule_type': rule_domain.ATOMIC_RULE_TYPE,
'name': 'NotDefault',
'inputs': {},
'subject': 'answer'
}, exp.init_state_name, [], [], 'NormalizedString')
default_rule_spec = exp_domain.RuleSpec.get_default_rule_spec(feconf.END_DEST, 'NormalizedString')
exp.init_state.widget.handlers[0].rule_specs = [
not_default_rule_spec, default_rule_spec
]
exp_services._save_exploration('fake@user.com', exp, '', [])
stats_services.EventHandler.record_state_hit(
'eid', exp.init_state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', 1, exp.init_state_name, self.SUBMIT_HANDLER,
not_default_rule_spec, '1')
states = stats_services.get_top_improvable_states(['eid'], 1)
self.assertEquals(len(states), 0)
def test_incomplete_and_default_flags(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
state_name = exp.init_state_name
# Hit the default rule once, and fail to answer twice. The result
# should be classified as incomplete.
for _ in range(3):
stats_services.EventHandler.record_state_hit(
'eid', state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', 1, state_name, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC, '1')
states = stats_services.get_top_improvable_states(['eid'], 2)
self.assertEquals(len(states), 1)
self.assertEquals(states[0]['rank'], 2)
self.assertEquals(states[0]['type'], 'incomplete')
# Now hit the default two more times. The result should be classified
# as default.
for i in range(2):
stats_services.EventHandler.record_state_hit(
'eid', state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', 1, state_name, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC, '1')
states = stats_services.get_top_improvable_states(['eid'], 2)
self.assertEquals(len(states), 1)
self.assertEquals(states[0]['rank'], 3)
self.assertEquals(states[0]['type'], 'default')
def test_two_state_default_hit(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
FIRST_STATE_NAME = exp.init_state_name
SECOND_STATE_NAME = 'State 2'
exp.add_states([SECOND_STATE_NAME])
exp_services._save_exploration('fake@user.com', exp, '', [])
# Hit the default rule of state 1 once, and the default rule of state 2
# twice.
stats_services.EventHandler.record_state_hit(
'eid', FIRST_STATE_NAME, True)
stats_services.EventHandler.record_answer_submitted(
'eid', 1, FIRST_STATE_NAME, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC, '1')
for i in range(2):
stats_services.EventHandler.record_state_hit(
'eid', SECOND_STATE_NAME, True)
stats_services.EventHandler.record_answer_submitted(
'eid', 1, SECOND_STATE_NAME, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC, '1')
states = stats_services.get_top_improvable_states(['eid'], 5)
self.assertEquals(len(states), 2)
self.assertDictContainsSubset({
'rank': 2,
'type': 'default',
'state_name': SECOND_STATE_NAME
}, states[0])
self.assertDictContainsSubset({
'rank': 1,
'type': 'default',
'state_name': FIRST_STATE_NAME
}, states[1])
# Hit the default rule of state 1 two more times.
for i in range(2):
stats_services.EventHandler.record_state_hit(
'eid', FIRST_STATE_NAME, True)
stats_services.EventHandler.record_answer_submitted(
'eid', 1, FIRST_STATE_NAME, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC, '1')
states = stats_services.get_top_improvable_states(['eid'], 5)
self.assertEquals(len(states), 2)
self.assertDictContainsSubset({
'rank': 3,
'type': 'default',
'state_name': FIRST_STATE_NAME
}, states[0])
self.assertDictContainsSubset({
'rank': 2,
'type': 'default',
'state_name': SECOND_STATE_NAME
}, states[1])
# Try getting just the top improvable state.
states = stats_services.get_top_improvable_states(['eid'], 1)
self.assertEquals(len(states), 1)
self.assertDictContainsSubset({
'rank': 3,
'type': 'default',
'state_name': FIRST_STATE_NAME
}, states[0])
class UnresolvedAnswersTests(test_utils.GenericTestBase):
    """Test the unresolved answers methods."""

    DEFAULT_RULESPEC_STR = exp_domain.DEFAULT_RULESPEC_STR
    # Fixed: removed a stray trailing semicolon and wrapped the long line.
    DEFAULT_RULESPEC = exp_domain.RuleSpec.get_default_rule_spec(
        'sid', 'NormalizedString')
    SUBMIT_HANDLER = stats_services.SUBMIT_HANDLER_NAME

    def test_get_unresolved_answers(self):
        """Counts accumulate per answer and are cleared once resolved."""
        self.assertEquals(
            stats_services.get_unresolved_answers_for_default_rule(
                'eid', 'sid'), {})

        stats_services.EventHandler.record_answer_submitted(
            'eid', 1, 'sid', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC, 'a1')
        self.assertEquals(
            stats_services.get_unresolved_answers_for_default_rule(
                'eid', 'sid'), {'a1': 1})

        stats_services.EventHandler.record_answer_submitted(
            'eid', 1, 'sid', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC, 'a1')
        self.assertEquals(
            stats_services.get_unresolved_answers_for_default_rule(
                'eid', 'sid'), {'a1': 2})

        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', 'sid', self.SUBMIT_HANDLER, ['a1'])
        self.assertEquals(
            stats_services.get_unresolved_answers_for_default_rule(
                'eid', 'sid'), {})
# __all__ = ['KnowledgeModel', 'MongoAccessor', 'GridFSModel']
| deleidos/de-visualization-wizard | language-processing-service/modeler/modelerserver/knowledgemodeler/__init__.py | Python | apache-2.0 | 63 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: permanently drops the ``tweet_id`` FK column
    # from the ``map_tweetchunk`` table. Irreversible by design (see
    # backwards()).

    def forwards(self, orm):
        # Deleting field 'TweetChunk.tweet'
        db.delete_column(u'map_tweetchunk', 'tweet_id')

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'TweetChunk.tweet'
        raise RuntimeError("Cannot reverse this migration. 'TweetChunk.tweet' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration # Adding field 'TweetChunk.tweet'
        # NOTE: the add_column call below is intentionally unreachable (it
        # follows the raise); South generates it as a template in case a
        # manual reverse migration is ever written.
        db.add_column(u'map_tweetchunk', 'tweet',
                      self.gf('twitter_stream.fields.PositiveBigAutoForeignKey')(to=orm['twitter_stream.Tweet']),
                      keep_default=False)

    # Frozen ORM snapshot used by South at migration time; do not edit by
    # hand — regenerate with ``schemamigration`` if the models change.
    models = {
        u'map.maptimeframe': {
            'Meta': {'object_name': 'MapTimeFrame'},
            'analysis_time': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'calculated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'chunks_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'cleanup_time': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'missing_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'node_cache_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'node_cache_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'nodes_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'tweet_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'map.treenode': {
            'Meta': {'object_name': 'TreeNode', 'index_together': "[['parent', 'word'], ['created_at', 'parent']]"},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['map.TreeNode']"}),
            'word': ('django.db.models.fields.CharField', [], {'max_length': '150'})
        },
        u'map.tweetchunk': {
            'Meta': {'object_name': 'TweetChunk', 'index_together': "[['tz_country', 'node']]"},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'id': ('twitter_stream.fields.PositiveBigAutoField', [], {'primary_key': 'True'}),
            'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chunks'", 'to': u"orm['map.TreeNode']"}),
            'tweet_text': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '250', 'null': 'True'}),
            'twitter_id': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
            'tz_country': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
        },
        u'map.tz_country': {
            'Meta': {'object_name': 'Tz_Country'},
            'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user_time_zone': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        }
    }

    complete_apps = ['map']
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routes for the analytics app, using the old-style ``patterns('', ...)``
# form with dotted-string view paths. Note: ``admin`` is imported above but
# no admin route is registered here — presumably handled elsewhere (verify).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'api.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    #url(r'^home/','analytics.views.home')
    url(r'^valid/','analytics.views.valid_check'),
    url(r'^data/','analytics.views.data_view'),
    url(r'^chart/','analytics.views.chart_view'),
    url(r'^sheet/','analytics.views.sheet_names'),
)
| CSC-ORG/Dynamic-Dashboard-2015 | engine/api/analytics/urls.py | Python | mit | 465 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libffi(AutotoolsPackage):
    """The libffi library provides a portable, high level programming
    interface to various calling conventions. This allows a programmer
    to call any function specified by a call interface description at
    run time."""
    homepage = "https://sourceware.org/libffi/"

    # 3.2.1 is fetched from a mirror of the sourceware.org release tree;
    # the checksum is the upstream md5.
    version('3.2.1', '83b89587607e3eb65c70d361f13bab43',
            url="https://www.mirrorservice.org/sites/sourceware.org/pub/libffi/libffi-3.2.1.tar.gz")
    # version('3.1', 'f5898b29bbfd70502831a212d9249d10',url =
    # "ftp://sourceware.org/pub/libffi/libffi-3.1.tar.gz") # Has a bug
    # $(lib64) instead of ${lib64} in libffi.pc
| TheTimmy/spack | var/spack/repos/builtin/packages/libffi/package.py | Python | lgpl-2.1 | 1,904 |
#!/usr/bin/env python
# ----------------------------------------------------------------
# Copyright 2017 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
""" Simple proxy server forwarding messages between ConfD TCP session
and YDK TCP client.
"""
__author__ = "Xiaoqin Zhu"
__email__ = "xiaoqinz@cisco.com"
__usage__ = """ ./tcp_proxy_server.py -b 12307 -c 2023"""
import os
import sys
import time
import socket
import logging
import argparse
if sys.version_info < (3,):
import SocketServer as socketserver
from urlparse import urlparse
else:
import socketserver
from urllib.parse import urlparse
sys.tracebacklimit = 0
logging.basicConfig(level=logging.INFO,
format='%(name)s: %(message)s',
)
FOUR_k = 4096
EOM_10 = "]]>]]>"
EOM_11 = "\n##\n"
HELLO = """
<?xml version="1.0" encoding="UTF-8"?>
<hello xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<capabilities>
<capability>urn:ietf:params:netconf:base:1.0</capability>
<capability>urn:ietf:params:netconf:base:1.1</capability>
</capabilities>
</hello>
"""
class DummyHandler(socketserver.BaseRequestHandler):
    """Dummy request handler.

    TCPServer requires a handler class; the real per-connection work is
    done in SimpleProxyServer.process_request, so this handler is a no-op.
    """
    def handle(self):
        """Nothing ..."""
        pass
class SimpleProxyServer(socketserver.TCPServer):
    """Simple proxy server: ConfD TCP <--> YDK TCP.

    Attribute:
        request_queue_size (int): maximum size of request queue acceptable
        confd_address (tuple): (host, port) of the ConfD endpoint
        confd_socket (socket.socket): socket for confd connection
        reset_confd (bool): reset ConfD connection if True
        logger (logging.Logger): logger.
    """

    def __init__(self, server_address, confd_address, handler_class=DummyHandler):
        self.logger = logging.getLogger('SimpleProxyServer')
        self.logger.debug('__init__')
        self.request_queue_size = 50
        self.reset_confd = True
        # Bug fix: keep the ConfD address on the instance. Previously the
        # constructor argument was ignored and confd_connect() read a
        # module-level global that happened to share its name.
        self.confd_address = confd_address
        socketserver.TCPServer.__init__(self, server_address, handler_class)
        return

    def confd_connect(self):
        """Start connection, send the ConfD TCP session header."""
        self.confd_socket = socket.create_connection(self.confd_address)
        confd_header = "[admin;%s;tcp;%d;%d;%s;%s;%s;]\n" % (
            self.confd_address[0],
            os.getuid(),
            os.getgid(),
            "",
            os.getenv("HOME", "/tmp"),
            "")
        self.send_confd(confd_header)

    def serve_forever(self):
        """Hijacked socketserver.TCPServer.serve_forever. Main loop moved to
        SimpleProxyServer.process_request.
        """
        while True:
            try:
                self.handle_request()
            except Exception as e:
                # A dropped client/ConfD connection must not kill the proxy;
                # log it (previously swallowed silently) and keep serving.
                self.logger.debug('request aborted: %s', e)
        return

    def process_request(self, request, client_address):
        """Ping-pang data between ConfD TCP session and YDK TCP client.

        Args:
            request(socket.socket): socket object for client connection.
            client_address(tuple of str): (hostname, port)
        """
        while True:
            self.logger.debug('Starting new send/recv request...')
            if self.reset_confd:
                self.confd_connect()
                self.finish_confd_connection(request)
            else:
                self.forward_client(request, EOM_11)
                time.sleep(0.1)  # wait for reply
                self.forward(request, EOM_11)
            self.logger.debug('Finished one send/recv request...')

    def finish_confd_connection(self, request):
        """Finish ConfD connection:
            - Drop username sent from the client.
            - Drop password sent from the client.
            - Send hello request to ConfD.
            - Send hello reply back to the client.
            - Drop hello request sent from the client.
        """
        # sendall (not send) guarantees the whole prompt reaches the client.
        request.sendall("Username: ".encode('utf-8'))
        self.client_username = request.recv(FOUR_k)
        request.sendall("Password: ".encode('utf-8'))
        self.client_password = request.recv(FOUR_k)

        # Chunked-framing length header followed by the hello payload.
        hello_requst = '\n#%d\n' % len(HELLO) + HELLO
        self.send_confd(hello_requst)
        self.send_confd(EOM_10)
        self.forward(request, EOM_10)
        # need to drop this hello request,
        # the hello message exchange order for ConfD is different from XR
        _ = self.recv_client(request, EOM_10)
        self.reset_confd = False

    def forward(self, request, eom):
        """ConfD --> Client."""
        self.send_client(request, self.recv_confd(eom))

    def forward_client(self, request, eom):
        """Client --> ConfD."""
        self.send_confd(self.recv_client(request, eom))

    def _recv(self, request, eom):
        """Receive data ends with eom through request socket.

        Args:
            request (socket.socket): request socket.
            eom (str): end of message marker.
        """
        data = []
        last_chunk = ""
        while True:
            last_chunk = request.recv(FOUR_k).decode('utf-8')
            if not last_chunk:
                # Peer closed the connection: tear down and force a fresh
                # ConfD session on the next request.
                request.close()
                self.reset_confd = True
                raise Exception("No data")
            elif last_chunk.endswith(eom):
                data.append(last_chunk)
                break
            data.append(last_chunk)
        data = ''.join(data)
        self.logger.debug("Receiving...\n\t{}".format(data))
        return data

    def _send(self, request, data):
        """Send data through request socket.

        Args:
            request (socket.socket): request socket.
            data (str): data payload.
        """
        self.logger.debug("Sending...\n\t{}".format(data))
        try:
            request.sendall(data.encode('utf-8'))
        except:
            request.close()
            self.reset_confd = True

    def recv_confd(self, eom):
        """Receive data from ConfD until eom.

        Args:
            eom (str): end of message marker.
        """
        return self._recv(self.confd_socket, eom)

    def send_confd(self, data):
        """Send data to ConfD.

        Args:
            data (str): data payload.
        """
        self._send(self.confd_socket, data)

    def recv_client(self, request, eom):
        """Receive data through client socket until eom.

        Args:
            request (socket.socket): client socket
            eom (str): end of message marker.
        """
        return self._recv(request, eom)

    def send_client(self, request, data):
        """Send data to client through request socket.

        Args:
            request (socket.socket): client request socket.
            data (str): data payload send to the client.
        """
        self._send(request, data)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog="confd_tcp_proxy_server",
                                     usage="%(prog)s [options]")
    # Fixed: -v previously consumed a value and was never used; it is now a
    # flag that enables debug logging.
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="enable verbose (debug) logging")
    # Fixed: both ports are mandatory, so argparse now emits a clean usage
    # error instead of the former TypeError crash when they were omitted.
    parser.add_argument("-b", "--bind", dest='bind', type=int, required=True,
                        help="binding port, 12307")
    parser.add_argument("-c", "--confd", dest='confd', type=int, required=True,
                        help="ConfD address, 2023")
    args = parser.parse_args()

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    binding_address = ('127.0.0.1', args.bind)
    confd_address = ('127.0.0.1', args.confd)

    server = SimpleProxyServer(binding_address, confd_address, DummyHandler)
    # serve_forever() loops indefinitely; the former trailing exit() call
    # was unreachable and has been removed.
    server.serve_forever()
| psykokwak4/ydk-gen | test/tcp_proxy_server.py | Python | apache-2.0 | 8,117 |
import megamenu
| OdooCommunityWidgets/website_navigation_megamenu | __init__.py | Python | mit | 16 |
"""Test cloudbridge.security modules."""
import cloudbridge.base.helpers as cb_helpers
from cloudbridge.interfaces.exceptions import DuplicateResourceException
from cloudbridge.interfaces.resources import KeyPair
from cloudbridge.interfaces.resources import TrafficDirection
from cloudbridge.interfaces.resources import VMFirewall
from cloudbridge.interfaces.resources import VMFirewallRule
from tests import helpers
from tests.helpers import ProviderTestBase
from tests.helpers import standard_interface_tests as sit
class CloudSecurityServiceTestCase(ProviderTestBase):
    # Exercises the provider's key-pair and VM-firewall security services.

    _multiprocess_can_split_ = True

    @helpers.skipIfNoService(['security.vm_firewalls'])
    def test_storage_services_event_pattern(self):
        # NOTE(review): despite its name, this test checks *security*
        # service event patterns (key_pairs and vm_firewalls), not storage.
        # The name is kept so the discovered test id does not change.
        self.assertEqual(
            self.provider.security.key_pairs.
            _service_event_pattern,
            "provider.security.key_pairs",
            "Event pattern for {} service should be '{}', "
            "but found '{}'.".format("key_pairs",
                                     "provider.security.key_pairs",
                                     self.provider.security.
                                     key_pairs.
                                     _service_event_pattern))
        self.assertEqual(
            self.provider.security.vm_firewalls._service_event_pattern,
            "provider.security.vm_firewalls",
            "Event pattern for {} service should be '{}', "
            "but found '{}'.".format("vm_firewalls",
                                     "provider.security.vm_firewalls",
                                     self.provider.security.vm_firewalls.
                                     _service_event_pattern))

    @helpers.skipIfNoService(['security.key_pairs'])
    def test_crud_key_pair_service(self):
        # Standard create/read/update/delete cycle driven by sit.check_crud.

        def create_kp(name):
            return self.provider.security.key_pairs.create(name=name)

        def cleanup_kp(kp):
            if kp:
                self.provider.security.key_pairs.delete(kp.id)

        def extra_tests(kp):
            # Recreating existing keypair should raise an exception
            with self.assertRaises(DuplicateResourceException):
                self.provider.security.key_pairs.create(name=kp.name)

        sit.check_crud(self, self.provider.security.key_pairs, KeyPair,
                       "cb-crudkp", create_kp, cleanup_kp,
                       extra_test_func=extra_tests)

    @helpers.skipIfNoService(['security.key_pairs'])
    def test_key_pair_properties(self):
        # Private key material is only returned on initial creation.
        name = 'cb-kpprops-{0}'.format(helpers.get_uuid())
        kp = self.provider.security.key_pairs.create(name=name)
        with cb_helpers.cleanup_action(lambda: kp.delete()):
            self.assertIsNotNone(
                kp.material,
                "KeyPair material is empty but it should not be.")
            # get the keypair again - keypair material should now be empty
            kp = self.provider.security.key_pairs.get(kp.id)
            self.assertIsNone(kp.material,
                              "Keypair material should now be empty")

    @helpers.skipIfNoService(['security.key_pairs'])
    def test_import_key_pair(self):
        # Importing a public key never yields private material.
        name = 'cb-kpimport-{0}'.format(helpers.get_uuid())
        public_key, _ = cb_helpers.generate_key_pair()
        kp = self.provider.security.key_pairs.create(
            name=name, public_key_material=public_key)
        with cb_helpers.cleanup_action(lambda: kp.delete()):
            self.assertIsNone(kp.material, "Private KeyPair material should"
                              " be None when key is imported.")

    @helpers.skipIfNoService(['security.vm_firewalls'])
    def test_crud_vm_firewall(self):
        subnet = helpers.get_or_create_default_subnet(self.provider)
        net = subnet.network

        def create_fw(label):
            return self.provider.security.vm_firewalls.create(
                label=label, description=label, network=net.id)

        def cleanup_fw(fw):
            if fw:
                fw.delete()

        def network_id_test(fw):
            # Checking that the network ID is returned correctly
            self.assertEqual(fw.network_id, net.id)

        sit.check_crud(self, self.provider.security.vm_firewalls,
                       VMFirewall, "cb-crudfw", create_fw, cleanup_fw,
                       extra_test_func=network_id_test)

    @helpers.skipIfNoService(['security.vm_firewalls'])
    def test_vm_firewall_properties(self):
        label = 'cb-propfw-{0}'.format(helpers.get_uuid())

        # Declare these variables and late binding will allow
        # the cleanup method access to the most current values
        fw = None
        with cb_helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
                vm_firewall=fw)):
            subnet = helpers.get_or_create_default_subnet(self.provider)
            net = subnet.network
            fw = self.provider.security.vm_firewalls.create(
                label=label, description=label, network=net.id)
            self.assertEqual(label, fw.description)

    @helpers.skipIfNoService(['security.vm_firewalls'])
    def test_crud_vm_firewall_rules(self):
        label = 'cb-crudfw-rules-{0}'.format(helpers.get_uuid())
        subnet = helpers.get_or_create_default_subnet(self.provider)
        net = subnet.network

        fw = None
        with cb_helpers.cleanup_action(lambda: fw.delete()):
            fw = self.provider.security.vm_firewalls.create(
                label=label, description=label, network=net.id)

            def create_fw_rule(label):
                return fw.rules.create(
                    direction=TrafficDirection.INBOUND, protocol='tcp',
                    from_port=1111, to_port=1111, cidr='0.0.0.0/0')

            def cleanup_fw_rule(rule):
                if rule:
                    rule.delete()

            sit.check_crud(self, fw.rules, VMFirewallRule, "cb-crudfwrule",
                           create_fw_rule, cleanup_fw_rule,
                           skip_name_check=True)

    @helpers.skipIfNoService(['security.vm_firewalls'])
    def test_vm_firewall_rule_properties(self):
        label = 'cb-propfwrule-{0}'.format(helpers.get_uuid())

        # Declare these variables and late binding will allow
        # the cleanup method access to the most current values
        fw = None
        with cb_helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
                vm_firewall=fw)):
            subnet = helpers.get_or_create_default_subnet(self.provider)
            net = subnet.network
            fw = self.provider.security.vm_firewalls.create(
                label=label, description=label, network=net.id)
            rule = fw.rules.create(
                direction=TrafficDirection.INBOUND, protocol='tcp',
                from_port=1111, to_port=1111, cidr='0.0.0.0/0')
            self.assertEqual(rule.direction, TrafficDirection.INBOUND)
            self.assertEqual(rule.protocol, 'tcp')
            self.assertEqual(rule.from_port, 1111)
            self.assertEqual(rule.to_port, 1111)
            self.assertEqual(rule.cidr, '0.0.0.0/0')

    @helpers.skipIfNoService(['security.vm_firewalls'])
    def test_vm_firewall_rule_add_twice(self):
        label = 'cb-fwruletwice-{0}'.format(helpers.get_uuid())

        # Declare these variables and late binding will allow
        # the cleanup method access to the most current values
        fw = None
        with cb_helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
                vm_firewall=fw)):
            subnet = helpers.get_or_create_default_subnet(self.provider)
            net = subnet.network
            fw = self.provider.security.vm_firewalls.create(
                label=label, description=label, network=net.id)
            rule = fw.rules.create(
                direction=TrafficDirection.INBOUND, protocol='tcp',
                from_port=1111, to_port=1111, cidr='0.0.0.0/0')

            # attempting to add the same rule twice should succeed
            same_rule = fw.rules.create(
                direction=TrafficDirection.INBOUND, protocol='tcp',
                from_port=1111, to_port=1111, cidr='0.0.0.0/0')
            self.assertEqual(rule, same_rule)

    @helpers.skipIfNoService(['security.vm_firewalls'])
    def test_vm_firewall_group_rule(self):
        # A rule whose source/destination is another firewall (group rule).
        label = 'cb-fwrule-{0}'.format(helpers.get_uuid())

        # Declare these variables and late binding will allow
        # the cleanup method access to the most current values
        fw = None
        with cb_helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
                vm_firewall=fw)):
            subnet = helpers.get_or_create_default_subnet(self.provider)
            net = subnet.network
            fw = self.provider.security.vm_firewalls.create(
                label=label, description=label, network=net.id)
            rule = fw.rules.create(
                direction=TrafficDirection.INBOUND, src_dest_fw=fw,
                protocol='tcp', from_port=1, to_port=65535)
            self.assertTrue(
                rule.src_dest_fw.label == fw.label,
                "Expected VM firewall rule label {0}. Got {1}."
                .format(fw.label, rule.src_dest_fw.label))
            for r in fw.rules:
                r.delete()
            fw = self.provider.security.vm_firewalls.get(fw.id)  # update
            self.assertTrue(
                len(list(fw.rules)) == 0,
                "Deleting VMFirewallRule should delete it: {0}".format(
                    fw.rules))
        fwl = self.provider.security.vm_firewalls.list()
        found_fw = [f for f in fwl if f.label == label]
        self.assertTrue(
            len(found_fw) == 0,
            "VM firewall {0} should have been deleted but still exists."
            .format(label))
from examples_shared import init_parser, init_client, print_response
def list_product_configurations(client):
    """Return the product-specific configurations for this reseller.

    :param client: an initialized API client exposing a ``reseller`` service.
    """
    reseller_service = client.reseller
    return reseller_service.product_configuration()
if __name__ == "__main__":
    # Build the shared example CLI and pretty-print the reseller's
    # product configuration listing.
    # Fixed: description said 'Domain related examples.' — a copy-paste
    # leftover from the domain example script.
    parser = init_parser(description='Reseller related examples.')
    args = parser.parse_args()
    client = init_client(args)
    print_response(list_product_configurations(client=client))
| duct-tape/resello | examples/reseller.py | Python | bsd-3-clause | 462 |
from __future__ import unicode_literals
from django.db import models
from django.test import TestCase
from django.utils import six
from .models import (
First, Third, Parent, Child, Category, Record, Relation, Car, Driver)
class ManyToOneRegressionTests(TestCase):
    """Regression tests for many-to-one (ForeignKey) relations."""

    def test_object_creation(self):
        Third.objects.create(id='3', name='An example')
        parent = Parent(name='fred')
        parent.save()
        Child.objects.create(name='bam-bam', parent=parent)

    def test_fk_assignment_and_related_object_cache(self):
        # Tests of ForeignKey assignment and the related-object cache (see #6886).
        p = Parent.objects.create(name="Parent")
        c = Child.objects.create(name="Child", parent=p)

        # Look up the object again so that we get a "fresh" object.
        c = Child.objects.get(name="Child")
        p = c.parent

        # Accessing the related object again returns the exactly same object.
        # (Switched assertTrue(x is y) to assertIs for clearer failures.)
        self.assertIs(c.parent, p)

        # But if we kill the cache, we get a new object.
        del c._parent_cache
        self.assertIsNot(c.parent, p)

        # Assigning a new object results in that object getting cached immediately.
        p2 = Parent.objects.create(name="Parent 2")
        c.parent = p2
        self.assertIs(c.parent, p2)

        # Assigning None succeeds if field is null=True.
        p.bestchild = None
        self.assertIsNone(p.bestchild)

        # bestchild should still be None after saving.
        p.save()
        self.assertIsNone(p.bestchild)

        # bestchild should still be None after fetching the object again.
        p = Parent.objects.get(name="Parent")
        self.assertIsNone(p.bestchild)

        # Assigning None fails: Child.parent is null=False.
        self.assertRaises(ValueError, setattr, c, "parent", None)

        # You also can't assign an object of the wrong type here
        self.assertRaises(ValueError, setattr, c, "parent", First(id=1, second=1))

        # Nor can you explicitly assign None to Child.parent during object
        # creation (regression for #9649).
        self.assertRaises(ValueError, Child, name='xyzzy', parent=None)
        self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None)

        # Creation using keyword argument should cache the related object.
        p = Parent.objects.get(name="Parent")
        c = Child(parent=p)
        self.assertIs(c.parent, p)

        # Creation using keyword argument and unsaved related instance (#8070).
        p = Parent()
        c = Child(parent=p)
        self.assertIs(c.parent, p)

        # Creation using attname keyword argument and an id will cause the
        # related object to be fetched.
        p = Parent.objects.get(name="Parent")
        c = Child(parent_id=p.id)
        self.assertIsNot(c.parent, p)
        self.assertEqual(c.parent, p)

    def test_multiple_foreignkeys(self):
        # Test of multiple ForeignKeys to the same model (bug #7125).
        c1 = Category.objects.create(name='First')
        c2 = Category.objects.create(name='Second')
        c3 = Category.objects.create(name='Third')
        r1 = Record.objects.create(category=c1)
        r2 = Record.objects.create(category=c1)
        r3 = Record.objects.create(category=c2)
        r4 = Record.objects.create(category=c2)
        r5 = Record.objects.create(category=c3)
        Relation.objects.create(left=r1, right=r2)
        Relation.objects.create(left=r3, right=r4)
        Relation.objects.create(left=r1, right=r3)
        Relation.objects.create(left=r5, right=r2)
        Relation.objects.create(left=r3, right=r2)

        q1 = Relation.objects.filter(
            left__category__name__in=['First'],
            right__category__name__in=['Second'])
        self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])

        q2 = Category.objects.filter(
            record__left_set__right__category__name='Second').order_by('name')
        self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])

        p = Parent.objects.create(name="Parent")
        c = Child.objects.create(name="Child", parent=p)
        self.assertRaises(ValueError, Child.objects.create,
                          name="Grandchild", parent=c)

    def test_fk_instantiation_outside_model(self):
        # Regression for #12190 -- Should be able to instantiate a FK outside
        # of a model, and interrogate its related field.
        cat = models.ForeignKey(Category)
        self.assertEqual('id', cat.rel.get_related_field().name)

    def test_relation_unsaved(self):
        # Test that the <field>_set manager does not join on Null value fields (#17541)
        Third.objects.create(name='Third 1')
        Third.objects.create(name='Third 2')
        th = Third(name="testing")
        # The object isn't saved and thus the relation field is null - we
        # won't even execute a query in this case.
        with self.assertNumQueries(0):
            self.assertEqual(th.child_set.count(), 0)
        th.save()
        # Now the model is saved, so we will need to execute a query.
        with self.assertNumQueries(1):
            self.assertEqual(th.child_set.count(), 0)

    def test_related_null_to_field(self):
        c1 = Car.objects.create()
        d1 = Driver.objects.create()
        self.assertIs(d1.car, None)
        with self.assertNumQueries(0):
            self.assertEqual(list(c1.drivers.all()), [])
| ericholscher/django | tests/many_to_one_regress/tests.py | Python | bsd-3-clause | 5,426 |
import json
import re
from lib.validators import RegexValidator
from lib import util
class Properties:
    """Typed accessor over the release tool's JSON property file.

    Parses ``property_file`` once at construction time and exposes its
    contents through read-only properties. Also builds a regex validator
    for version-number strings and provides helpers to convert between
    version numbers and git tag names.
    """

    def __init__(self, script_root, property_file):
        """
        :param script_root: directory that anchors the relative paths
            found in the property file.
        :param property_file: path to the JSON property file.
        """
        self.__script_root = script_root
        self._property_file = property_file
        self.__load_properties()
        msg = "Version number does not match pattern: "
        msg += self.version_num_pattern
        self._version_num_validator = RegexValidator(
            self.version_num_pattern, msg)

    def __load_properties(self):
        """Parse the JSON property file into the private backing attributes."""
        with open(self._property_file, 'r') as property_file:
            props = json.load(property_file)
        self._version_num_pattern = props["version_number_pattern"]
        matching = props["tag_matching"]
        self._tag_prefix = prefix = matching["prefix"]
        self._tag_suffix = suffix = matching["suffix"]
        # A tag is the version number wrapped in the configured affixes,
        # anchored so partial matches are rejected.
        tag_pattern = prefix + self._version_num_pattern + suffix
        self._tag_pattern = "^" + tag_pattern + "$"
        exclude = props["exclude_commits"]
        self._excluded_emails = exclude["from_email"]
        self._exclude_commits_containing_version_num = exclude[
            "containing_version_number"]
        project = props["project"]
        self._project_name = project["name"]
        self._project_path = util.unify_paths(
            self.__script_root, project["path"])
        self._repo_web_url = util.sanitize_path(project["repo_web_url"])
        git = project["git"]
        self._project_remote = git["remote"]
        tickets = props["tickets"]
        self._ticket_pattern = tickets["pattern"]
        self._ticket_url = util.sanitize_path(tickets["url"])
        wiki = props["wiki"]
        # BUG FIX: this previously assigned to `self.wiki_path`, which is
        # a getter-only property below, so every instantiation raised
        # AttributeError. Write the backing attribute instead.
        self._wiki_path = util.unify_paths(self.__script_root, wiki["path"])
        release_notes = wiki["release_notes"]
        self._release_notes_index = release_notes["index"]
        self._release_notes_archive = util.sanitize_path(
            release_notes["archive"])
        self._release_notes_archive = util.sanitize_subpath(
            self.release_notes_archive)
        git = wiki["git"]
        self._wiki_remote = git["remote"]
        self._wiki_branch = git["branch"]

    def get_tag_name_from_version_num(self, version_num):
        """Return the git tag name corresponding to *version_num*."""
        return self.tag_prefix + version_num + self.tag_suffix

    def get_version_num_from_tag_name(self, tag_name):
        """Extract the version number embedded in *tag_name*."""
        return util.get_pattern_from_string(tag_name, self.version_num_pattern)

    def validate_version_num(self, version_num):
        """Validate *version_num* against the configured pattern."""
        return self._version_num_validator(version_num)

    # --- Read-only views of the loaded configuration -------------------

    @property
    def version_num_pattern(self):
        return self._version_num_pattern

    @property
    def tag_pattern(self):
        return self._tag_pattern

    @property
    def tag_prefix(self):
        return self._tag_prefix

    @property
    def tag_suffix(self):
        return self._tag_suffix

    @property
    def project_name(self):
        return self._project_name

    @property
    def project_path(self):
        return self._project_path

    @property
    def project_remote(self):
        return self._project_remote

    @property
    def repo_web_url(self):
        return self._repo_web_url

    @property
    def ticket_pattern(self):
        return self._ticket_pattern

    @property
    def ticket_url(self):
        return self._ticket_url

    @property
    def wiki_path(self):
        return self._wiki_path

    @property
    def release_notes_index(self):
        return self._release_notes_index

    @property
    def release_notes_archive(self):
        return self._release_notes_archive

    @property
    def wiki_remote(self):
        return self._wiki_remote

    @property
    def wiki_branch(self):
        return self._wiki_branch

    @property
    def excluded_emails(self):
        return self._excluded_emails

    @property
    def exclude_commits_containing_version_num(self):
        return self._exclude_commits_containing_version_num
| dgarlitt/release_notes_generator | lib/properties.py | Python | mit | 4,032 |
import numpy as np
from numpy.testing import *
from skimage.transform import *
def test_frt():
    """Round-trip check: ifrt2(frt2(x)) must reproduce x exactly.

    SIZE is chosen prime (the transform assumes a prime side length);
    the optional sympy block sanity-checks that choice.
    """
    SIZE = 59
    try:
        import sympy.ntheory as sn
        # Idiomatic truthiness check instead of `== True`.
        assert sn.isprime(SIZE)
    except ImportError:
        # sympy is optional; skip the primality sanity check.
        pass
    # Generate a symmetric integer test image from two mirrored ramps.
    L = np.tri(SIZE, dtype=np.int32) + np.tri(SIZE, dtype=np.int32)[::-1]
    f = frt2(L)
    fi = ifrt2(f)
    # The reconstruction must match the input element-for-element.
    assert len(np.nonzero(L - fi)[0]) == 0
| emmanuelle/scikits.image | skimage/transform/tests/test_finite_radon_transform.py | Python | bsd-3-clause | 413 |
from __future__ import absolute_import
import mimetypes
from os import path
from django.conf import settings
from django.conf.urls.static import static
from django.test import TestCase
from django.http import HttpResponseNotModified
from django.views.static import STREAM_CHUNK_SIZE
from .. import urls
from ..urls import media_dir
class StaticTests(TestCase):
    """Tests django views in django/views/static.py"""

    def setUp(self):
        self.prefix = 'site_media'
        # The static view only serves files when DEBUG is on; the original
        # setting is restored in tearDown.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True

    def tearDown(self):
        settings.DEBUG = self.old_debug

    def test_serve(self):
        "The static view can serve static media"
        media_files = ['file.txt', 'file.txt.gz']
        for filename in media_files:
            response = self.client.get('/views/%s/%s' % (self.prefix, filename))
            file_path = path.join(media_dir, filename)
            content = response.content
            # Context manager closes the fixture file instead of leaking it.
            with open(file_path) as f:
                self.assertEqual(f.read(), content)
            self.assertEqual(len(content), int(response['Content-Length']))
            self.assertEqual(mimetypes.guess_type(file_path)[1],
                             response.get('Content-Encoding', None))

    def test_chunked(self):
        "The static view should stream files in chunks to avoid large memory usage"
        response = self.client.get('/views/%s/%s' % (self.prefix, 'long-line.txt'))
        # Use the next() builtin rather than the Python-2-only .next() method.
        first_chunk = next(iter(response))
        self.assertEqual(len(first_chunk), STREAM_CHUNK_SIZE)
        second_chunk = next(response)
        # strip() to prevent OS line endings from causing differences
        self.assertEqual(len(second_chunk.strip()), 1449)

    def test_unknown_mime_type(self):
        # Unknown extensions fall back to the generic binary content type.
        response = self.client.get('/views/%s/file.unknown' % self.prefix)
        self.assertEqual('application/octet-stream', response['Content-Type'])

    def test_copes_with_empty_path_component(self):
        file_name = 'file.txt'
        response = self.client.get('/views/%s//%s' % (self.prefix, file_name))
        with open(path.join(media_dir, file_name)) as f:
            self.assertEqual(f.read(), response.content)

    def test_is_modified_since(self):
        file_name = 'file.txt'
        response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
                                   HTTP_IF_MODIFIED_SINCE='Thu, 1 Jan 1970 00:00:00 GMT')
        with open(path.join(media_dir, file_name)) as f:
            self.assertEqual(f.read(), response.content)

    def test_not_modified_since(self):
        file_name = 'file.txt'
        response = self.client.get(
            '/views/%s/%s' % (self.prefix, file_name),
            HTTP_IF_MODIFIED_SINCE='Mon, 18 Jan 2038 05:14:07 GMT'
            # This is 24h before max Unix time. Remember to fix Django and
            # update this test well before 2038 :)
        )
        self.assertTrue(isinstance(response, HttpResponseNotModified))

    def test_invalid_if_modified_since(self):
        """Handle bogus If-Modified-Since values gracefully

        Assume that a file is modified since an invalid timestamp as per RFC
        2616, section 14.25.
        """
        file_name = 'file.txt'
        invalid_date = 'Mon, 28 May 999999999999 28:25:26 GMT'
        response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
                                   HTTP_IF_MODIFIED_SINCE=invalid_date)
        content = response.content
        with open(path.join(media_dir, file_name)) as f:
            self.assertEqual(f.read(), content)
        self.assertEqual(len(content), int(response['Content-Length']))

    def test_invalid_if_modified_since2(self):
        """Handle even more bogus If-Modified-Since values gracefully

        Assume that a file is modified since an invalid timestamp as per RFC
        2616, section 14.25.
        """
        file_name = 'file.txt'
        invalid_date = ': 1291108438, Wed, 20 Oct 2010 14:05:00 GMT'
        response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
                                   HTTP_IF_MODIFIED_SINCE=invalid_date)
        content = response.content
        with open(path.join(media_dir, file_name)) as f:
            self.assertEqual(f.read(), content)
        self.assertEqual(len(content), int(response['Content-Length']))
class StaticHelperTest(StaticTests):
    """
    Test case to make sure the static URL pattern helper works as expected
    """

    def setUp(self):
        super(StaticHelperTest, self).setUp()
        self.prefix = 'media'
        # Snapshot the URLconf so tearDown can restore it unmodified.
        self._old_views_urlpatterns = list(urls.urlpatterns)
        urls.urlpatterns += static('/media/', document_root=media_dir)

    def tearDown(self):
        super(StaticHelperTest, self).tearDown()
        urls.urlpatterns = self._old_views_urlpatterns
| smartfile/django-1.4 | tests/regressiontests/views/tests/static.py | Python | bsd-3-clause | 4,749 |
# Faster R-CNN variant using GN+WS: builds on the plain R50-FPN 1x config.
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# Weight-Standardized convolutions (the "WS" in GN+WS).
conv_cfg = dict(type='ConvWS')
# Group Normalization with 32 groups; affine parameters are trainable.
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        # Backbone weights pretrained with GN+WS from the open-mmlab zoo.
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
    neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            # Box head with 4 shared convs + 1 FC so GN/WS can be applied.
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg)))
| open-mmlab/mmdetection | configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py | Python | apache-2.0 | 577 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit
# for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2021 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
import importlib
import numpy as np
from sympy import Rational
from fractions import Fraction
from scipy.spatial import Delaunay
from scipy.special import comb
from copy import copy
from .material import cached_property
from ..tools.math import independent_row_indices
try:
cdd = importlib.import_module('cdd')
except ImportError as err:
print(f'Warning: {err}. '
'For full functionality of BurnMan, please install pycddlib.')
class SimplexGrid(object):
    """
    Generates the points of a regular grid spanning a simplex with a
    user-defined number of vertices. Offers a lazy generator
    (:meth:`generate`), an eager collection (:meth:`grid`) and the total
    point count (:meth:`n_points`).

    This class is available as :class:`burnman.polytope.SimplexGrid`.
    """

    def __init__(self, vertices, points_per_edge):
        """
        Store the simplex dimensions after sanity-checking them.
        """
        assert vertices >= 2, 'need at least two vertices'
        assert points_per_edge >= 2, 'need at least 2 points per edge'
        self.vertices = vertices
        self.points_per_edge = points_per_edge

    def generate(self, generate_type='list'):
        """
        Yield the grid points of the simplex in lexicographic order.

        Parameters
        ----------
        generate_type : 'list' or 'array'
            Whether each yielded point is a list or an integer ndarray.

        Returns
        -------
        generator of lists or ndarrays (int, ndim=1)
            Grid points of the simplex.
        """
        n_vertices = self.vertices
        if generate_type == 'list':
            point = [0] * n_vertices
        elif generate_type == 'array':
            point = np.zeros(n_vertices, dtype=int)
        else:
            raise Exception('generate_type must be of type list or array.')

        # Start with the whole budget on the last vertex; redistribute it
        # one carry at a time, odometer-style.
        point[n_vertices - 1] = self.points_per_edge - 1
        pivot = n_vertices
        while True:
            yield copy(point)
            pivot -= 1
            if pivot == 0:
                return
            carried = point[pivot]
            point[pivot] = 0
            point[n_vertices - 1] = carried - 1
            point[pivot - 1] += 1
            if carried != 1:
                pivot = n_vertices

    def grid(self, generate_type='list'):
        """
        Return all simplex grid points at once: as a list of lists
        ('list', the default) or as a 2D integer array ('array').
        """
        if generate_type == 'list':
            return list(self.generate(generate_type))
        if generate_type == 'array':
            return np.array(list(self.generate(generate_type)))
        raise Exception('generate_type must be of type list or array.')

    def n_points(self):
        """
        Number of grid points implied by the chosen vertex count and
        points per edge (a binomial coefficient).
        """
        return comb(self.vertices + self.points_per_edge - 2,
                    self.vertices - 1, exact=True)
class MaterialPolytope(object):
    """
    A class that can be instantiated to create pycddlib polytope objects.
    These objects can be interrogated to provide the vertices satisfying the
    input constraints.

    This class is available as :class:`burnman.polytope.MaterialPolytope`.
    """

    def __init__(self, equalities,
                 inequalities,
                 number_type='fraction',
                 return_fractions=False,
                 independent_endmember_occupancies=None):
        """
        Initialization function for the MaterialPolytope class.
        Declares basis attributes of the class.

        Parameters
        ----------
        equalities: 2D numpy array
            A numpy array containing all the equalities of the polytope.
            Each row should evaluate to 0.
        inequalities: 2D numpy array
            A numpy array containing all the inequalities of the polytope.
            Each row should evaluate to <= 0.
        number_type: 'fraction' or 'float' (default is 'fraction')
            Whether pycddlib should read the input arrays as
            fractions or floats.
        return_fractions : boolean (default is False)
            Whether the generated polytope object should return fractions or
            floats.
        independent_endmember_occupancies : 2D numpy array (or None)
            If specified, this array provides the independent endmember set
            against which the dependent endmembers are defined.
        """
        self.set_return_type(return_fractions)
        # cdd H-representation convention: column 0 holds the constant
        # term, the remaining columns hold the coefficients.
        self.equality_matrix = equalities[:, 1:]
        self.equality_vector = -equalities[:, 0]
        self.polytope_matrix = cdd.Matrix(equalities, linear=True,
                                          number_type=number_type)
        self.polytope_matrix.rep_type = cdd.RepType.INEQUALITY
        self.polytope_matrix.extend(inequalities, linear=False)
        self.polytope = cdd.Polyhedron(self.polytope_matrix)
        if independent_endmember_occupancies is not None:
            # Assigning here pre-seeds the cached property of the same
            # name (cached properties store their value in the instance
            # __dict__, as set_return_type below relies on).
            self.independent_endmember_occupancies = independent_endmember_occupancies

    def set_return_type(self, return_fractions=False):
        """
        Sets the return_type for the polytope object. Also deletes the cached
        endmember_occupancies property.

        Parameters
        ----------
        return_fractions : boolean (default is False)
            Whether the generated polytope object should return fractions or
            floats.
        """
        # The cached endmember_occupancies depends on the return type, so
        # invalidate it when the type changes.
        try:
            del self.__dict__['endmember_occupancies']
        except KeyError:
            pass
        self.return_fractions = return_fractions

    @cached_property
    def raw_vertices(self):
        """
        Returns a list of the vertices of the polytope without any
        postprocessing. See also endmember_occupancies.
        """
        return self.polytope.get_generators()[:]

    @cached_property
    def limits(self):
        """
        Return the limits of the polytope (the set of bounding inequalities).
        """
        return np.array(self.polytope.get_inequalities(), dtype=float)

    @cached_property
    def n_endmembers(self):
        """
        Return the number of endmembers
        (the number of vertices of the polytope).
        """
        return len(self.raw_vertices)

    @cached_property
    def endmember_occupancies(self):
        """
        Return the endmember occupancies
        (a processed list of all of the vertex locations).
        """
        if self.return_fractions:
            if self.polytope.number_type == 'fraction':
                v = np.array([[Fraction(value) for value in v]
                              for v in self.raw_vertices])
            else:
                # Float-typed polytope: approximate each value as a
                # bounded-denominator rational.
                v = np.array([[Rational(value).limit_denominator(1000000)
                               for value in v]
                              for v in self.raw_vertices])
        else:
            v = np.array([[float(value) for value in v]
                          for v in self.raw_vertices])

        if len(v.shape) == 1:
            raise ValueError("The combined equality and positivity "
                             "constraints result in a null polytope.")

        # cdd generators are homogeneous: divide by the leading element
        # to recover the actual vertex coordinates.
        return v[:, 1:] / v[:, 0, np.newaxis]

    @cached_property
    def independent_endmember_occupancies(self):
        """
        Return an independent set of endmember occupancies
        (a linearly-independent set of vertex locations)
        """
        arr = self.endmember_occupancies
        return arr[independent_row_indices(arr)]

    @cached_property
    def endmembers_as_independent_endmember_amounts(self):
        """
        Return a list of all the endmembers as a linear sum of
        the independent endmembers.
        """
        ind = self.independent_endmember_occupancies

        # Least-squares solve for the amounts; rounding suppresses
        # floating-point noise in what should be exact combinations.
        sol = np.linalg.lstsq(np.array(ind.T).astype(float),
                              np.array(self.endmember_occupancies.T).astype(
                                  float),
                              rcond=0)[0].round(decimals=12).T
        return sol

    def _decompose_vertices_into_simplices(self, vertices):
        """
        Decomposes a set of vertices into simplices by Delaunay triangulation.
        """
        # Delaunay triangulation only works in dimensions > 1
        # and we remove the nullspace (sum(fractions) = 1)
        if len(vertices) > 2:
            nulls = np.repeat(vertices[:, -1],
                              vertices.shape[1]).reshape(vertices.shape)
            tri = Delaunay((vertices - nulls)[:, :-1])
            return tri.simplices
        else:
            # Two vertices form a single line-segment "simplex".
            return [[0, 1]]

    @cached_property
    def independent_endmember_polytope(self):
        """
        Returns the polytope expressed in terms of proportions of the
        independent endmembers. The polytope involves the first
        n-1 independent endmembers. The last endmember proportion makes
        the sum equal to one.
        """
        arr = self.endmembers_as_independent_endmember_amounts
        arr = np.hstack((np.ones((len(arr), 1)), arr[:, :-1]))
        M = cdd.Matrix(arr, number_type='fraction')
        M.rep_type = cdd.RepType.GENERATOR
        return cdd.Polyhedron(M)

    @cached_property
    def independent_endmember_limits(self):
        """
        Gets the limits of the polytope as a function of the independent
        endmembers.
        """
        return np.array(self.independent_endmember_polytope.get_inequalities(),
                        dtype=float)

    def subpolytope_from_independent_endmember_limits(self, limits):
        """
        Returns a smaller polytope by applying additional limits to the amounts
        of the independent endmembers.
        """
        modified_limits = self.independent_endmember_polytope.get_inequalities().copy()
        modified_limits.extend(limits, linear=False)
        return cdd.Polyhedron(modified_limits)

    def subpolytope_from_site_occupancy_limits(self, limits):
        """
        Returns a smaller polytope by applying additional limits to the
        individual site occupancies.
        """
        modified_limits = self.polytope_matrix.copy()
        modified_limits.extend(limits, linear=False)
        return cdd.Polyhedron(modified_limits)

    def grid(self, points_per_edge=2, unique_sorted=True,
             grid_type='independent endmember proportions', limits=None):
        """
        Create a grid of points which span the polytope.

        Parameters
        ----------
        points_per_edge : integer (default is 2)
            Number of points per edge of the polytope.
        unique_sorted : boolean (default is True)
            The gridding is done by splitting the polytope into
            a set of simplices. This means that points will be duplicated along
            vertices, faces etc. If unique_sorted is True, this function
            will sort and make the points unique. This is an expensive
            operation for large polytopes, and may not always be necessary.
        grid_type : 'independent endmember proportions' (default) or 'site occupancies'
            Whether to grid the polytope in terms of
            independent endmember proportions or site occupancies.
        limits : 2D numpy array
            Additional inequalities restricting the gridded area of the polytope.

        Returns
        -------
        points : 2D numpy array
            A list of points gridding the polytope.
        """
        if limits is None:
            # Grid the full polytope.
            if grid_type == 'independent endmember proportions':
                f_occ = (self.endmembers_as_independent_endmember_amounts
                         / (points_per_edge - 1))
            elif grid_type == 'site occupancies':
                f_occ = self.endmember_occupancies/(points_per_edge-1)
            else:
                raise Exception('grid type not recognised. Should be one of '
                                'independent endmember proportions '
                                'or site occupancies')

            simplices = self._decompose_vertices_into_simplices(
                self.endmembers_as_independent_endmember_amounts)
        else:
            # Grid only the sub-polytope selected by the extra limits.
            if grid_type == 'independent endmember proportions':
                ppns = np.array(self.subpolytope_from_independent_endmember_limits(
                    limits).get_generators()[:])[:, 1:]
                # Recover the final proportion from the unit-sum constraint.
                last_ppn = np.array([1. - sum(p)
                                     for p in ppns]).reshape((len(ppns), 1))
                vertices_as_independent_endmember_proportions = np.hstack(
                    (ppns, last_ppn))
                f_occ = vertices_as_independent_endmember_proportions / \
                    (points_per_edge-1)
            elif grid_type == 'site occupancies':
                occ = np.array(self.subpolytope_from_site_occupancy_limits(
                    limits).get_generators()[:])[:, 1:]
                f_occ = occ/(points_per_edge-1)
                ind = self.independent_endmember_occupancies
                vertices_as_independent_endmember_proportions = np.linalg.lstsq(np.array(ind.T).astype(float),
                                                                                np.array(occ.T).astype(
                                                                                    float),
                                                                                rcond=None)[0].round(decimals=12).T
            else:
                raise Exception('grid_type not recognised. '
                                'Should be one of '
                                'independent endmember proportions '
                                'or site occupancies')

            simplices = self._decompose_vertices_into_simplices(
                vertices_as_independent_endmember_proportions)

        # Grid each simplex with a shared SimplexGrid and map the unit
        # grid onto the simplex's vertex fractions.
        n_ind = f_occ.shape[1]
        n_simplices = len(simplices)
        dim = len(simplices[0])
        simplex_grid = SimplexGrid(dim, points_per_edge)
        grid = simplex_grid.grid('array')
        points_per_simplex = simplex_grid.n_points()
        n_points = n_simplices*points_per_simplex
        points = np.empty((n_points, n_ind))
        idx = 0
        for i in range(0, n_simplices):
            points[idx:idx+points_per_simplex] = grid.dot(f_occ[simplices[i]])
            idx += points_per_simplex

        if unique_sorted:
            # Duplicates arise on shared faces/edges of adjacent simplices.
            points = np.unique(points, axis=0)
        return points
| bobmyhill/burnman | burnman/classes/polytope.py | Python | gpl-2.0 | 14,930 |
import sys
import csv
import logging
from collections import defaultdict
from functools import lru_cache
from path import path
import flask
from flask.ext.script import Manager
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
placenames_manager = Manager()
def fix_comma_below(text):
    """Replace the legacy cedilla forms of Romanian s/t with the correct
    comma-below letters (both cases)."""
    cedilla_to_comma = str.maketrans('şŞţŢ', 'șȘțȚ')
    return text.translate(cedilla_to_comma)
def strip_prefix(name):
    """Drop a leading administrative-unit word ("Județul", "Municipiul",
    "Oraș" or "Comuna") from a place name; return the name unchanged if
    no prefix matches."""
    for prefix in ('Județul ', 'Municipiul ', 'Oraș ', 'Comuna '):
        if name.startswith(prefix):
            return name[len(prefix):]
    return name
# Names containing any of these words are skipped by load_placenames —
# they denote institutions (agencies, ministries) rather than places.
stop_words = [
    "agenția",
    "ministerul",
]
class SirutaLoader:
    """Exposes the SIRUTA settlement database as per-county name sets."""

    def __init__(self):
        from sirutalib import SirutaDatabase
        self.siruta = SirutaDatabase()
        # Map county name -> SIRUTA code; type 40 appears to mark county
        # entries in the database.
        self.county_code = {}
        for record in self.siruta._data.values():
            if record['type'] != 40:
                continue
            county_name = strip_prefix(record['name'].title())
            self.county_code[county_name] = record['siruta']

    def walk_siruta(self, code):
        """Yield the title-cased name of `code` and of everything below it."""
        yield self.siruta.get_name(code, prefix=False).title()
        for child_code in self.siruta.get_inf_codes(code):
            yield from self.walk_siruta(child_code)

    def get_siruta_names(self, county_name):
        """Return the sorted, de-duplicated settlement names of a county."""
        code = self.county_code[county_name]
        return sorted(set(self.walk_siruta(code)))
@placenames_manager.command
def load_placenames():
    """Read a tab-separated geonames dump from stdin and write one JSON
    file of sorted place names per county into ``placename_data/``."""
    # Column layout of the geonames country dump format.
    columns = ['geonameid', 'name', 'asciiname', 'alternatenames', 'latitude',
               'longitude', 'feature class', 'feature code', 'country code',
               'cc2', 'admin1 code', 'admin2 code', 'admin3 code',
               'admin4 code', 'population', 'elevation', 'dem', 'timezone',
               'modification date']
    siruta_loader = SirutaLoader()
    reader = csv.DictReader(sys.stdin, delimiter='\t', fieldnames=columns)
    place_names_by_county = defaultdict(set)
    counties = []
    for row in reader:
        # 'admin1 code' identifies the county; empty for rows above county level.
        adm1_code = int(row['admin1 code']) if row['admin1 code'] else None
        name = strip_prefix(fix_comma_below(row['name']))
        # Skip institutional entries (agencies, ministries).
        if any(w in name.lower() for w in stop_words):
            continue
        county_place_names = place_names_by_county[adm1_code]
        county_place_names.add(name)
        # ADM1 rows describe the county itself; record it with a live
        # reference to its (still growing) name set.
        if row['feature code'] == 'ADM1':
            county = {
                'code': adm1_code,
                'name': name,
                'place_names': county_place_names,
            }
            counties.append(county)
    for county in counties:
        # Merge in the official SIRUTA settlement names, then freeze the
        # set into a sorted list for JSON serialisation.
        county['place_names'].update(
            siruta_loader.get_siruta_names(county['name']))
        county['place_names'] = sorted(county['place_names'])
        out_name = '%02d.json' % county['code']
        out_path = (path(flask.current_app.root_path) /
                    'placename_data' / out_name)
        with out_path.open('w', encoding='utf-8') as f:
            flask.json.dump(county, f, indent=2, sort_keys=True)
            f.write('\n')
        logger.info("Saved county %s (%s) with %d names",
                    county['name'], county['code'], len(county['place_names']))
@lru_cache(100)
def get_county_data(code):
    """Load (and memoize) the JSON placename document for one county code."""
    county_file = '%02d.json' % code
    county_path = (path(flask.current_app.root_path) /
                   'placename_data' / county_file)
    with county_path.open('r', encoding='utf-8') as f:
        return flask.json.load(f)
@placenames_manager.command
def expand_minority_names():
    """Expand each root minority name into its inflected forms by scraping
    the paradigm tables on dexonline.ro, then print the updated JSON
    document to stdout."""
    from mptracker.scraper.common import Scraper, get_cached_session, pqitems
    scraper = Scraper(get_cached_session())
    scraper.use_cdep_opener = False
    doc = get_minority_names()
    roots = doc['root_names']
    names = set()
    for root in roots:
        # The "paradigma" page lists every inflected form of the word.
        url = ('http://dexonline.ro/definitie'
               '/{root}/paradigma'.format(root=root))
        page = scraper.fetch_url(url)
        for td in pqitems(page, 'table.lexem td.form'):
            names.add(td.text().replace(' ', ''))
    # '—' presumably marks an empty paradigm cell, not a real form.
    if '—' in names:
        names.remove('—')
    doc['search_names'] = sorted(names)
    print(flask.json.dumps(doc, indent=2, sort_keys=True))
@lru_cache()
def get_minority_names():
    """Load (and memoize) the minority-names JSON document."""
    minority_path = (path(flask.current_app.root_path) /
                     'placename_data' / 'minority.json')
    with minority_path.open('r', encoding='utf-8') as f:
        return flask.json.load(f)
| mgax/mptracker | mptracker/placenames.py | Python | mit | 4,418 |
from wptserve.handlers import json_handler
@json_handler
def main(request, response):
    # Pop the value previously stored under `key` from the server-side
    # stash (scoped to the /fetch/range/ path) and return it as the JSON
    # response body. take() removes the entry, so each key is one-shot.
    key = request.GET.first(b"key")
    return request.server.stash.take(key, b'/fetch/range/')
#
class Abc:
@staticmethod
def def_ghi(jkl):
#
| general-language-syntax/GLS | test/integration/StaticFunctionDeclareStart/public one parameter.py | Python | mit | 55 |
from django.utils import six
# Compatibility shim: expose urlparse, urlencode, quote_plus, quote,
# urlopen, Request and HTTPError under the same names on both Python 2
# and Python 3, so callers can import them from this module unchanged.
if six.PY2:
    import urlparse
    from urllib import urlencode, quote_plus, quote
    from urllib2 import urlopen, Request, HTTPError
else:
    from urllib import parse as urlparse
    from urllib.parse import urlencode, quote_plus, quote
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Collect ModelAdmin registrations from all installed apps.
admin.autodiscover()
urlpatterns = patterns('',
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
)
| ui/django-cached_authentication_middleware | test_project/test_project/urls.py | Python | mit | 186 |
import unittest
from unittest.mock import Mock, patch
from tests.test_api import TestAPIStatus
from yams_api.plugins.dev.aws.methods import AWSPublicResource
class TestAWSAPI(TestAPIStatus):
    """Endpoint tests for the AWS plugin blueprint; AWS calls are mocked."""

    def test_aws_endpoint_reachable(self):
        # The plugin root should answer with a JSON 200.
        resp = self.client.get("/plugins/aws/")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.mimetype, 'application/json')

    # Todo: Exception handling tests
    @unittest.skip('Exceptions not fully implemented. skipping')
    def test_aws_endpoint_server_error(self):
        with self.assertRaises(Exception):
            resp = self.client.get("/plugins/aws/")
            self.assertEqual(resp.status_code, 500)
            self.assertEqual(resp.mimetype, 'application/json')
            #self.assertEqual(resp.data, b'{\n "status": "ok"\n}')

    @patch('yams_api.plugins.dev.aws.views.methods.AWSResource')
    def test_aws_ec2_awsresource_returns_successfully(self, m_methods):
        # Patch the resource class so no real AWS request is made.
        m_aws_obj = Mock()
        m_methods.return_value = m_aws_obj
        m_aws_obj.get_resource.return_value = "my_resource"
        resp = self.client.get("/plugins/aws/ec2/some_resource")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.mimetype, 'application/json')
        self.assertEqual(resp.data, b'{\n "response": "my_resource"\n}')

    @unittest.skip("method not implemented yet. skipping")
    def test_aws_ec2_resource_returns_successfully(self):
        pass

    @patch.object(AWSPublicResource, 'get_aws_endpoint_status')
    def test_aws_ec2_awspublicresource_returns_successfully(self, m_obj):
        # Stub the status probe so the endpoint reports "running".
        m_obj.return_value = "running"
        resp = self.client.get("/plugins/aws/status")
        self.assertEqual(resp.mimetype, 'application/json')
        # todo: get proper response for public resource response
        self.assertEqual(resp.data, b'{\n "response": "running"\n}')
if __name__ == '__main__':
unittest.main(verbosity=2)
| tristanfisher/yams | tests/test_plugins/test_aws.py | Python | gpl-3.0 | 1,949 |
import psycopg2
from Vertex import Vertex
from Graph import Graph
from GeoCode import GeoCode
from distCal import lonlat2ID, cor2ID, distanceCal, calPathDis, R
def createMiniWorld(miniGraph, startPt, endPt):
    """Fill miniGraph with the road edges inside a padded bounding box
    around startPt and endPt (both (lng, lat) pairs).

    Returns the box as [[minLat, minLng], [maxLat, maxLng]].
    """
    # Pad the box so roads just outside the direct corridor are included.
    padding = 0.005
    minLng = min(startPt[0], endPt[0]) - padding
    maxLng = max(startPt[0], endPt[0]) + padding
    minLat = min(startPt[1], endPt[1]) - padding
    maxLat = max(startPt[1], endPt[1]) + padding
    try:
        conn = psycopg2.connect("dbname='CamBosRoad' user = 'postgres' host = 'localhost' password='********'")
        cur = conn.cursor()
    except psycopg2.Error:
        # Previously a bare except fell through and crashed later on an
        # undefined cursor; report and re-raise instead.
        print('connection problem')
        raise
    try:
        # Parameterised query (no string interpolation into SQL).
        cur.execute("SELECT * FROM Edges WHERE fnodelng>%s AND fnodelat>%s "
                    "AND fnodelng<%s AND fnodelat<%s",
                    (minLng, minLat, maxLng, maxLat))
        miniDB = cur.fetchall()
    finally:
        # Release DB resources even if the query fails.
        cur.close()
        conn.close()
    print('{} edges found within miniGraph'.format(len(miniDB)))
    for edge in miniDB:
        # Row layout: fnodelng, fnodelat, tnodelng, tnodelat, then the
        # two edge attributes passed through to the graph.
        fnode = (edge[0], edge[1])
        tnode = (edge[2], edge[3])
        miniGraph.addEdge_directional(fnode, tnode, edge[4], edge[5])
    return [[minLat, minLng], [maxLat, maxLng]]
def PathTestMashUp(startPt, endPt, runDis, K=5):
    """Geocode two addresses, build the local road graph between them and
    look up candidate paths of length runDis via miniGraph.findPath.

    K (new, default 5 — the previously hard-coded value) is the number of
    candidate paths requested.  Returns a dict with the mini graph, the
    geocoded end points, the snapped node ids and the path dictionary.
    """
    startCor = GeoCode(startPt)
    endCor = GeoCode(endPt)
    miniGraph = Graph()
    # Populates miniGraph in place; the returned bounds are not needed here.
    createMiniWorld(miniGraph, startCor, endCor)
    # Snap each geocoded coordinate to its nearest graph node.
    startNode = [0, 0]
    dist = miniGraph.findNearestNode(startCor, startNode)
    print('the closest node found to startPt is ' + str(startNode) + ', with dist ' + str(dist))
    endNode = [0, 0]
    dist = miniGraph.findNearestNode(endCor, endNode)
    print('the closest node found to endPt is ' + str(endNode) + ', with dist ' + str(dist))
    startNode = cor2ID(startNode)
    endNode = cor2ID(endNode)
    pathDict = miniGraph.findPath(startNode, endNode, runDis, K)
    for k in range(0, K):
        print('The actual path dis is ' + str(pathDict[k]['cost']))
    return {'miniGraph': miniGraph,
            'startPt': startCor,
            'endPt': endCor,
            'startNode': startNode,
            'endNode': endNode,
            'pathDict': pathDict}
| SummerZheng/iRun_YN | app/static/py/TestPath.py | Python | mit | 2,206 |
def extractWwwComegatranslationsCom(item):
    '''
    Parser for 'www.comegatranslations.com'
    '''
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Skip preview posts and titles that carry no chapter/volume info.
    if "preview" in title.lower() or not (chp or vol):
        return None
    if "WATTT" in item['tags']:
        return buildReleaseMessageWithType(item, "WATTT", vol, chp,
                                           frag=frag, postfix=postfix)
    return False
"""Extension argument processing code
"""
__all__ = [
'Message', 'NamespaceMap', 'no_default', 'registerNamespaceAlias',
'OPENID_NS', 'BARE_NS', 'OPENID1_NS', 'OPENID2_NS', 'SREG_URI',
'IDENTIFIER_SELECT'
]
import copy
import warnings
import urllib.request
import urllib.error
from openid import oidutil
from openid import kvform
try:
ElementTree = oidutil.importElementTree()
except ImportError:
# No elementtree found, so give up, but don't fail to import,
# since we have fallbacks.
ElementTree = None
# This doesn't REALLY belong here, but where is better?
IDENTIFIER_SELECT = 'http://specs.openid.net/auth/2.0/identifier_select'
# URI for Simple Registration extension, the only commonly deployed
# OpenID 1.x extension, and so a special case
SREG_URI = 'http://openid.net/sreg/1.0'
# The OpenID 1.X namespace URI
OPENID1_NS = 'http://openid.net/signon/1.0'
THE_OTHER_OPENID1_NS = 'http://openid.net/signon/1.1'
OPENID1_NAMESPACES = OPENID1_NS, THE_OTHER_OPENID1_NS
# The OpenID 2.0 namespace URI
OPENID2_NS = 'http://specs.openid.net/auth/2.0'
# The namespace consisting of pairs with keys that are prefixed with
# "openid." but not in another namespace.
NULL_NAMESPACE = oidutil.Symbol('Null namespace')
# The null namespace, when it is an allowed OpenID namespace
OPENID_NS = oidutil.Symbol('OpenID namespace')
# The top-level namespace, excluding all pairs with keys that start
# with "openid."
BARE_NS = oidutil.Symbol('Bare namespace')
# Limit, in bytes, of identity provider and return_to URLs, including
# response payload. See OpenID 1.1 specification, Appendix D.
OPENID1_URL_LIMIT = 2047
# All OpenID protocol fields. Used to check namespace aliases.
OPENID_PROTOCOL_FIELDS = [
'ns',
'mode',
'error',
'return_to',
'contact',
'reference',
'signed',
'assoc_type',
'session_type',
'dh_modulus',
'dh_gen',
'dh_consumer_public',
'claimed_id',
'identity',
'realm',
'invalidate_handle',
'op_endpoint',
'response_nonce',
'sig',
'assoc_handle',
'trust_root',
'openid',
]
class UndefinedOpenIDNamespace(ValueError):
    """Raised if the generic OpenID namespace (OPENID_NS) is accessed
    when no OpenID namespace has been set for this message."""
class InvalidOpenIDNamespace(ValueError):
    """Raised if openid.ns is not a recognized value.

    For recognized values, see L{Message.allowed_openid_namespaces}
    """

    def __str__(self):
        # Append the offending namespace value, if one was supplied.
        message = "Invalid OpenID Namespace"
        if self.args:
            message = "%s %r" % (message, self.args[0])
        return message
# Sentinel used for Message implementation to indicate that getArg
# should raise an exception instead of returning a default.
no_default = object()
# Global namespace / alias registration map. See
# registerNamespaceAlias.
registered_aliases = {}
class NamespaceAliasRegistrationError(Exception):
    """
    Raised by registerNamespaceAlias when an alias or namespace URI has
    already been registered with a conflicting value.
    """
    pass
def registerNamespaceAlias(namespace_uri, alias):
    """
    Record a (namespace URI, alias) pair in the module-wide alias map.

    This registration is required to use a namespace with an OpenID 1
    message. Registering an identical pair twice is a no-op; registering
    either half with a different partner raises
    NamespaceAliasRegistrationError.
    """
    global registered_aliases

    if registered_aliases.get(alias) == namespace_uri:
        # Exact pair already registered; nothing to do.
        return

    if namespace_uri in registered_aliases.values():
        raise NamespaceAliasRegistrationError(
            'Namespace uri %r already registered' % (namespace_uri, ))

    if alias in registered_aliases:
        raise NamespaceAliasRegistrationError(
            'Alias %r already registered' % (alias, ))

    registered_aliases[alias] = namespace_uri
class Message(object):
    """
    In the implementation of this object, None represents the global
    namespace as well as a namespace with no key.

    @cvar namespaces: A dictionary specifying specific
        namespace-URI to alias mappings that should be used when
        generating namespace aliases.

    @ivar ns_args: two-level dictionary of the values in this message,
        grouped by namespace URI. The first level is the namespace
        URI.
    """
    # Namespace URIs that a message may declare via openid.ns.
    allowed_openid_namespaces = [OPENID1_NS, THE_OTHER_OPENID1_NS, OPENID2_NS]

    def __init__(self, openid_namespace=None):
        """Create an empty Message.

        @raises InvalidOpenIDNamespace: if openid_namespace is not in
            L{Message.allowed_openid_namespaces}
        """
        self.args = {}
        self.namespaces = NamespaceMap()
        if openid_namespace is None:
            self._openid_ns_uri = None
        else:
            # OpenID 1 namespaces are "implicit": they are never
            # serialized as an explicit openid.ns parameter.
            implicit = openid_namespace in OPENID1_NAMESPACES
            self.setOpenIDNamespace(openid_namespace, implicit)

    @classmethod
    def fromPostArgs(cls, args):
        """Construct a Message containing a set of POST arguments.

        Arguments without an 'openid.' prefix are kept in the bare
        namespace (BARE_NS); the rest are resolved through
        _fromOpenIDArgs.
        """
        self = cls()
        # Partition into "openid." args and bare args
        openid_args = {}
        for key, value in args.items():
            if isinstance(value, list):
                raise TypeError("query dict must have one value for each key, "
                                "not lists of values.  Query is %r" % (args, ))
            try:
                prefix, rest = key.split('.', 1)
            except ValueError:
                prefix = None
            if prefix != 'openid':
                self.args[(BARE_NS, key)] = value
            else:
                openid_args[rest] = value
        self._fromOpenIDArgs(openid_args)
        return self

    @classmethod
    def fromOpenIDArgs(cls, openid_args):
        """Construct a Message from a parsed KVForm message.

        @raises InvalidOpenIDNamespace: if openid.ns is not in
            L{Message.allowed_openid_namespaces}
        """
        self = cls()
        self._fromOpenIDArgs(openid_args)
        return self

    def _fromOpenIDArgs(self, openid_args):
        # Populate self from a dict of 'openid.'-stripped key/value pairs,
        # resolving namespace alias declarations ('ns.<alias>') first.
        ns_args = []

        # Resolve namespaces
        for rest, value in openid_args.items():
            try:
                ns_alias, ns_key = rest.split('.', 1)
            except ValueError:
                ns_alias = NULL_NAMESPACE
                ns_key = rest

            if ns_alias == 'ns':
                # 'ns.<alias>=<uri>' declares an extension namespace alias.
                self.namespaces.addAlias(value, ns_key)
            elif ns_alias == NULL_NAMESPACE and ns_key == 'ns':
                # null namespace
                self.setOpenIDNamespace(value, False)
            else:
                ns_args.append((ns_alias, ns_key, value))

        # Implicitly set an OpenID namespace definition (OpenID 1)
        if not self.getOpenIDNamespace():
            self.setOpenIDNamespace(OPENID1_NS, True)

        # Actually put the pairs into the appropriate namespaces
        for (ns_alias, ns_key, value) in ns_args:
            ns_uri = self.namespaces.getNamespaceURI(ns_alias)
            if ns_uri is None:
                # we found a namespaced arg without a namespace URI defined
                ns_uri = self._getDefaultNamespace(ns_alias)
                if ns_uri is None:
                    # Fall back to the core namespace, keeping the alias
                    # as part of the key so no information is lost.
                    ns_uri = self.getOpenIDNamespace()
                    ns_key = '%s.%s' % (ns_alias, ns_key)
                else:
                    self.namespaces.addAlias(ns_uri, ns_alias, implicit=True)

            self.setArg(ns_uri, ns_key, value)

    def _getDefaultNamespace(self, mystery_alias):
        """OpenID 1 compatibility: look for a default namespace URI to
        use for this alias."""
        global registered_aliases
        # Only try to map an alias to a default if it's an
        # OpenID 1.x message.
        if self.isOpenID1():
            return registered_aliases.get(mystery_alias)
        else:
            return None

    def setOpenIDNamespace(self, openid_ns_uri, implicit):
        """Set the OpenID namespace URI used in this message.

        @param implicit: if True, the namespace will not be serialized
            as an explicit openid.ns parameter (OpenID 1 behavior).
        @raises InvalidOpenIDNamespace: if the namespace is not in
            L{Message.allowed_openid_namespaces}
        """
        if isinstance(openid_ns_uri, bytes):
            openid_ns_uri = str(openid_ns_uri, encoding="utf-8")
        if openid_ns_uri not in self.allowed_openid_namespaces:
            raise InvalidOpenIDNamespace(openid_ns_uri)

        self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)
        self._openid_ns_uri = openid_ns_uri

    def getOpenIDNamespace(self):
        """Return the OpenID namespace URI of this message (or None)."""
        return self._openid_ns_uri

    def isOpenID1(self):
        """True if this message uses an OpenID 1.x namespace."""
        return self.getOpenIDNamespace() in OPENID1_NAMESPACES

    def isOpenID2(self):
        """True if this message uses the OpenID 2.0 namespace."""
        return self.getOpenIDNamespace() == OPENID2_NS

    @classmethod
    def fromKVForm(cls, kvform_string):
        """Create a Message from a KVForm string"""
        # Previously written as 'fromKVForm = classmethod(fromKVForm)'
        # after the def; the decorator form is equivalent and idiomatic.
        return cls.fromOpenIDArgs(kvform.kvToDict(kvform_string))

    def copy(self):
        """Return a deep copy of this message."""
        return copy.deepcopy(self)

    def toPostArgs(self):
        """
        Return all arguments with openid. in front of namespaced arguments.

        @return: {str: str} mapping of POST keys to unicode values
        """
        args = {}

        # Add namespace definitions to the output
        for ns_uri, alias in self.namespaces.items():
            if self.namespaces.isImplicit(ns_uri):
                continue
            if alias == NULL_NAMESPACE:
                ns_key = 'openid.ns'
            else:
                ns_key = 'openid.ns.' + alias
            args[ns_key] = oidutil.toUnicode(ns_uri)

        for (ns_uri, ns_key), value in self.args.items():
            key = self.getKey(ns_uri, ns_key)
            # Ensure the resulting value is a unicode string.
            args[key] = oidutil.toUnicode(value)

        return args

    def toArgs(self):
        """Return all namespaced arguments, failing if any
        non-namespaced arguments exist.

        @raises ValueError: if the message contains bare (non-openid.)
            arguments
        """
        post_args = self.toPostArgs()
        kvargs = {}
        for k, v in post_args.items():
            if not k.startswith('openid.'):
                raise ValueError(
                    'This message can only be encoded as a POST, because it '
                    'contains arguments that are not prefixed with "openid."')
            else:
                # Strip the 'openid.' prefix (7 characters).
                kvargs[k[7:]] = v

        return kvargs

    def toFormMarkup(self,
                     action_url,
                     form_tag_attrs=None,
                     submit_text="Continue"):
        """Generate HTML form markup that contains the values in this
        message, to be HTTP POSTed as x-www-form-urlencoded UTF-8.

        @param action_url: The URL to which the form will be POSTed
        @type action_url: str

        @param form_tag_attrs: Dictionary of attributes to be added to
            the form tag. 'accept-charset' and 'enctype' have defaults
            that can be overridden. If a value is supplied for
            'action' or 'method', it will be replaced.
        @type form_tag_attrs: {unicode: unicode}

        @param submit_text: The text that will appear on the submit
            button for this form.
        @type submit_text: unicode

        @returns: A string containing (X)HTML markup for a form that
            encodes the values in this Message object.
        @rtype: str
        """
        if ElementTree is None:
            raise RuntimeError('This function requires ElementTree.')

        assert action_url is not None

        form = ElementTree.Element('form')

        if form_tag_attrs:
            for name, attr in form_tag_attrs.items():
                form.attrib[name] = attr

        # 'action' and 'method' always win over caller-supplied attrs.
        form.attrib['action'] = oidutil.toUnicode(action_url)
        form.attrib['method'] = 'post'
        form.attrib['accept-charset'] = 'UTF-8'
        form.attrib['enctype'] = 'application/x-www-form-urlencoded'

        for name, value in self.toPostArgs().items():
            attrs = {
                'type': 'hidden',
                'name': oidutil.toUnicode(name),
                'value': oidutil.toUnicode(value)
            }
            form.append(ElementTree.Element('input', attrs))

        submit = ElementTree.Element(
            'input',
            {'type': 'submit',
             'value': oidutil.toUnicode(submit_text)})
        form.append(submit)

        return str(ElementTree.tostring(form, encoding='utf-8'),
                   encoding="utf-8")

    def toURL(self, base_url):
        """Generate a GET URL with the parameters in this message
        attached as query parameters."""
        return oidutil.appendArgs(base_url, self.toPostArgs())

    def toKVForm(self):
        """Generate a KVForm string that contains the parameters in
        this message. This will fail if the message contains arguments
        outside of the 'openid.' prefix.
        """
        return kvform.dictToKV(self.toArgs())

    def toURLEncoded(self):
        """Generate an x-www-urlencoded string"""
        # Sort for a deterministic, signature-friendly encoding.
        args = sorted(self.toPostArgs().items())
        return urllib.parse.urlencode(args)

    def _fixNS(self, namespace):
        """Convert an input value into the internally used values of
        this object

        @param namespace: The string or constant to convert
        @type namespace: str or unicode or BARE_NS or OPENID_NS

        @raises UndefinedOpenIDNamespace: if OPENID_NS is requested but
            no OpenID namespace has been set on this message
        """
        if isinstance(namespace, bytes):
            namespace = str(namespace, encoding="utf-8")

        if namespace == OPENID_NS:
            if self._openid_ns_uri is None:
                raise UndefinedOpenIDNamespace('OpenID namespace not set')
            else:
                namespace = self._openid_ns_uri

        if namespace != BARE_NS and not isinstance(namespace, str):
            raise TypeError(
                "Namespace must be BARE_NS, OPENID_NS or a string. got %r" %
                (namespace, ))

        if namespace != BARE_NS and ':' not in namespace:
            fmt = 'OpenID 2.0 namespace identifiers SHOULD be URIs. Got %r'
            warnings.warn(fmt % (namespace, ), DeprecationWarning)

            if namespace == 'sreg':
                fmt = 'Using %r instead of "sreg" as namespace'
                warnings.warn(
                    fmt % (SREG_URI, ),
                    DeprecationWarning, )
                return SREG_URI

        return namespace

    def hasKey(self, namespace, ns_key):
        """Return True if (namespace, ns_key) is present in this message."""
        namespace = self._fixNS(namespace)
        return (namespace, ns_key) in self.args

    def getKey(self, namespace, ns_key):
        """Get the key for a particular namespaced argument"""
        namespace = self._fixNS(namespace)
        if namespace == BARE_NS:
            return ns_key

        ns_alias = self.namespaces.getAlias(namespace)

        # No alias is defined, so no key can exist
        if ns_alias is None:
            return None

        if ns_alias == NULL_NAMESPACE:
            tail = ns_key
        else:
            tail = '%s.%s' % (ns_alias, ns_key)

        return 'openid.' + tail

    def getArg(self, namespace, key, default=None):
        """Get a value for a namespaced key.

        @param namespace: The namespace in the message for this key
        @type namespace: str

        @param key: The key to get within this namespace
        @type key: str

        @param default: The value to use if this key is absent from
            this message. Using the special value
            openid.message.no_default will result in this method
            raising a KeyError instead of returning the default.

        @rtype: str or the type of default
        @raises KeyError: if default is no_default
        @raises UndefinedOpenIDNamespace: if the message has not yet
            had an OpenID namespace set
        """
        namespace = self._fixNS(namespace)
        args_key = (namespace, key)
        try:
            return self.args[args_key]
        except KeyError:
            if default is no_default:
                raise KeyError((namespace, key))
            else:
                return default

    def getArgs(self, namespace):
        """Get the arguments that are defined for this namespace URI

        @returns: mapping from namespaced keys to values
        @returntype: dict of {str: str}
        """
        namespace = self._fixNS(namespace)
        args = []
        for ((pair_ns, ns_key), value) in self.args.items():
            if pair_ns == namespace:
                # Normalize any stray bytes to str for a uniform result.
                if isinstance(ns_key, bytes):
                    k = str(ns_key, encoding="utf-8")
                else:
                    k = ns_key
                if isinstance(value, bytes):
                    v = str(value, encoding="utf-8")
                else:
                    v = value
                args.append((k, v))
        return dict(args)

    def updateArgs(self, namespace, updates):
        """Set multiple key/value pairs in one call

        @param updates: The values to set
        @type updates: {unicode:unicode}
        """
        namespace = self._fixNS(namespace)
        for k, v in updates.items():
            self.setArg(namespace, k, v)

    def setArg(self, namespace, key, value):
        """Set a single argument in this namespace"""
        assert key is not None
        assert value is not None
        namespace = self._fixNS(namespace)
        # try to ensure that internally it's consistent, at least: str -> str
        if isinstance(value, bytes):
            value = str(value, encoding="utf-8")
        self.args[(namespace, key)] = value
        if not (namespace is BARE_NS):
            self.namespaces.add(namespace)

    def delArg(self, namespace, key):
        """Remove a single argument from this namespace.

        @raises KeyError: if the argument is not present
        """
        namespace = self._fixNS(namespace)
        del self.args[(namespace, key)]

    def __repr__(self):
        return "<%s.%s %r>" % (self.__class__.__module__,
                               self.__class__.__name__, self.args)

    def __eq__(self, other):
        # Return NotImplemented for foreign types instead of raising
        # AttributeError on other.args, so comparisons against
        # non-Message objects fall back to Python's default behavior.
        if not isinstance(other, Message):
            return NotImplemented
        return self.args == other.args

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def getAliasedArg(self, aliased_key, default=None):
        """Get a value by its 'alias.key' form (e.g. 'sreg.email').

        'ns' returns the OpenID namespace URI; 'ns.<alias>' returns the
        namespace URI registered for that alias.

        @raises KeyError: if default is no_default and the namespace
            alias is undefined
        """
        if aliased_key == 'ns':
            return self.getOpenIDNamespace()

        if aliased_key.startswith('ns.'):
            uri = self.namespaces.getNamespaceURI(aliased_key[3:])
            if uri is None:
                if default == no_default:
                    raise KeyError
                else:
                    return default
            else:
                return uri

        try:
            alias, key = aliased_key.split('.', 1)
        except ValueError:
            # need more than x values to unpack
            ns = None
        else:
            ns = self.namespaces.getNamespaceURI(alias)

        if ns is None:
            # Unknown alias: treat the whole key as living in the core
            # OpenID namespace.
            key = aliased_key
            ns = self.getOpenIDNamespace()

        return self.getArg(ns, key, default)
class NamespaceMap(object):
    """Maintains a bijective map between namespace uris and aliases.
    """

    def __init__(self):
        # alias (str or NULL_NAMESPACE) -> namespace URI
        self.alias_to_namespace = {}
        # namespace URI -> alias
        self.namespace_to_alias = {}
        # URIs registered implicitly (OpenID 1); these are skipped when
        # serializing explicit openid.ns.* definitions.
        self.implicit_namespaces = []

    def getAlias(self, namespace_uri):
        """Return the alias for this URI, or None if unregistered."""
        return self.namespace_to_alias.get(namespace_uri)

    def getNamespaceURI(self, alias):
        """Return the URI for this alias, or None if unregistered."""
        return self.alias_to_namespace.get(alias)

    def iterNamespaceURIs(self):
        """Return an iterator over the namespace URIs"""
        return iter(self.namespace_to_alias)

    def iterAliases(self):
        """Return an iterator over the aliases"""
        return iter(self.alias_to_namespace)

    def items(self):
        """Iterate over the mapping

        @returns: iterator of (namespace_uri, alias)
        """
        return self.namespace_to_alias.items()

    def addAlias(self, namespace_uri, desired_alias, implicit=False):
        """Add an alias from this namespace URI to the desired alias

        @raises KeyError: if the alias or the URI is already mapped to
            a different partner
        """
        if isinstance(namespace_uri, bytes):
            namespace_uri = str(namespace_uri, encoding="utf-8")
        # Check that desired_alias is not an openid protocol field as
        # per the spec.
        assert desired_alias not in OPENID_PROTOCOL_FIELDS, \
            "%r is not an allowed namespace alias" % (desired_alias,)

        # Check that desired_alias does not contain a period as per
        # the spec.
        if isinstance(desired_alias, str):
            assert '.' not in desired_alias, \
                "%r must not contain a dot" % (desired_alias,)

        # Check that there is not a namespace already defined for
        # the desired alias
        current_namespace_uri = self.alias_to_namespace.get(desired_alias)
        if (current_namespace_uri is not None and
                current_namespace_uri != namespace_uri):
            fmt = ('Cannot map %r to alias %r. '
                   '%r is already mapped to alias %r')

            msg = fmt % (namespace_uri, desired_alias, current_namespace_uri,
                         desired_alias)
            raise KeyError(msg)

        # Check that there is not already a (different) alias for
        # this namespace URI
        alias = self.namespace_to_alias.get(namespace_uri)
        if alias is not None and alias != desired_alias:
            fmt = ('Cannot map %r to alias %r. '
                   'It is already mapped to alias %r')
            raise KeyError(fmt % (namespace_uri, desired_alias, alias))

        # Python 2 leftover: this used to read 'type(...) in [str, unicode]'
        # and had degraded to a duplicate-entry list; isinstance is the
        # correct py3 form.
        assert (desired_alias == NULL_NAMESPACE or
                isinstance(desired_alias, str)), repr(desired_alias)
        assert namespace_uri not in self.implicit_namespaces
        self.alias_to_namespace[desired_alias] = namespace_uri
        self.namespace_to_alias[namespace_uri] = desired_alias
        if implicit:
            self.implicit_namespaces.append(namespace_uri)
        return desired_alias

    def add(self, namespace_uri):
        """Add this namespace URI to the mapping, without caring what
        alias it ends up with"""
        # See if this namespace is already mapped to an alias
        alias = self.namespace_to_alias.get(namespace_uri)
        if alias is not None:
            return alias

        # Fall back to generating a numerical alias ('ext0', 'ext1', ...)
        # until one is free. (The loop can only exit via return.)
        i = 0
        while True:
            alias = 'ext' + str(i)
            try:
                self.addAlias(namespace_uri, alias)
            except KeyError:
                i += 1
            else:
                return alias

    def isDefined(self, namespace_uri):
        """Return True if this URI has an alias in the map."""
        return namespace_uri in self.namespace_to_alias

    def __contains__(self, namespace_uri):
        return self.isDefined(namespace_uri)

    def isImplicit(self, namespace_uri):
        """Return True if this URI was registered implicitly (OpenID 1)."""
        return namespace_uri in self.implicit_namespaces
| necaris/python3-openid | openid/message.py | Python | apache-2.0 | 22,680 |
import click
# --------------------------------------------
# Call koheron-server
# --------------------------------------------
class ConnectionType(object):
    """Connection parameters shared with subcommands via click's context.

    host: IP address or hostname of the koheron server.
    unixsock: path to a Unix domain socket (alternative transport).
    """
    def __init__(self, host="", unixsock=""):
        self.host = host
        # Bug fix: unixsock was accepted but silently discarded before.
        self.unixsock = unixsock
# Root command group. Builds the shared ConnectionType object that
# subcommands receive via @click.pass_obj. The host may also come from
# the HOST environment variable (envvar='HOST').
# NOTE: no docstring on purpose -- click would use it as --help text.
@click.group()
@click.option('--host', default='', help='Host ip address', envvar='HOST')
@click.pass_context
def cli(ctx, host):
    if host != "":
        ctx.obj = ConnectionType(host=str(host))
@cli.command()
def version():
    ''' Get the version of koheron python library '''
    # Import deferred so 'koheron version' works without a server connection.
    from .version import __version__
    click.echo(__version__)
@cli.command()
@click.pass_obj
def devices(conn_type):
    ''' Get the list of devices '''
    # Lazy import: KoheronClient opens a connection on construction.
    from .koheron import KoheronClient
    client = KoheronClient(host=conn_type.host)
    click.echo(client.devices_idx)
@cli.command()
@click.pass_obj
@click.option('--device', default=None)
def commands(conn_type, device):
    ''' Get the list of commands for a specified device '''
    from .koheron import KoheronClient
    client = KoheronClient(host=conn_type.host)
    if device is None:
        # No device given: dump the command table for every device.
        click.echo(client.commands)
    else:
        # Translate the device name into its index, then show only
        # that device's commands.
        device_idx = client.devices_idx[device]
        click.echo(client.commands[device_idx])
# --------------------------------------------
# Call HTTP API
# --------------------------------------------
@cli.command()
@click.pass_obj
@click.argument('instrument_zip')
@click.option('--run', is_flag=True)
def upload(conn_type, instrument_zip, run):
    ''' Upload instrument.zip '''
    # Uploads over the HTTP API; --run starts the instrument right away.
    from .koheron import upload_instrument
    upload_instrument(conn_type.host, instrument_zip, run=run)
@cli.command()
@click.pass_obj
@click.argument('instrument_name', required=False)
@click.option('--restart', is_flag=True)
def run(conn_type, instrument_name, restart):
    ''' Run a given instrument '''
    # instrument_name is optional; --restart forces a restart if the
    # instrument is already running.
    from .koheron import run_instrument
    run_instrument(conn_type.host, instrument_name, restart=restart)
| Koheron/zynq-sdk | python/koheron/cli.py | Python | mit | 1,924 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class ReverseSequenceTest(test.TestCase):
  """Tests for array_ops.reverse_sequence: values, gradients, and shape
  inference edge cases."""

  def _testReverseSequence(self,
                           x,
                           batch_axis,
                           seq_axis,
                           seq_lengths,
                           truth,
                           use_gpu=False,
                           expected_err_re=None):
    # Run reverse_sequence on one device and compare against `truth`,
    # or expect an error matching `expected_err_re`.
    with self.cached_session(use_gpu=use_gpu):
      ans = array_ops.reverse_sequence(
          x, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=seq_lengths)
      if expected_err_re is None:
        tf_ans = self.evaluate(ans)
        self.assertAllClose(tf_ans, truth, atol=1e-10)
        self.assertShapeEqual(truth, ans)
      else:
        with self.assertRaisesOpError(expected_err_re):
          self.evaluate(ans)

  def _testBothReverseSequence(self,
                               x,
                               batch_axis,
                               seq_axis,
                               seq_lengths,
                               truth,
                               expected_err_re=None):
    # Exercise both the GPU and CPU code paths.
    self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth, True,
                              expected_err_re)
    self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth,
                              False, expected_err_re)

  def _testBasic(self, dtype, len_dtype=np.int64):
    # 3x2x4 values reshaped to rank 5 and transposed so that the
    # sequence axis (0) and batch axis (2) are non-trivial.
    x = np.asarray(
        [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
         [[17, 18, 19, 20], [21, 22, 23, 24]]],
        dtype=dtype)
    x = x.reshape(3, 2, 4, 1, 1)
    x = x.transpose([2, 1, 0, 3, 4])  # permute axes 0 <=> 2

    # reverse dim 2 up to (0:3, none, 0:4) along dim=0
    seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)

    truth_orig = np.asarray(
        [
            [[3, 2, 1, 4], [7, 6, 5, 8]],  # reverse 0:3
            [[9, 10, 11, 12], [13, 14, 15, 16]],  # reverse none
            [[20, 19, 18, 17], [24, 23, 22, 21]]
        ],  # reverse 0:4 (all)
        dtype=dtype)
    truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
    truth = truth_orig.transpose([2, 1, 0, 3, 4])  # permute axes 0 <=> 2

    seq_axis = 0  # permute seq_axis and batch_axis (originally 2 and 0, resp.)
    batch_axis = 2
    self._testBothReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)

  def testSeqLengthInt32(self):
    self._testBasic(np.float32, np.int32)

  def testFloatBasic(self):
    self._testBasic(np.float32)

  def testDoubleBasic(self):
    self._testBasic(np.float64)

  def testInt32Basic(self):
    self._testBasic(np.int32)

  def testInt64Basic(self):
    self._testBasic(np.int64)

  def testComplex64Basic(self):
    self._testBasic(np.complex64)

  def testComplex128Basic(self):
    self._testBasic(np.complex128)

  @test_util.run_deprecated_v1
  def testFloatReverseSequenceGrad(self):
    x = np.asarray(
        [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
         [[17, 18, 19, 20], [21, 22, 23, 24]]],
        # np.float was a deprecated alias removed in NumPy 1.24; use the
        # concrete dtype it resolved to.
        dtype=np.float64)
    x = x.reshape(3, 2, 4, 1, 1)
    x = x.transpose([2, 1, 0, 3, 4])  # transpose axes 0 <=> 2

    # reverse dim 0 up to (0:3, none, 0:4) along dim=2
    seq_axis = 0
    batch_axis = 2
    seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)

    with self.cached_session():
      input_t = constant_op.constant(x, shape=x.shape)
      seq_lengths_t = constant_op.constant(seq_lengths, shape=seq_lengths.shape)
      reverse_sequence_out = array_ops.reverse_sequence(
          input_t,
          batch_axis=batch_axis,
          seq_axis=seq_axis,
          seq_lengths=seq_lengths_t)
      err = gradient_checker.compute_gradient_error(
          input_t, x.shape, reverse_sequence_out, x.shape, x_init_value=x)
    print("ReverseSequence gradient error = %g" % err)
    self.assertLess(err, 1e-8)

  @test_util.run_deprecated_v1
  def testShapeFunctionEdgeCases(self):
    # Unknown input rank: result rank must also be unknown.
    t = array_ops.reverse_sequence(
        array_ops.placeholder(
            dtypes.float32, shape=None),
        seq_lengths=array_ops.placeholder(
            dtypes.int64, shape=(32,)),
        batch_axis=0,
        seq_axis=1)
    self.assertIs(t.get_shape().ndims, None)

    # Batch size mismatched between input and seq_lengths.
    with self.assertRaises(ValueError):
      array_ops.reverse_sequence(
          array_ops.placeholder(
              dtypes.float32, shape=(32, 2, 3)),
          seq_lengths=array_ops.placeholder(
              dtypes.int64, shape=(33,)),
          seq_axis=3)

    # seq_axis out of bounds.
    with self.assertRaisesRegexp(ValueError, "seq_dim must be < input rank"):
      array_ops.reverse_sequence(
          array_ops.placeholder(
              dtypes.float32, shape=(32, 2, 3)),
          seq_lengths=array_ops.placeholder(
              dtypes.int64, shape=(32,)),
          seq_axis=3)

    # batch_axis out of bounds.
    with self.assertRaisesRegexp(ValueError, "batch_dim must be < input rank"):
      array_ops.reverse_sequence(
          array_ops.placeholder(
              dtypes.float32, shape=(32, 2, 3)),
          seq_lengths=array_ops.placeholder(
              dtypes.int64, shape=(32,)),
          seq_axis=0,
          batch_axis=3)

    with self.cached_session():
      inputs = array_ops.placeholder(dtypes.float32, shape=(32, 2, 3))
      seq_lengths = array_ops.placeholder(dtypes.int64, shape=(32,))
      output = array_ops.reverse_sequence(
          inputs, seq_lengths=seq_lengths,
          seq_axis=0)  # batch_axis default is 0
      with self.assertRaisesOpError("batch_dim == seq_dim"):
        output.eval(feed_dict={
            inputs: np.random.rand(32, 2, 3),
            seq_lengths: xrange(32)
        })
if __name__ == "__main__":
  # Run the tests via TensorFlow's test runner when executed directly.
  test.main()
| jbedorf/tensorflow | tensorflow/python/kernel_tests/reverse_sequence_op_test.py | Python | apache-2.0 | 6,952 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.