code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Registry of user-invocable commands for this plugin; presumably populated
# at runtime by the plugin's command registration — TODO confirm against the
# hubcommander plugin loader.
USER_COMMAND_DICT = {}
| sgedward/hubcommander | command_plugins/repeat/config.py | Python | apache-2.0 | 23 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-28 07:07
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``country_presents`` array field to the Partner model."""

    dependencies = [
        ('partner', '0004_auto_20170814_0841'),
    ]

    operations = [
        migrations.AddField(
            model_name='partner',
            name='country_presents',
            # NOTE(review): the stored choice values ('Ara', 'Chi', ...) are 3
            # characters long, but max_length=2 — values longer than max_length
            # fail model validation. Also, the choices look like language names,
            # not countries, despite the field name. Confirm against the model;
            # do not edit this migration retroactively once applied.
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('Ara', 'Arabic'), ('Chi', 'Chinese'), ('Eng', 'English'), ('Fre', 'French'), ('Rus', 'Russian'), ('Spa', 'Spanish'), ('Oth', 'Other')], max_length=2), default=list, null=True, size=None),
        ),
    ]
| unicef/un-partner-portal | backend/unpp_api/apps/partner/migrations/0005_partner_country_presents.py | Python | apache-2.0 | 733 |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import quark.plugin
from quark.tests.functional.base import BaseFunctionalTest
class TestQuarkPlugin(BaseFunctionalTest):
    """Functional-test base that provides a configured quark Plugin instance."""

    def setUp(self):
        super(TestQuarkPlugin, self).setUp()
        # Constrain the per-network port quota so quota-enforcement paths can
        # be exercised by subclasses with a single port.
        cfg.CONF.set_override('quota_ports_per_network', 1, 'QUOTAS')
        self.plugin = quark.plugin.Plugin()
class TestQuarkAPIExtensions(TestQuarkPlugin):
    """Adds coverage for appending the API extension path."""

    def test_append_quark_extensions(self):
        # A config object that does not expose "api_extensions_path"
        # must be left untouched.
        conf = mock.MagicMock()
        conf.__contains__.return_value = False
        quark.plugin.append_quark_extensions(conf)
        self.assertEqual(conf.set_override.call_count, 0)

    def test_append_no_extension_path(self):
        conf = mock.MagicMock()
        conf.__contains__.return_value = True
        with mock.patch("quark.plugin.extensions") as ext_pkg:
            ext_pkg.__path__ = ["apple", "banana", "carrot"]
            quark.plugin.append_quark_extensions(conf)
            conf.__contains__.assert_called_once_with("api_extensions_path")
            # The package path entries are joined with ":" for the override.
            conf.set_override.assert_called_once_with("api_extensions_path",
                                                      "apple:banana:carrot")
| Cerberus98/quark | quark/tests/test_quark_plugin.py | Python | apache-2.0 | 1,818 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: create the Invite and Ship models."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Invite',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('key', models.CharField(max_length=255)),
                ('deeplink', models.CharField(max_length=255)),
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        migrations.CreateModel(
            name='Ship',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                # NOTE(review): decimal_places=2 is very coarse for lat/lng
                # (~1 km resolution), and max_digits=65 may exceed some database
                # backends' DECIMAL limits — confirm against the model definition.
                # Do not edit this migration retroactively once applied.
                ('speed', models.DecimalField(decimal_places=2, max_digits=65)),
                ('courseAngle', models.DecimalField(decimal_places=2, max_digits=65)),
                ('lat', models.DecimalField(decimal_places=2, max_digits=65)),
                ('lng', models.DecimalField(decimal_places=2, max_digits=65)),
                ('name', models.CharField(max_length=200)),
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
    ]
| topa51/eis | api/migrations/0001_initial.py | Python | mit | 1,355 |
# Copyright (C) 2018 The Photogal Team.
#
# This file is part of Photogal.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .schema import schema
| jsamarziya/photogal | src/photogal/graphql/__init__.py | Python | gpl-3.0 | 739 |
import numpy as np
from perceptron import Perceptron
class Adaline(Perceptron):
    """Adaptive Linear Neuron (Adaline) built on a Perceptron base.

    Works for arbitrary input dimensions; plotting is delegated to the
    associated graph object (pyplot-based).
    """

    ETA = 1  # default learning rate; overridden per instance in __init__

    def __init__(self, grph, eta, max_t):
        Perceptron.__init__(self, grph)
        self.ETA = eta      # learning rate
        self.max_t = max_t  # iteration cap for fit()

    def update(self, y_t, x):
        """Return the weight list after one Adaline correction step for (x, y_t)."""
        predicted = np.sign(np.inner(self.grph.w, x))
        delta = self.ETA * (y_t - predicted)
        return [self.grph.w[i] + delta * x[i] for i in range(self.DIM)]

    def fit(self):
        """Train until no misclassified sample remains or ``max_t`` is hit.

        Returns the number of update steps performed.
        """
        steps = 0
        while True:
            idx = self.random_check()
            if idx == -1 or steps == self.max_t:
                break
            self.grph.w = self.update(self.grph.y[idx], self.grph.training_matrix[idx])
            steps += 1
        print("t: {0}, w: {1}".format(steps, self.grph.w))
        if self.grph.PLOT:
            # In calling g() the 0th value is 1, corresponding to w_0,
            # and the last value is not used in calculation, so is set as 0.
            self.grph.plot_g()
            self.grph.show_plot()
        return steps
| cbporch/perceptron | adaline.py | Python | mit | 1,193 |
# -*- coding: utf-8 -*-
"""
unit test for the undefined types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2008 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
test_default_undefined = '''
>>> from jinja2 import Environment, Undefined
>>> env = Environment(undefined=Undefined)
>>> env.from_string('{{ missing }}').render()
u''
>>> env.from_string('{{ missing.attribute }}').render()
Traceback (most recent call last):
...
UndefinedError: 'missing' is undefined
>>> env.from_string('{{ missing|list }}').render()
u'[]'
>>> env.from_string('{{ missing is not defined }}').render()
u'True'
>>> env.from_string('{{ foo.missing }}').render(foo=42)
u''
>>> env.from_string('{{ not missing }}').render()
u'True'
'''
test_debug_undefined = '''
>>> from jinja2 import Environment, DebugUndefined
>>> env = Environment(undefined=DebugUndefined)
>>> env.from_string('{{ missing }}').render()
u'{{ missing }}'
>>> env.from_string('{{ missing.attribute }}').render()
Traceback (most recent call last):
...
UndefinedError: 'missing' is undefined
>>> env.from_string('{{ missing|list }}').render()
u'[]'
>>> env.from_string('{{ missing is not defined }}').render()
u'True'
>>> env.from_string('{{ foo.missing }}').render(foo=42)
u"{{ no such element: int['missing'] }}"
>>> env.from_string('{{ not missing }}').render()
u'True'
'''
test_strict_undefined = '''
>>> from jinja2 import Environment, StrictUndefined
>>> env = Environment(undefined=StrictUndefined)
>>> env.from_string('{{ missing }}').render()
Traceback (most recent call last):
...
UndefinedError: 'missing' is undefined
>>> env.from_string('{{ missing.attribute }}').render()
Traceback (most recent call last):
...
UndefinedError: 'missing' is undefined
>>> env.from_string('{{ missing|list }}').render()
Traceback (most recent call last):
...
UndefinedError: 'missing' is undefined
>>> env.from_string('{{ missing is not defined }}').render()
u'True'
>>> env.from_string('{{ foo.missing }}').render(foo=42)
Traceback (most recent call last):
...
UndefinedError: 'int' object has no attribute 'missing'
>>> env.from_string('{{ not missing }}').render()
Traceback (most recent call last):
...
UndefinedError: 'missing' is undefined
'''
| minixalpha/SourceLearning | jinja2/jinja2-2.0/tests/test_undefined.py | Python | apache-2.0 | 2,242 |
from .fama_mcbeth import *
| khrapovs/famamcbeth | famamcbeth/__init__.py | Python | mit | 27 |
#!/usr/bin/env python
"""
Harness for pysdl2,
some simple classes to make working with pysdl2 easier.
Copyright (C) 2015 by Juan J. Martinez <jjm@usebox.net>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
import sys
import os
import ctypes
version = "0.2"
try:
import sdl2
except ImportError as ex:
if not hasattr(sys, "_gen_docs"):
sys.exit("SDL2 library not found: %s" % ex)
try:
from sdl2 import sdlmixer
except ImportError as ex:
if not hasattr(sys, "_gen_docs"):
sys.exit("SDL2_Mixer library not found: %s" % ex)
# loads game controller definitions
from .GameControllerDB import init_game_controller
class Harness(object):
    """
    Harness object

    Parameters:

        title: windows title.
        width: with in pixels of the draw area.
        height: height in pixels of the draw area.
        zoom: scale up the output, or use 1 to disable.
    """
    # Fixed update rate (updates per second) and its timestep.
    UFPS = 80
    UFPS_DT = 1.0 / 80
    # Default glyph order for bitmap fonts (see load_bitmap_font).
    FONT_MAP = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!?()@:/'., "
    AUDIO_CHANNELS = 6

    def __init__(self, title=None, width=320, height=200, zoom=1):
        self.title = title.encode() if title else b"SDL2 Harness"
        self.width = width
        self.height = height
        self.zoom = zoom

        self._quit = False
        self._update_dt = 0
        self.update_handlers = []
        self.draw_handlers = []
        self._controllers = {}

        # try to find the script directory
        if "__main__" in globals():
            # FIX: globals() is a dict — the previous attribute access
            # (globals().__main__) raised AttributeError when this branch ran.
            main = globals()["__main__"]
        else:
            import __main__ as main
        main_dir = os.path.dirname(os.path.realpath(getattr(main, "__file__", ".")))
        # heuristic intended to work with packaged scripts (eg, py2exe)
        while not os.path.isdir(main_dir):
            main_dir = os.path.dirname(main_dir)

        self.resource_path = [
            os.path.join(main_dir, "data"),
        ]
        # maps loaded resource filename -> callable that frees it
        self.resources = {}

        # expose sdl2 scancodes as KEY_* attributes (e.g. KEY_ESCAPE)
        for attr in dir(sdl2):
            if attr.startswith("SDL_SCANCODE_"):
                setattr(self, attr.replace("SDL_SCANCODE_", "KEY_"), getattr(sdl2, attr))

        sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO|sdl2.SDL_INIT_AUDIO|sdl2.SDL_INIT_TIMER|sdl2.SDL_INIT_JOYSTICK)
        init_game_controller()
        sdlmixer.Mix_Init(sdlmixer.MIX_INIT_OGG)
        sdlmixer.Mix_OpenAudio(44100, sdlmixer.MIX_DEFAULT_FORMAT, self.AUDIO_CHANNELS, 1024)

        self.window = sdl2.SDL_CreateWindow(self.title,
                                            sdl2.SDL_WINDOWPOS_CENTERED,
                                            sdl2.SDL_WINDOWPOS_CENTERED,
                                            self.width * self.zoom,
                                            self.height * self.zoom,
                                            sdl2.SDL_WINDOW_HIDDEN
                                            )
        self.renderer = sdl2.SDL_CreateRenderer(self.window, -1,
                                                sdl2.SDL_RENDERER_ACCELERATED|sdl2.SDL_RENDERER_PRESENTVSYNC)
        self.renderer_obj = Renderer(self.renderer)
        if self.zoom != 1:
            sdl2.SDL_RenderSetScale(self.renderer, self.zoom, self.zoom)

    def set_icon(self, filename):
        """
        Sets the window icon from an image

        NOT a texture (don't use load_resource method).
        """
        from sdl2 import sdlimage
        found_path = self._find_path(filename)
        image = sdlimage.IMG_Load(found_path.encode())
        if not image:
            sys.exit("Error loading %r: %s" % (filename, sdlimage.IMG_GetError()))
        sdl2.SDL_SetWindowIcon(self.window, image)
        sdl2.SDL_FreeSurface(image)

    def _update(self, dt):
        # Run the update handlers at a fixed timestep, catching up if the
        # frame took longer than one update interval.
        self._update_dt += dt
        while self._update_dt > self.UFPS_DT:
            for update in self.update_handlers:
                update(self.UFPS_DT)
            self._update_dt -= self.UFPS_DT

    def _draw(self):
        for draw in self.draw_handlers:
            draw(self.renderer_obj)

    def quit(self):
        """Quits the game"""
        self._quit = True

    def loop(self):
        """The game loop!"""
        sdl2.SDL_ShowWindow(self.window)

        current = sdl2.SDL_GetPerformanceCounter()
        freq = sdl2.SDL_GetPerformanceFrequency()
        while not self._quit:
            event = sdl2.SDL_Event()
            while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
                if event.type == sdl2.SDL_QUIT:
                    self._quit = True
                    break

            # refresh keyboard state and mirror controller buttons into it
            self.keys = sdl2.SDL_GetKeyboardState(None)
            for controller in self._controllers.values():
                controller.poll()

            new = sdl2.SDL_GetPerformanceCounter()
            self._update((new - current) / freq)
            current = new

            sdl2.SDL_RenderClear(self.renderer)
            self._draw()
            sdl2.SDL_RenderPresent(self.renderer)

        # teardown: free resources, close controllers, destroy SDL objects
        for resource in self.resources.copy().keys():
            self.free_resource(resource)
        for controller in list(self._controllers.values()):
            if controller.handler:
                controller.close()
        sdl2.SDL_DestroyRenderer(self.renderer)
        sdl2.SDL_HideWindow(self.window)
        sdl2.SDL_DestroyWindow(self.window)
        sdlmixer.Mix_Quit()
        sdl2.SDL_Quit()

    def remove_handler(self, fn):
        """
        Remove a draw or update handler

        Parameters:

            fn: handler to remove.
        """
        if fn in self.draw_handlers:
            self.draw_handlers.remove(fn)
        if fn in self.update_handlers:
            self.update_handlers.remove(fn)

    def draw(self, fn):
        """Decorator that registers ``fn`` as a draw handler."""
        self.draw_handlers.append(fn)
        return fn

    def update(self, fn):
        """Decorator that registers ``fn`` as an update handler."""
        self.update_handlers.append(fn)
        return fn

    def play(self, sample, loops=0):
        """
        Plays a sample loaded with load_resource

        Parameters:

            sample: sample to play.
            loops: number of times to play the sample (-1 for infinite loop).
        """
        return sdlmixer.Mix_PlayChannel(-1, sample, loops)

    def stop_playback(self, channel=-1):
        """Stops the audio playback"""
        return sdlmixer.Mix_HaltChannel(channel)

    def free_resource(self, filename):
        """Free resources"""
        try:
            free_fn = self.resources[filename]
        except KeyError:
            return
        free_fn()
        del self.resources[filename]

    def _find_path(self, filename):
        """Return the first existing path for ``filename`` in resource_path."""
        found_path = None
        for path in self.resource_path:
            full_path = os.path.realpath(os.path.join(path, filename))
            if os.path.isfile(full_path):
                found_path = full_path
                break
        if found_path is None:
            raise OSError("Resource not found: %r" % filename)
        return found_path

    def load_resource(self, filename):
        """
        Loads resources

        Parameters:

            filename: file name of the resource to load.

        The resource is identified based on its name:

            .bmp: image (using SDL2).
            .png, .gif, .jpg: image (using SDL2_Image)
            .wav, .ogg: audio sample

        If the resource type is not identified, an open file handle
        is returned (is to the callee to close the file).
        """
        found_path = self._find_path(filename)

        if filename[-4:] == ".bmp":
            image = sdl2.SDL_LoadBMP(found_path.encode())
            if not image:
                sys.exit("Error loading %r: %s" % (filename, sdl2.SDL_GetError()))
            # NOTE(review): this branch returns the raw SDL texture, unlike the
            # .png/.gif/.jpg branch which wraps it in Texture — confirm whether
            # callers rely on that before unifying.
            resource = sdl2.SDL_CreateTextureFromSurface(self.renderer, image)
            free_fn = lambda : sdl2.SDL_DestroyTexture(resource)
            sdl2.SDL_FreeSurface(image)
        elif filename[-4:] in (".png", ".gif", ".jpg"):
            from sdl2 import sdlimage
            image = sdlimage.IMG_Load(found_path.encode())
            if not image:
                sys.exit("Error loading %r: %s" % (filename, sdlimage.IMG_GetError()))
            texture = sdl2.SDL_CreateTextureFromSurface(self.renderer, image)
            free_fn = lambda : sdl2.SDL_DestroyTexture(texture)
            resource = Texture(texture, (0, 0, image.contents.w, image.contents.h))
            sdl2.SDL_FreeSurface(image)
        elif filename[-4:] in (".wav", ".ogg"):
            audio = sdlmixer.Mix_LoadWAV(found_path.encode())
            if not audio:
                sys.exit("Error loading %r: %s" % (filename, sdlmixer.Mix_GetError()))
            resource = audio
            free_fn = lambda : sdlmixer.Mix_FreeChunk(resource)
        else:
            # FIX: open the resolved resource path, not the bare filename
            # (the bare name was relative to the CWD, ignoring resource_path).
            return open(found_path, "rb")

        self.resources[filename] = free_fn
        return resource

    def load_bitmap_font(self, filename, width, height, font_map=None):
        """
        Loads a bitmap font

        Parameters:

            filename: image containing the font (eg, font.png).
            width: width of a font character.
            height: height of a font character.
            font_map: string with the order of the characters in the font.

        The default font map is:

        "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!?()@:/'., "
        """
        if font_map is None:
            font_map = self.FONT_MAP

        font = BitmapFont(texture=self.load_resource(filename),
                          width=width,
                          height=height,
                          font_map=font_map,
                          )
        return font

    @property
    def has_controllers(self):
        """True if there are game controllers available"""
        return sdl2.SDL_NumJoysticks() > 0

    @property
    def controllers(self):
        """
        Get a tuple of all detected game controllers

        By getting a controller from this list, the controller gets automatically
        enabled and ready to use.
        """
        for joy in range(sdl2.SDL_NumJoysticks()):
            if sdl2.SDL_IsGameController(joy):
                controller = Controller(joy, self)
                self._controllers[controller.name] = controller
        return tuple(self._controllers.values())
class Renderer(object):
    """Wrapper for the renderer to be used by the draw functions"""

    def __init__(self, renderer):
        self.renderer = renderer

    def _get_rect(self, texture, rect=None):
        # Resolve the SDL_Rect to use: a Texture's own rect is the default,
        # an explicit tuple overrides it.
        _rect = rect
        if isinstance(texture, Texture):
            _rect = texture.sdl_rect
        if isinstance(rect, tuple):
            _rect = sdl2.SDL_Rect(*rect)
        return _rect

    def draw(self, texture, x=None, y=None, src_rect=None, dest_rect=None, tint=None):
        """
        Draws a texture

        Parameters:

            texture: texture created with Harness.load_resource or Texture.get_texture.
            x: horizontal location to draw the whole texture.
            y: vertical location to draw the whole texture.
            src_rect: tuple with the rect defining the section of the texture to draw.
            dest_rect: tuple with the rect defining the section of the destination. If
                       this parameter is used, x and y are ignored.
            tint: colour the text texture, tuple with (r, g, b, alpha).
        """
        _texture = texture.texture
        src = self._get_rect(texture, src_rect)
        _dest_rect = dest_rect
        # FIX: all([x, y]) treated coordinate 0 as "not provided", so drawing
        # at x=0 or y=0 silently ignored the requested position.
        if _dest_rect is None and x is not None and y is not None:
            _dest_rect = (x, y, texture.rect[2], texture.rect[3])
        dest = self._get_rect(texture, _dest_rect)

        if isinstance(tint, tuple) and len(tint) == 4:
            # FIX: SDL_SetTextureColorMod takes (r, g, b) only; the alpha
            # component goes through SDL_SetTextureAlphaMod. Unpacking the
            # whole 4-tuple raised a ctypes argument error.
            sdl2.SDL_SetTextureColorMod(_texture, tint[0], tint[1], tint[2])
            sdl2.SDL_SetTextureAlphaMod(_texture, tint[3])
        else:
            tint = None

        sdl2.SDL_RenderCopy(self.renderer, _texture, src, dest)

        if tint:
            # restore neutral modulation
            sdl2.SDL_SetTextureColorMod(_texture, 255, 255, 255)
            sdl2.SDL_SetTextureAlphaMod(_texture, 255)

    def draw_text(self, font, x, y, text, align="left", tint=None):
        """
        Draws text using a bitmap font

        Parameters:

            font: font (load it first with load_bitmap_font).
            x: horizontal position on the screen.
            y: vertical position on the screen.
            text: the text to render.
            align: "left", "right" or "center" (defaults to "left").
            tint: colour the text texture, tuple with (r, g, b, alpha).
        """
        width = len(text) * font.width

        if align == "center":
            x -= width // 2
            y -= font.height // 2
        elif align == "right":
            x -= width

        src = sdl2.SDL_Rect(font.rect[0],
                            font.rect[1],
                            font.width,
                            font.height,
                            )
        dest = sdl2.SDL_Rect(0, y, font.width, font.height)

        if isinstance(tint, tuple) and len(tint) == 4:
            # FIX: same (r, g, b) / alpha split as in draw() above.
            sdl2.SDL_SetTextureColorMod(font.texture, tint[0], tint[1], tint[2])
            sdl2.SDL_SetTextureAlphaMod(font.texture, tint[3])
        else:
            tint = None

        for i, c in enumerate(text):
            index = font.font_map.find(c)
            src.x = font.rect[0] + index * font.width
            dest.x = x + i * font.width
            sdl2.SDL_RenderCopy(self.renderer, font.texture, src, dest)

        if tint:
            sdl2.SDL_SetTextureColorMod(font.texture, 255, 255, 255)
            sdl2.SDL_SetTextureAlphaMod(font.texture, 255)
class Texture(object):
    """Wrapper around an SDL texture, or a rectangular region of one."""

    def __init__(self, texture, rect):
        self.texture = texture
        self.rect = rect
        # convenience accessors for the rect's size
        self.width, self.height = rect[2], rect[3]
        self.sdl_rect = sdl2.SDL_Rect(*rect)

    def get_texture(self, x, y, width, height):
        """
        Returns a reference to a subtexture

        Parameters:

            x: horizontal position on the parent texture.
            y: vertical position on the parent texture.
            width: width of the subtexture.
            height: height of the subtexture.
        """
        return Texture(self.texture, (x, y, width, height))
class BitmapFont(object):
    """Fixed-size bitmap font backed by a texture atlas."""

    def __init__(self, texture, width, height, font_map):
        # Keep direct references to the underlying SDL handles so the
        # renderer does not need an extra indirection per glyph.
        self.texture = texture.texture
        self.rect = texture.rect
        self.sdl_rect = texture.sdl_rect
        self.width = width        # width of one glyph in pixels
        self.height = height      # height of one glyph in pixels
        self.font_map = font_map  # glyph order within the atlas
class Controller(object):
    """Game controller

    Controller buttons are mirrored into the harness keyboard state via
    ``poll()``, using a per-controller action -> KEY_* name mapping.
    """
    # default mapping: controller action name -> harness KEY_* attribute name
    DEF_KEY_MAPPING = dict(up="KEY_UP",
                           down="KEY_DOWN",
                           left="KEY_LEFT",
                           right="KEY_RIGHT",
                           a="KEY_C",
                           b="KEY_V",
                           start="KEY_S",
                           back="KEY_ESCAPE",
                           )
    # controller action name -> sdl2 button constant name
    MAPPING = dict(up="SDL_CONTROLLER_BUTTON_DPAD_UP",
                   down="SDL_CONTROLLER_BUTTON_DPAD_DOWN",
                   left="SDL_CONTROLLER_BUTTON_DPAD_LEFT",
                   right="SDL_CONTROLLER_BUTTON_DPAD_RIGHT",
                   a="SDL_CONTROLLER_BUTTON_A",
                   b="SDL_CONTROLLER_BUTTON_B",
                   start="SDL_CONTROLLER_BUTTON_START",
                   back="SDL_CONTROLLER_BUTTON_BACK",
                   )

    def __init__(self, joy_number, harness):
        # FIX: copy the default mapping; assigning the class-level dict
        # aliased it, so set_mapping() on one controller mutated the mapping
        # of every controller (and future instances).
        self.key_mapping = dict(self.DEF_KEY_MAPPING)
        self.harness = harness
        self.joy_number = joy_number
        self.previous = dict((key, False) for key in self.MAPPING.keys())

        # unlikely
        if not sdl2.SDL_IsGameController(joy_number):
            raise ValueError("%r is not a support game controller" % joy_number)

        self.handler = sdl2.SDL_GameControllerOpen(self.joy_number)
        if not self.handler:
            raise ValueError("%r is not a support game controller" % joy_number)
        self.name = sdl2.SDL_GameControllerName(self.handler)

    def __repr__(self):
        return u"<Controller: %r>" % self.name

    def close(self):
        """Deactivate a game controller"""
        sdl2.SDL_GameControllerClose(self.handler)
        self.handler = None
        if self.name in self.harness._controllers:
            del self.harness._controllers[self.name]

    def set_mapping(self, **kwargs):
        """
        Maps a controller action to a key

        Takes named parameters in the form:

            action="KEY"

        Example:

            start="KEY_S"
        """
        for key, value in kwargs.items():
            if key not in self.MAPPING.keys():
                raise ValueError("%r is not a supported game controler to keyboard mapping" % key)
            # FIX: key_mapping is keyed by action name ("up", "start", ...)
            # — that is what poll() looks up. The old getattr(sdl2, key)
            # raised AttributeError (sdl2 has no attribute "start").
            self.key_mapping[key] = value

    def poll(self):
        """Mirror the current button states into the harness keyboard state."""
        if not self.handler:
            return
        for key, value in self.MAPPING.items():
            state = sdl2.SDL_GameControllerGetButton(self.handler, getattr(sdl2, value)) == 1
            if self.previous[key] != state:
                self.harness.keys[getattr(self.harness, self.key_mapping[key])] = state
                self.previous[key] = state
| reidrac/pysdl2-harness | harness/__init__.py | Python | mit | 17,999 |
# -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
from io import BytesIO
from tempfile import TemporaryFile
from itertools import chain, repeat, tee
from functools import update_wrapper
from werkzeug._compat import to_native, text_type
from werkzeug.urls import url_decode_stream
from werkzeug.wsgi import make_line_iter, \
get_input_stream, get_content_length
from werkzeug.datastructures import Headers, FileStorage, MultiDict
from werkzeug.http import parse_options_header
#: an iterator that yields empty strings (infinite, via itertools.repeat)
_empty_string_iter = repeat('')

#: a regular expression for multipart boundaries
#: (printable ASCII, up to 200 chars, last char must not be a space)
_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')

#: supported http encodings that are also available in python we support
#: for multipart messages.
_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
def default_stream_factory(total_content_length, filename, content_type,
                           content_length=None):
    """The stream factory that is used per default.

    Small uploads (up to 500 KiB) stay in memory; anything larger is
    spooled to an anonymous temporary file.
    """
    in_memory_limit = 1024 * 500
    if total_content_length <= in_memory_limit:
        return BytesIO()
    return TemporaryFile('wb+')
def parse_form_data(environ, stream_factory=None, charset='utf-8',
                    errors='replace', max_form_memory_size=None,
                    max_content_length=None, cls=None,
                    silent=True):
    """Parse the form data in the environ and return it as tuple in the form
    ``(stream, form, files)``.  You should only call this method if the
    transport method is `POST`, `PUT`, or `PATCH`.

    If the mimetype of the data transmitted is `multipart/form-data` the
    files multidict will be filled with `FileStorage` objects.  If the
    mimetype is unknown the input stream is wrapped and returned as first
    argument, else the stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`.

    Have a look at :ref:`dealing-with-request-data` for more details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and
       `cls` parameters were added.

    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor.  This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data.  If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use.  If this is not specified
                       or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    # Thin convenience wrapper: build a one-shot FormDataParser and parse
    # straight from the WSGI environ.
    return FormDataParser(stream_factory, charset, errors,
                          max_form_memory_size, max_content_length,
                          cls, silent).parse_from_environ(environ)
def exhaust_stream(f):
    """Helper decorator for methods that exhausts the stream on return."""
    def wrapper(self, stream, *args, **kwargs):
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            # Prefer the stream's own exhaust() hook; otherwise drain the
            # remaining data manually in 64 KiB chunks.
            exhaust = getattr(stream, 'exhaust', None)
            if exhaust is not None:
                exhaust()
            else:
                while stream.read(1024 * 64):
                    pass
    return update_wrapper(wrapper, f)
class FormDataParser(object):
    """This class implements parsing of form data for Werkzeug.  By itself
    it can parse multipart and url encoded form data.  It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    .. versionadded:: 0.8

    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor.  This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data.  If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use.  If this is not specified
                       or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    """

    def __init__(self, stream_factory=None, charset='utf-8',
                 errors='replace', max_form_memory_size=None,
                 max_content_length=None, cls=None,
                 silent=True):
        if stream_factory is None:
            stream_factory = default_stream_factory
        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        if cls is None:
            cls = MultiDict
        self.cls = cls
        self.silent = silent

    def get_parse_func(self, mimetype, options):
        # Returns the bound parse function for the mimetype, or None if the
        # mimetype is not supported (caller then returns an empty result).
        return self.parse_functions.get(mimetype)

    def parse_from_environ(self, environ):
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        content_type = environ.get('CONTENT_TYPE', '')
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype,
                          content_length, options)

    def parse(self, stream, mimetype, content_length, options=None):
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.
        """
        if self.max_content_length is not None and \
           content_length is not None and \
           content_length > self.max_content_length:
            # NOTE(review): `exceptions` is not imported anywhere in this
            # module as shown — confirm the import exists elsewhere,
            # otherwise this line raises NameError instead.
            raise exceptions.RequestEntityTooLarge()
        if options is None:
            options = {}

        parse_func = self.get_parse_func(mimetype, options)
        if parse_func is not None:
            try:
                return parse_func(self, stream, mimetype,
                                  content_length, options)
            except ValueError:
                # With silent=True (default), parse errors degrade to an
                # empty result instead of propagating.
                if not self.silent:
                    raise

        return stream, self.cls(), self.cls()

    @exhaust_stream
    def _parse_multipart(self, stream, mimetype, content_length, options):
        # Delegates to MultiPartParser; the boundary comes from the
        # Content-Type header parameters.
        parser = MultiPartParser(self.stream_factory, self.charset, self.errors,
                                 max_form_memory_size=self.max_form_memory_size,
                                 cls=self.cls)
        boundary = options.get('boundary')
        if boundary is None:
            raise ValueError('Missing boundary')
        if isinstance(boundary, text_type):
            boundary = boundary.encode('ascii')
        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    @exhaust_stream
    def _parse_urlencoded(self, stream, mimetype, content_length, options):
        if self.max_form_memory_size is not None and \
           content_length is not None and \
           content_length > self.max_form_memory_size:
            # NOTE(review): same unresolved `exceptions` name as in parse().
            raise exceptions.RequestEntityTooLarge()
        form = url_decode_stream(stream, self.charset,
                                 errors=self.errors, cls=self.cls)
        return stream, form, self.cls()

    #: mapping of mimetypes to parsing functions
    parse_functions = {
        'multipart/form-data': _parse_multipart,
        'application/x-www-form-urlencoded': _parse_urlencoded,
        'application/x-url-encoded': _parse_urlencoded
    }
def is_valid_multipart_boundary(boundary):
    """Checks if the string given is a valid multipart boundary."""
    match = _multipart_boundary_re.match(boundary)
    return match is not None
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ['\r\n', b'\r\n']:
return line[:-2], True
elif line[-1:] in ['\r', '\n', b'\r', b'\n']:
return line[:-1], True
return line, False
def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol).  The iterable has to be newline terminated.

    The iterable will stop at the line where the headers ended so it can be
    further consumed.

    :param iterable: iterable of strings that are newline terminated
    """
    result = []
    for line in iterable:
        line = to_native(line)
        line, line_terminated = _line_parse(line)
        if not line_terminated:
            raise ValueError('unexpected end of line in multipart header')
        if not line:
            # blank line terminates the header section
            break
        elif line[0] in ' \t' and result:
            # RFC 822 continuation line: append to the previous header value
            key, value = result[-1]
            result[-1] = (key, value + '\n ' + line[1:])
        else:
            parts = line.split(':', 1)
            if len(parts) == 2:
                result.append((parts[0].strip(), parts[1].strip()))

    # we link the list to the headers, no need to create a copy, the
    # list was not shared anyways.
    return Headers(result)
#: event markers emitted/consumed by the multipart parsing state machine
_begin_form = 'begin_form'
_begin_file = 'begin_file'
_cont = 'cont'
_end = 'end'
class MultiPartParser(object):
    """Parser for ``multipart/form-data`` request bodies.

    Splits a multipart stream into regular form fields and uploaded
    files.  Uploaded files are written to streams produced by
    *stream_factory*; form fields are collected in memory, optionally
    bounded by *max_form_memory_size*.
    """

    def __init__(self, stream_factory=None, charset='utf-8', errors='replace',
                 max_form_memory_size=None, cls=None, buffer_size=64 * 1024):
        # Resolve defaults *before* storing them on the instance.  The
        # previous code assigned ``self.stream_factory`` first and then only
        # fixed up the local variable, so a parser constructed without an
        # explicit stream factory kept ``self.stream_factory = None`` and
        # crashed later in :meth:`start_file_streaming`.
        if stream_factory is None:
            stream_factory = default_stream_factory
        if cls is None:
            cls = MultiDict
        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.cls = cls

        # make sure the buffer size is divisible by four so that we can base64
        # decode chunk by chunk
        assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
        # also the buffer size has to be at least 1024 bytes long or long headers
        # will freak out the system
        assert buffer_size >= 1024, 'buffer size has to be at least 1KB'

        self.buffer_size = buffer_size

    def _fix_ie_filename(self, filename):
        """Internet Explorer 6 transmits the full file name if a file is
        uploaded. This function strips the full path if it thinks the
        filename is Windows-like absolute.
        """
        if filename[1:3] == ':\\' or filename[:2] == '\\\\':
            return filename.split('\\')[-1]
        return filename

    def _find_terminator(self, iterator):
        """The terminator might have some additional newlines before it.
        There is at least one application that sends additional newlines
        before headers (the python setuptools package).
        """
        for line in iterator:
            if not line:
                break
            line = line.strip()
            if line:
                return line
        return b''

    def fail(self, message):
        """Abort parsing with a :exc:`ValueError` carrying *message*."""
        raise ValueError(message)

    def get_part_encoding(self, headers):
        """Return the part's content-transfer-encoding if it is one we
        can decode, otherwise ``None``.
        """
        transfer_encoding = headers.get('content-transfer-encoding')
        if transfer_encoding is not None and \
                transfer_encoding in _supported_multipart_encodings:
            return transfer_encoding

    def get_part_charset(self, headers):
        """Return the charset of the current part, falling back to the
        parser-wide default.
        """
        # Figure out input charset for current part
        content_type = headers.get('content-type')
        if content_type:
            mimetype, ct_params = parse_options_header(content_type)
            return ct_params.get('charset', self.charset)
        return self.charset

    def start_file_streaming(self, filename, headers, total_content_length):
        """Create the container stream for an uploaded file.

        :return: ``(filename, container)`` where *container* is whatever
                 the stream factory produced.
        """
        if isinstance(filename, bytes):
            filename = filename.decode(self.charset, self.errors)
        filename = self._fix_ie_filename(filename)
        content_type = headers.get('content-type')
        try:
            content_length = int(headers['content-length'])
        except (KeyError, ValueError):
            # missing or malformed per-part content-length is not fatal
            content_length = 0
        container = self.stream_factory(total_content_length, content_type,
                                       filename, content_length)
        return filename, container

    def in_memory_threshold_reached(self, bytes):
        """Called when an in-memory form field grows beyond
        ``max_form_memory_size``.
        """
        raise exceptions.RequestEntityTooLarge()

    def validate_boundary(self, boundary):
        """Fail early on missing, malformed or oversized boundaries."""
        if not boundary:
            self.fail('Missing boundary')
        if not is_valid_multipart_boundary(boundary):
            self.fail('Invalid boundary: %s' % boundary)
        if len(boundary) > self.buffer_size:  # pragma: no cover
            # this should never happen because we check for a minimum size
            # of 1024 and boundaries may not be longer than 200.  The only
            # situation when this happens is for non debug builds where
            # the assert is skipped.
            self.fail('Boundary longer than buffer size')

    def parse_lines(self, file, boundary, content_length):
        """Generate parts of
        ``('begin_form', (headers, name))``
        ``('begin_file', (headers, name, filename))``
        ``('cont', bytestring)``
        ``('end', None)``

        Always obeys the grammar
        parts = ( begin_form cont* end |
                  begin_file cont* end )*
        """
        next_part = b'--' + boundary
        last_part = next_part + b'--'

        iterator = chain(make_line_iter(file, limit=content_length,
                                        buffer_size=self.buffer_size),
                         _empty_string_iter)

        terminator = self._find_terminator(iterator)
        if terminator == last_part:
            return
        elif terminator != next_part:
            self.fail('Expected boundary at start of multipart data')

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)

            disposition = headers.get('content-disposition')
            if disposition is None:
                self.fail('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            transfer_encoding = self.get_part_encoding(headers)
            name = extra.get('name')
            filename = extra.get('filename')

            # if no content type is given we stream into memory.  A list is
            # used as a temporary container.
            if filename is None:
                yield _begin_form, (headers, name)

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                yield _begin_file, (headers, name, filename)

            buf = b''
            for line in iterator:
                if not line:
                    self.fail('unexpected end of stream')

                if line[:2] == b'--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if transfer_encoding is not None:
                    # 'base64' is not a codec name; map it to the codec alias
                    if transfer_encoding == 'base64':
                        transfer_encoding = 'base64_codec'
                    try:
                        line = codecs.decode(line, transfer_encoding)
                    except Exception:
                        self.fail('could not decode transfer encoded chunk')

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    yield _cont, buf
                    buf = b''

                # If the line ends with windows CRLF we write everything except
                # the last two bytes.  In all other cases however we write
                # everything except the last byte.  If it was a newline, that's
                # fine, otherwise it does not matter because we will write it
                # the next iteration.  this ensures we do not write the
                # final newline into the stream.  That way we do not have to
                # truncate the stream.  However we do have to make sure that
                # if something else than a newline is in there we write it
                # out.
                if line[-2:] == b'\r\n':
                    buf = b'\r\n'
                    cutoff = -2
                else:
                    buf = line[-1:]
                    cutoff = -1
                yield _cont, line[:cutoff]

            else:  # pragma: no cover
                raise ValueError('unexpected end of part')

            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop of
            # certain values.
            if buf not in (b'', b'\r', b'\n', b'\r\n'):
                yield _cont, buf

            yield _end, None

    def parse_parts(self, file, boundary, content_length):
        """Generate ``('file', (name, val))`` and
        ``('form', (name, val))`` parts.
        """
        in_memory = 0

        for ellt, ell in self.parse_lines(file, boundary, content_length):
            if ellt == _begin_file:
                headers, name, filename = ell
                is_file = True
                guard_memory = False
                filename, container = self.start_file_streaming(
                    filename, headers, content_length)
                _write = container.write

            elif ellt == _begin_form:
                headers, name = ell
                is_file = False
                container = []
                _write = container.append
                guard_memory = self.max_form_memory_size is not None

            elif ellt == _cont:
                _write(ell)
                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(ell)
                    if in_memory > self.max_form_memory_size:
                        self.in_memory_threshold_reached(in_memory)

            elif ellt == _end:
                if is_file:
                    container.seek(0)
                    yield ('file',
                           (name, FileStorage(container, filename, name,
                                              headers=headers)))
                else:
                    part_charset = self.get_part_charset(headers)
                    yield ('form',
                           (name, b''.join(container).decode(
                               part_charset, self.errors)))

    def parse(self, file, boundary, content_length):
        """Parse *file* and return ``(form, files)`` containers built via
        ``self.cls``.
        """
        formstream, filestream = tee(
            self.parse_parts(file, boundary, content_length), 2)
        form = (p[1] for p in formstream if p[0] == 'form')
        files = (p[1] for p in filestream if p[0] == 'file')
        return self.cls(form), self.cls(files)
from werkzeug import exceptions
| fancasy/final | lib/werkzeug/formparser.py | Python | apache-2.0 | 21,732 |
# Backend loading
# Based on the Django cache framework
# https://github.com/django/django/blob/5d263dee304fdaf95e18d2f0619d6925984a7f02/django/core/cache/__init__.py
import sys
from importlib import import_module
import warnings
from django.utils import six
from django.utils.module_loading import import_string
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
class InvalidSearchBackendError(ImproperlyConfigured):
    """Raised when a configured search backend cannot be located or imported."""
    pass
def get_search_backend_config():
    """Return the ``WAGTAILSEARCH_BACKENDS`` setting with a guaranteed
    ``default`` entry.

    A shallow copy of the setting is returned so that inserting the
    default entry does not mutate the dict object stored on the Django
    settings module (the previous implementation called ``setdefault``
    directly on the settings value, silently modifying global state).
    """
    search_backends = dict(getattr(settings, 'WAGTAILSEARCH_BACKENDS', {}))

    # Make sure the default backend is always defined
    search_backends.setdefault('default', {
        'BACKEND': 'wagtail.wagtailsearch.backends.db',
    })

    return search_backends
def import_backend(dotted_path):
    """Import a search backend class from *dotted_path*.

    Two path formats are supported.  The old style names the backend
    class directly; the new style names only the module, from which the
    ``SearchBackend`` attribute is taken::

        old: wagtail.wagtailsearch.backends.elasticsearch.ElasticsearchSearchBackend
        new: wagtail.wagtailsearch.backends.elasticsearch
    """
    try:
        # New style: the path is a module exposing ``SearchBackend``
        return import_module(dotted_path).SearchBackend
    except ImportError as new_style_error:
        try:
            # Old style: the path points at the class itself
            return import_string(dotted_path)
        except ImportError:
            # Re-raise the original (new style) error with its traceback
            six.reraise(ImportError, new_style_error, sys.exc_info()[2])
def get_search_backend(backend='default', **kwargs):
    """Instantiate and return the search backend named *backend*.

    *backend* may be a key into ``WAGTAILSEARCH_BACKENDS`` or a dotted
    import path.  Extra keyword arguments override the configured
    parameters.  Raises :class:`InvalidSearchBackendError` when the
    backend cannot be imported.

    (The redundant ``backend = backend`` no-op assignments from the
    original implementation have been removed.)
    """
    search_backends = get_search_backend_config()

    # Try to find the backend
    try:
        # Try to get the WAGTAILSEARCH_BACKENDS entry for the given backend name first
        conf = search_backends[backend]
    except KeyError:
        try:
            # Trying to import the given backend, in case it's a dotted path
            import_backend(backend)
        except ImportError as e:
            raise InvalidSearchBackendError("Could not find backend '{}': {}".format(
                backend, e))
        params = kwargs
    else:
        # Backend is a conf entry
        params = conf.copy()
        params.update(kwargs)
        backend = params.pop('BACKEND')

    # Try to import the backend
    try:
        backend_cls = import_backend(backend)
    except ImportError as e:
        raise InvalidSearchBackendError("Could not find backend '{}': {}".format(
            backend, e))

    # Create backend
    return backend_cls(params)
def _backend_requires_auto_update(backend_name, params):
    """Return ``True`` if the named backend should receive automatic
    index updates.
    """
    if params.get('AUTO_UPDATE', True):
        return True

    # _WAGTAILSEARCH_FORCE_AUTO_UPDATE is only used by Wagtail tests. It allows
    # us to test AUTO_UPDATE behaviour against Elasticsearch without having to
    # have AUTO_UPDATE enabed for every test.
    forced = getattr(settings, '_WAGTAILSEARCH_FORCE_AUTO_UPDATE', [])
    return backend_name in forced
def get_search_backends_with_name(with_auto_update=False):
    """Yield ``(name, backend_instance)`` pairs for every configured
    backend.  When *with_auto_update* is set, backends that opted out of
    automatic updates are skipped.
    """
    for name, params in get_search_backend_config().items():
        if with_auto_update and not _backend_requires_auto_update(name, params):
            continue
        yield name, get_search_backend(name)
def get_search_backends(with_auto_update=False):
    """Backwards-compatible wrapper: yield backend instances only,
    dropping the names produced by :func:`get_search_backends_with_name`.
    """
    for _name, backend in get_search_backends_with_name(with_auto_update=with_auto_update):
        yield backend
| praekelt/nurseconnect | nurseconnect/wagtailsearch/backends/__init__.py | Python | bsd-2-clause | 3,570 |
# Copyright 2015, 2020-2021 National Research Foundation (SARAO)
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Receive SPEAD protocol
Item format
===========
At present only a subset of the possible SPEAD format strings are accepted.
Also, the SPEAD protocol does not specify how items are to be represented in
Python. The following are accepted.
- Any descriptor with a numpy header (will be handled by numpy). If the dtype
contains only a single field which is non-native endian, it will be
converted to native endian in-place. In other cases, the value retrieved
from numpy will still be correct, but usage may be slow.
- If no numpy header is present, the following may be used in the format
with zero copy and good efficiency:
- u8, u16, u32, u64
- i8, i16, i32, i64
- f32, f64
- b8
- c8 (converted to dtype S1)
This will be converted to a numpy dtype. If there are multiple fields,
their names will be generated by numpy (`f0`, `f1`, etc). At most one
element of the shape may indicate a variable-length field, whose length
will be computed from the size of the item, or 0 if any other element of
the shape is zero.
- The `u`, `i`, `c` and `b` types may also be used with other sizes, but it
will invoke a slow conversion process and is not recommended for large
arrays. For `c`, the value is interpreted as a Unicode code point.
Two cases are treated specially:
- A zero-dimensional array is returned as a scalar, rather than a
zero-dimensional array object.
- A one-dimensional array of characters (numpy dtype 'S1') is converted to a
Python string, using ASCII encoding.
Immediate values are treated as items with heap_address_bits/8
bytes, in the order they appeared in the original packet.
"""
from spead2._spead2.recv import ( # noqa: F401
StreamConfig, RingStreamConfig, Stream, Heap, IncompleteHeap,
Chunk, ChunkStreamConfig, ChunkRingStream, ChunkRingbuffer,
StreamStats, StreamStatConfig
)
from . import stream_stat_indices # noqa: F401
# UdpIbvConfig is only present when the extension module was built with
# ibverbs support; skip the re-export otherwise.
try:
    from spead2._spead2.recv import UdpIbvConfig  # noqa: F401
except ImportError:
    pass
| ska-sa/spead2 | src/spead2/recv/__init__.py | Python | lgpl-3.0 | 2,768 |
"""Test for first Graph data structure."""
import pytest
from graph_1 import Graph
# --- pytest fixtures: prebuilt graphs of varying size and shape ---
@pytest.fixture
def empty_graph():
    """Return an empty graph."""
    return Graph()
@pytest.fixture
def one_node_graph():
    """Return a graph with one node."""
    g = Graph()
    g.add_node('corn')
    return g
@pytest.fixture
def two_node_no_edge_graph():
    """Return a graph with two nodes and no edges."""
    g = Graph()
    g.add_node('corn')
    g.add_node('beans')
    return g
@pytest.fixture
def two_node_with_edge_graph():
    """Return a graph with two nodes and one edge."""
    g = Graph()
    g.add_node('corn')
    g.add_node('beans')
    g.add_edge('corn', 'beans')
    return g
@pytest.fixture
def two_node_two_edge_graph():
    """Return a graph with two nodes and two edges."""
    g = Graph()
    g.add_node(1)
    g.add_node(2)
    g.add_edge(1, 2)
    g.add_edge(2, 1)
    return g
@pytest.fixture
def three_node_with_two_edges_graph():
    """Return a graph with three nodes and two edges."""
    g = Graph()
    g.add_node('corn')
    g.add_node(2)
    g.add_node(11)
    g.add_edge('corn', 2)
    g.add_edge(11, 'corn')
    return g
@pytest.fixture
def three_node_cyclical_graph():
    """Return a bi-directional cyclical three-node graph."""
    g = Graph()
    g.add_node(1)
    g.add_node(2)
    g.add_node(3)
    g.add_edge(1, 2)
    g.add_edge(2, 1)
    g.add_edge(2, 3)
    g.add_edge(3, 2)
    g.add_edge(1, 3)
    g.add_edge(3, 1)
    return g
@pytest.fixture
def four_node_cyclical_graph():
    """Return a graph with four nodes that is cyclical."""
    g = Graph()
    g.add_node(1)
    g.add_node(2)
    g.add_node(3)
    g.add_node(4)
    g.add_edge(1, 2)
    g.add_edge(2, 3)
    g.add_edge(3, 4)
    g.add_edge(4, 1)
    return g
@pytest.fixture
def five_node_with_five_edges_graph():
    """Return a graph with five nodes and five edges."""
    g = Graph()
    g.add_node('corn')
    g.add_node(2)
    g.add_node(11.1)
    g.add_node('chocolate')
    g.add_node('mustard')
    g.add_edge(2, 11.1)
    g.add_edge('mustard', 'corn')
    g.add_edge(2, 'corn')
    g.add_edge('corn', 2)
    g.add_edge(11.1, 'corn')
    return g
@pytest.fixture
def five_node_simple_nodes_five_edges_graph():
    """Return a graph with five nodes and four edges."""
    g = Graph()
    g.add_node('corn')
    g.add_node(2)
    g.add_node(3)
    g.add_node('meep')
    g.add_node(10)
    g.add_edge(2, 3)
    g.add_edge(3, 2)
    g.add_edge(2, 'corn')
    g.add_edge('meep', 10)
    return g
@pytest.fixture
def seven_node_heapish_graph():
    """Return a graph with seven nodes that is heap like."""
    g = Graph()
    g.add_node(1)
    g.add_node(2)
    g.add_node(3)
    g.add_node(4)
    g.add_node(5)
    g.add_node(6)
    g.add_node(7)
    g.add_edge(1, 2)
    g.add_edge(1, 5)
    g.add_edge(2, 3)
    g.add_edge(2, 4)
    g.add_edge(3, 1)
    g.add_edge(5, 6)
    g.add_edge(5, 7)
    g.add_edge(7, 5)
    return g
# --- tests for Graph.nodes(), Graph.has_node() and Graph.edges() ---
def test_nodes_in_empty_graph(empty_graph):
    """Test empty graph has no nodes."""
    assert empty_graph.nodes() == []
def test_nodes_in_one_node_graph(one_node_graph):
    """Test one-node graph has correct node."""
    assert one_node_graph.nodes() == ['corn']
def test_nodes_in_two_node_graph(two_node_no_edge_graph):
    """Test two-node graph has two nodes."""
    assert 'corn' in two_node_no_edge_graph.nodes()
    assert 'beans' in two_node_no_edge_graph.nodes()
def test_nodes_in_three_node_graph(three_node_with_two_edges_graph):
    """Test three-node graph has three correct nodes."""
    assert 'corn' in three_node_with_two_edges_graph.nodes()
    assert 2 in three_node_with_two_edges_graph.nodes()
    assert 11 in three_node_with_two_edges_graph.nodes()
    assert len(three_node_with_two_edges_graph.nodes()) == 3
def test_nodes_in_five_node_graph(five_node_with_five_edges_graph):
    """Test five-node graph has five correct nodes."""
    assert 'corn' in five_node_with_five_edges_graph.nodes()
    assert 2 in five_node_with_five_edges_graph.nodes()
    assert 11.1 in five_node_with_five_edges_graph.nodes()
    assert 'chocolate' in five_node_with_five_edges_graph.nodes()
    assert 'mustard' in five_node_with_five_edges_graph.nodes()
    assert len(five_node_with_five_edges_graph.nodes()) == 5
def test_has_node_one_node(one_node_graph):
    """Test one-node graph has_node method."""
    assert one_node_graph.has_node('corn') is True
    assert one_node_graph.has_node(1) is False
def test_has_node_two_nodes(two_node_no_edge_graph):
    """Test two-node graph has_node method."""
    assert two_node_no_edge_graph.has_node('beans') is True
    assert two_node_no_edge_graph.has_node('picante') is False
def test_has_node_five_nodes(five_node_with_five_edges_graph):
    """Test five-node graph has_node method."""
    assert five_node_with_five_edges_graph.has_node('picante') is False
    assert five_node_with_five_edges_graph.has_node(11.1) is True
    assert five_node_with_five_edges_graph.has_node(2) is True
    assert five_node_with_five_edges_graph.has_node('chocolate') is True
    assert five_node_with_five_edges_graph.has_node('11.1') is False
def test_edges_empty_graph(empty_graph):
    """Test an empty graph for edges."""
    assert empty_graph.edges() == []
def test_edges_one_node_graph(one_node_graph):
    """Test a one-node graph has no edges."""
    assert one_node_graph.edges() == []
def test_edges_two_node_no_edge_graph(two_node_no_edge_graph):
    """Test a two-node graph without edges has none."""
    assert two_node_no_edge_graph.edges() == []
def test_edges_two_node_graph(two_node_with_edge_graph):
    """Test a two-node graph with a single edge."""
    assert two_node_with_edge_graph.edges() == [('corn', 'beans')]
def test_edges_three_node_graph(three_node_with_two_edges_graph):
    """Test three node graph for two edges."""
    assert ('corn', 2) in three_node_with_two_edges_graph.edges()
    assert (11, 'corn') in three_node_with_two_edges_graph.edges()
def test_edges_five_node_graph(five_node_with_five_edges_graph):
    """Test a five node graph with five edges."""
    assert ('corn', 2) in five_node_with_five_edges_graph.edges()
    assert (2, 11.1) in five_node_with_five_edges_graph.edges()
    assert (2, 'corn') in five_node_with_five_edges_graph.edges()
    assert (11.1, 'corn') in five_node_with_five_edges_graph.edges()
    assert ('mustard', 'corn') in five_node_with_five_edges_graph.edges()
# --- tests for Graph.add_node() and Graph.add_edge() ---
def test_add_node_no_val(one_node_graph):
    """Test adding node with no value raises ValueError."""
    with pytest.raises(ValueError):
        one_node_graph.add_node(None)
def test_add_node_bool(one_node_graph):
    """Test adding node with bool raises ValueError."""
    with pytest.raises(ValueError):
        one_node_graph.add_node(True)
def test_add_node_dupe(two_node_no_edge_graph):
    """Adding a duplicate node raises ValueError."""
    with pytest.raises(ValueError):
        two_node_no_edge_graph.add_node('corn')
def test_add_node_two_times(two_node_no_edge_graph):
    """Add two nodes to graph."""
    two_node_no_edge_graph.add_node('greetings')
    two_node_no_edge_graph.add_node('welcome back')
    assert two_node_no_edge_graph.has_node('greetings')
    assert two_node_no_edge_graph.has_node('welcome back')
    assert 'corn' in two_node_no_edge_graph.nodes()
    assert 'beans' in two_node_no_edge_graph.nodes()
    assert 'greetings' in two_node_no_edge_graph.nodes()
    assert 'welcome back' in two_node_no_edge_graph.nodes()
def test_add_node_once(two_node_no_edge_graph):
    """Add one node to graph."""
    two_node_no_edge_graph.add_node('why')
    assert two_node_no_edge_graph.has_node('why')
    assert 'corn' in two_node_no_edge_graph.nodes()
    assert 'beans' in two_node_no_edge_graph.nodes()
    assert 'why' in two_node_no_edge_graph.nodes()
def test_add_edge_new_nodes_on_empty(empty_graph):
    """Test adding edge creates nodes in empty graph with weight."""
    empty_graph.add_edge('chicken', 'bacon')
    assert 'chicken' and 'bacon' in empty_graph.nodes()
    assert ('chicken', 'bacon') in empty_graph.edges()
def test_add_edge_new_nodes(three_node_with_two_edges_graph):
    """Test adding edge with two new nodes."""
    three_node_with_two_edges_graph.add_edge('beans', 'tomato')
    assert ('corn', 2) in three_node_with_two_edges_graph.edges()
    assert (11, 'corn') in three_node_with_two_edges_graph.edges()
    assert ('beans', 'tomato') in three_node_with_two_edges_graph.edges()
    assert 'beans' and 'tomato' in three_node_with_two_edges_graph.nodes()
def test_add_edge_one_new_node(five_node_simple_nodes_five_edges_graph):
    """Test adding edge to one new node, one existing node."""
    five_node_simple_nodes_five_edges_graph.add_edge('hi', 3)
    assert (2, 3) in five_node_simple_nodes_five_edges_graph.edges()
    assert (2, 'corn') in five_node_simple_nodes_five_edges_graph.edges()
    assert (3, 2) in five_node_simple_nodes_five_edges_graph.edges()
    assert ('meep', 10) in five_node_simple_nodes_five_edges_graph.edges()
    assert ('hi', 3) in five_node_simple_nodes_five_edges_graph.edges()
    assert 'hi' and 3 in five_node_simple_nodes_five_edges_graph.nodes()
def test_add_edge_existing_nodes(five_node_simple_nodes_five_edges_graph):
    """Test adding edge to two existing nodes."""
    five_node_simple_nodes_five_edges_graph.add_edge(3, 'meep')
    assert (2, 3) in five_node_simple_nodes_five_edges_graph.edges()
    assert (2, 'corn') in five_node_simple_nodes_five_edges_graph.edges()
    assert (3, 2) in five_node_simple_nodes_five_edges_graph.edges()
    assert (3, 'meep') in five_node_simple_nodes_five_edges_graph.edges()
    assert ('meep', 10) in five_node_simple_nodes_five_edges_graph.edges()
    assert 3 and 'meep' in five_node_simple_nodes_five_edges_graph.nodes()
# --- tests for del_node/del_edge and neighbors/adjacent ---
def test_delete_node_empty_graph_error(empty_graph):
    """Delete node on empty graph."""
    with pytest.raises(ValueError):
        empty_graph.del_node('delete')
def test_delete_node_graph_error(five_node_with_five_edges_graph):
    """Delete node on populated graph."""
    with pytest.raises(ValueError):
        five_node_with_five_edges_graph.del_node('delete')
def test_delete_node_graph(five_node_with_five_edges_graph):
    """Delete node on populated graph."""
    five_node_with_five_edges_graph.del_node(2)
    assert 2 not in five_node_with_five_edges_graph.nodes()
    assert (2, 'corn', 3) not in five_node_with_five_edges_graph.edges()
    assert (2, 11.1, 2) not in five_node_with_five_edges_graph.edges()
def test_delete_edge_empty_graph_error(empty_graph):
    """Delete edge on empty graph raises ValueError."""
    with pytest.raises(ValueError):
        empty_graph.del_edge(1, 2)
def test_delete_edge_graph_error(five_node_with_five_edges_graph):
    """Delete edge between missing nodes raises ValueError."""
    with pytest.raises(ValueError):
        five_node_with_five_edges_graph.del_edge(1, 2)
def test_delete_edge_graph(five_node_simple_nodes_five_edges_graph):
    """Delete an existing edge from a populated graph."""
    assert (2, 'corn') in five_node_simple_nodes_five_edges_graph.edges()
    five_node_simple_nodes_five_edges_graph.del_edge(2, 'corn')
    assert (2, 'corn') not in five_node_simple_nodes_five_edges_graph.edges()
def test_neighbors_not_in_graph(five_node_with_five_edges_graph):
    """Test if the node asked for is in the graph to have neighbors."""
    g = five_node_with_five_edges_graph
    with pytest.raises(ValueError):
        g.neighbors('cake')
def test_neighbors_two_nodes_in_graph(two_node_two_edge_graph):
    """Test that the queried node has one neighbor."""
    g = two_node_two_edge_graph
    assert g.neighbors(1) == [2]
def test_neighbors_five_nodes_in_graph(five_node_with_five_edges_graph):
    """Test that the queried node has two neighbors."""
    g = five_node_with_five_edges_graph
    assert g.neighbors(2) == [11.1, 'corn']
def test_adjacent_first_val_not_in_graph(five_node_with_five_edges_graph):
    """Test that the first val is not a node to be adjacent."""
    g = five_node_with_five_edges_graph
    with pytest.raises(ValueError):
        g.adjacent('cake', 11.1)
def test_adjacent_second_val_not_in_graph(five_node_with_five_edges_graph):
    """Test that the second val is not a node to be adjacent."""
    g = five_node_with_five_edges_graph
    with pytest.raises(ValueError):
        g.adjacent('corn', 'pie')
def test_adjacent_both_vals_not_in_graph(five_node_with_five_edges_graph):
    """Test that neither val is a node in the graph to be adjacent."""
    g = five_node_with_five_edges_graph
    with pytest.raises(ValueError):
        g.adjacent('cake', 'pie')
def test_adjacent_two_nodes_in_graph(two_node_two_edge_graph):
    """Test two nodes are adjacent returns True."""
    g = two_node_two_edge_graph
    assert g.adjacent(1, 2) is True
def test_adjacent_five_nodes_in_graph(five_node_with_five_edges_graph):
    """Test two nodes are adjacent returns True twice."""
    g = five_node_with_five_edges_graph
    assert g.adjacent(2, 11.1) is True
    assert g.adjacent(2, 'corn') is True
# --- tests for depth-first and breadth-first traversal ---
def test_depth_on_empty_graph(empty_graph):
    """Test a depth traversal on an empty graph."""
    with pytest.raises(ValueError):
        empty_graph.depth_first_traversal(3)
def test_breadth_on_empty_graph(empty_graph):
    """Test a breadth traversal on an empty graph."""
    with pytest.raises(ValueError):
        empty_graph.breadth_first_traversal(3)
def test_depth_non_node_non_empty_graph(two_node_no_edge_graph):
    """Test depth traversal with bad val on non empty graph."""
    with pytest.raises(ValueError):
        two_node_no_edge_graph.depth_first_traversal(6)
def test_breadth_non_node_non_empty_graph(two_node_no_edge_graph):
    """Test breadth traversal with bad val on non empty graph."""
    with pytest.raises(ValueError):
        two_node_no_edge_graph.breadth_first_traversal(6)
def test_depth_two_node_one_edge_graph(two_node_with_edge_graph):
    """Test depth traversal on two node graph with one edge."""
    g = two_node_with_edge_graph
    assert g.depth_first_traversal('corn') == ['corn', 'beans']
    assert g.depth_first_traversal('beans') == ['beans']
def test_breadth_two_node_one_edge_graph(two_node_with_edge_graph):
    """Test breadth traversal on two node graph with one edge."""
    g = two_node_with_edge_graph
    assert g.breadth_first_traversal('corn') == ['corn', 'beans']
    assert g.breadth_first_traversal('beans') == ['beans']
def test_depth_two_node_two_edge_graph(two_node_two_edge_graph):
    """Test depth traversal on two node graph with two edges."""
    g = two_node_two_edge_graph
    assert g.depth_first_traversal(1) == [1, 2]
    assert g.depth_first_traversal(2) == [2, 1]
def test_breadth_two_node_two_edge_graph(two_node_two_edge_graph):
    """Test breadth traversal on two node graph with two edges."""
    g = two_node_two_edge_graph
    assert g.breadth_first_traversal(1) == [1, 2]
    assert g.breadth_first_traversal(2) == [2, 1]
def test_depth_four_node_cyclical_graph(four_node_cyclical_graph):
    """Test depth traversal on four node cyclical graph."""
    g = four_node_cyclical_graph
    assert g.depth_first_traversal(1) == [1, 2, 3, 4]
    assert g.depth_first_traversal(4) == [4, 1, 2, 3]
def test_breadth_four_node_cyclical_graph(four_node_cyclical_graph):
    """Test breadth traversal on four node cyclical graph."""
    g = four_node_cyclical_graph
    assert g.breadth_first_traversal(1) == [1, 2, 3, 4]
    assert g.breadth_first_traversal(3) == [3, 4, 1, 2]
def test_depth_three_node_cyclical_graph(three_node_cyclical_graph):
    """Test depth traversal on three node cyclical graph."""
    g = three_node_cyclical_graph
    assert g.depth_first_traversal(1) == [1, 2, 3]
    assert g.depth_first_traversal(3) == [3, 2, 1]
def test_breadth_three_node_cyclical_graph(three_node_cyclical_graph):
    """Test breadth traversal on three node cyclical graph."""
    g = three_node_cyclical_graph
    assert g.breadth_first_traversal(1) == [1, 2, 3]
    assert g.breadth_first_traversal(2) == [2, 1, 3]
def test_depth_seven_node_heapish_graph(seven_node_heapish_graph):
    """Test depth traversal on seven node heap like, semi cyclical graph."""
    g = seven_node_heapish_graph
    assert g.depth_first_traversal(3) == [3, 1, 2, 4, 5, 6, 7]
    assert g.depth_first_traversal(5) == [5, 6, 7]
    assert g.depth_first_traversal(1) == [1, 2, 3, 4, 5, 6, 7]
    assert g.depth_first_traversal(7) == [7, 5, 6]
    assert g.depth_first_traversal(2) == [2, 3, 1, 5, 6, 7, 4]
def test_breadth_seven_node_heapish_graph(seven_node_heapish_graph):
    """Test breadth traversal on seven node heap like, semi cyclical graph."""
    g = seven_node_heapish_graph
    assert g.breadth_first_traversal(1) == [1, 2, 5, 3, 4, 6, 7]
    assert g.breadth_first_traversal(5) == [5, 6, 7]
    assert g.breadth_first_traversal(2) == [2, 3, 4, 1, 5, 6, 7]
    assert g.breadth_first_traversal(7) == [7, 5, 6]
def test_adj_no_edge_first_to_second_val(five_node_with_five_edges_graph):
    """Test if there is no edge from val 1 to val 2."""
    g = five_node_with_five_edges_graph
    with pytest.raises(AssertionError):
        g.adjacent(11.1, 2)
| CaHudson94/data-structures | src/Completed/test_graph_1.py | Python | mit | 17,203 |
# Submodules exported when a client does ``from <package> import *``.
__all__ = ['euler_tour', 'expression_tree', 'linked_binary_tree', 'traversal_examples']
| consultit/Ely | ely/direct/data_structures_and_algorithms/ch08/__init__.py | Python | lgpl-3.0 | 88 |
#!/usr/bin/env python3
import argparse
import itertools
import queue
import re
import subprocess
import sys
import threading
import time
from collections import deque
from collections import namedtuple
from io import BytesIO
import datadog
import logbook
import pcc
import requests
from PIL import Image
from attrdict import AttrDict as attrdict
# noinspection PyUnresolvedReferences
import signal
# Structured application log; logbook mirrors the stdlib logging API.
logger = logbook.Logger("pr0gramm-meta")
logger.info("initialize datadog metrics")
datadog.initialize()
# Aggregates metric samples in a background thread and flushes them to datadog.
stats = datadog.ThreadStats()
stats.start()
# Plain records mirroring the JSON objects returned by the pr0gramm API.
Item = namedtuple("Item", ["id", "promoted", "up", "down",
                           "created", "image", "thumb", "fullsize", "source", "flags",
                           "user", "mark"])
Tag = namedtuple("Tag", ["id", "item_id", "confidence", "tag"])
User = namedtuple("User", ["id", "name", "registered", "score"])
def metric_name(suffix):
    """Qualify *suffix* with the common datadog metric prefix."""
    return ".".join(("pr0gramm.meta.update", suffix))
class SetQueue(queue.Queue):
    """A FIFO queue that silently drops items whose key is already queued."""

    def __init__(self, maxsize=0, key=lambda x: x):
        super().__init__(maxsize)
        # maps each queued item to the key used for deduplication
        self.keyfunc = key

    def _init(self, maxsize):
        # Called by Queue.__init__ to set up the underlying storage.
        self.queue = deque()
        self.keys = set()

    def _qsize(self):
        assert len(self.keys) == len(self.queue), "length of queue and keys not equal"
        return len(self.keys)

    def _put(self, item):
        # Only enqueue if no item with the same key is already waiting.
        key = self.keyfunc(item)
        if key in self.keys:
            return
        self.keys.add(key)
        self.queue.append(item)

    def _get(self):
        head = self.queue.popleft()
        self.keys.discard(self.keyfunc(head))
        return head
class UserSetQueue(SetQueue):
    """Queue of user names to fetch, deduplicated case-insensitively.
    The gauge metric reports the current queue depth on every put/get,
    sampled at 1% to limit overhead.
    """
    def __init__(self):
        super().__init__(key=str.lower)
    def _put(self, item):
        stats.gauge(metric_name("queue.users"), len(self.keys), sample_rate=0.01)
        super()._put(item)
    def _get(self):
        stats.gauge(metric_name("queue.users"), len(self.keys), sample_rate=0.01)
        return super()._get()
# just put a user in this queue to download its details
user_queue = UserSetQueue()
def iterate_posts(start=None):
    """Yield Item tuples from the pr0gramm feed, newest first.

    Pages backwards through the feed via the ``older`` query parameter,
    starting below *start* (or at the top of the feed when ``None``),
    until the API reports ``atEnd``.
    """
    base_url = "http://pr0gramm.com/api/items/get?flags=7"
    while True:
        url = base_url + "&older=%d" % start if start else base_url
        # :type: requests.Response
        with stats.timer(metric_name("request.feed")):
            response = requests.get(url)
            response.raise_for_status()
        json = response.json()
        for item in json["items"]:
            item = Item(**item)
            # remember the smallest id seen so far for the next page request
            start = min(start or item.id, item.id)
            yield item
        if json["atEnd"]:
            break
def chunker(n, iterable):
    """Yield successive tuples of at most *n* items taken from *iterable*."""
    source = iter(iterable)
    piece = tuple(itertools.islice(source, n))
    while piece:
        yield piece
        piece = tuple(itertools.islice(source, n))
@stats.timed(metric_name("request.user"))
def get_user_details(name):
    """Fetch the public profile of *name* from the API and return it as a
    User tuple.
    """
    url = "http://pr0gramm.com/api/profile/info"
    response = requests.get(url, params={"name": name, "flags": "1"})
    content = response.json()
    user = attrdict(content).user
    # convert to named tuple
    return User(user.id, user.name, user.registered, user.score)
def store_user_details(database, details):
    """Upsert *details* into ``users`` and append a ``user_score`` sample.

    The users row only has its score refreshed on conflict, while
    user_score keeps a timestamped history of score values.
    """
    with database, database.cursor() as cursor:
        cursor.execute("INSERT INTO users VALUES (%s, %s, %s, %s)"
                       " ON CONFLICT(id) DO UPDATE SET score=%s",
                       list(details) + [details.score])
        cursor.execute("INSERT INTO user_score VALUES (%s, %s, %s)",
                       [details.id, int(time.time()), details.score])
def update_user_details(dbpool):
    """Worker loop: take user names from ``user_queue``, fetch their
    details and persist them, forever.

    Network or database errors for a single user are logged and skipped
    instead of being silently swallowed (the previous ``except IOError:
    pass`` hid every failure), so the worker thread stays alive but the
    failures are visible.
    """
    while True:
        user = user_queue.get()
        try:
            # noinspection PyTypeChecker
            with dbpool.active() as database:
                store_user_details(database, get_user_details(user))

            # throttle requests against the profile endpoint
            time.sleep(1)
        except IOError:
            # best effort: keep the worker running, but record the failure
            logger.exception("could not update details of user {}".format(user))
@stats.timed(metric_name("request.size"), tags=["image"])
def get_image_size(image_url, size=1024):
    """Return ``(width, height)`` of the image at *image_url*.

    Only the first *size* bytes are requested (Range header); most image
    formats carry their dimensions in the header, so a partial download
    usually suffices.  PIL raises IOError if the truncated data is not
    enough to parse the header.
    """
    # :type: requests.Response
    response = requests.get(image_url, headers={"Range": "bytes=0-%d" % (size - 1)}, stream=True)
    response.raise_for_status()
    try:
        image = Image.open(response.raw)
        return image.size
    finally:
        response.close()
@stats.timed(metric_name("request.size"), tags=["video"])
def get_video_size(video_url, size=16 * 1024):
# :type: requests.Response
response = requests.get(video_url, headers={"Range": "bytes=0-%d" % (size - 1)})
response.raise_for_status()
# ask avprobe for the size of the image
process = subprocess.Popen(
["timeout", "10s", "ffprobe", "-"], shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate(response.content)
# and extract result from output
width, height = re.search(br"Stream.* ([0-9]+)x([0-9]+)", stdout + stderr).groups()
return int(width), int(height)
def get_item_url(item):
    """Return an absolute HTTP URL for the media file of *item*.

    Protocol-relative paths get an http: scheme, absolute URLs pass
    through unchanged, videos go to the vid host and everything else to
    the img host.
    """
    path = item.image
    if path.startswith("//"):
        return "http:" + path
    if path.startswith("http"):
        return path
    if path.endswith((".mp4", ".webm")):
        return "http://vid.pr0gramm.com/" + path
    return "http://img.pr0gramm.com/" + path
def get_item_size(item):
    """Determine the pixel dimensions of an item's media file.

    For images, retries with increasing byte prefixes until PIL can parse
    the header; for webm videos, asks ffprobe. Raises a plain Exception
    if every attempt failed.

    NOTE(review): .mp4 files are routed by get_item_url() but are not
    handled here, so mp4 items always reach the final raise — confirm
    whether that is intended.
    """
    filename = item.image.lower()
    url = get_item_url(item)

    if filename.endswith((".jpg", ".jpeg", ".png", ".gif")):
        # progressively larger prefixes; most formats need only the header
        for byte_count in [1024, 4096, 8192, 16 * 1024, 64 * 1024]:
            try:
                width, height = get_image_size(url, size=byte_count)
                return width, height
            except IOError:
                pass

    if filename.endswith(".webm"):
        try:
            width, height = get_video_size(url)
            return width, height
        except (OSError, IOError):
            pass

    raise Exception("Could not get size of item {}".format(item.id))
def get_item_ids_in_table(db, items, table):
    """Return the set of ids (from *items*) already present in *table*.

    NOTE(review): the table name and the id list are interpolated into
    the SQL string. Safe only because both come from internal code (ids
    are integers from the API, table names are literals) — never pass
    user input here. Also assumes *items* is non-empty: an empty list
    would produce invalid SQL ("IN ()").
    """
    ids = ",".join(str(item.id) for item in items)
    query = "SELECT id FROM %s WHERE id IN (%s)" % (table, ids)
    with db, db.cursor() as cursor:
        cursor.execute(query)
        # rows are 1-tuples; unpack with the trailing comma
        return {item_id for item_id, in cursor}
def get_items_not_in_table(db, items, table):
    """Return (as a list) the items whose ids are not yet stored in *table*."""
    candidates = tuple(items)
    existing_ids = get_item_ids_in_table(db, candidates, table)
    return [candidate for candidate in candidates if candidate.id not in existing_ids]
def update_item_sizes(database, items):
    """
    Downloads sizes for a list of items.

    Only items without an existing row in the "sizes" table are fetched;
    per-item failures are logged and skipped so one broken item cannot
    stall the batch.

    :param database: A database connection to use for storing the items.
    :param tuple[items] items: The items to process
    """
    # get the items that need updates
    for item in get_items_not_in_table(database, items, "sizes"):
        # noinspection PyBroadException
        try:
            width, height = get_item_size(item)
        except KeyboardInterrupt:
            raise
        except:
            # best effort: log and continue with the next item
            logger.exception()
            continue

        with database, database.cursor() as cursor:
            cursor.execute("INSERT INTO sizes VALUES (%s, %s, %s)"
                           " ON CONFLICT(id) DO NOTHING", (item.id, width, height))
def update_item_previews(database, items):
    """Generate and store tiny rgb565 preview thumbnails for new items.

    ffmpeg scales the first frame down to 8px width; the pixels are then
    packed as big-endian rgb565 bytes into the item_previews table.
    Per-item failures are logged and skipped.
    """
    # get the items that need updates
    for item in get_items_not_in_table(database, items, "item_previews"):
        # noinspection PyBroadException
        try:
            # NOTE(review): filename is computed but never used here
            filename = item.image.lower()
            url = get_item_url(item)
            logger.debug("Update preview for {}", url)

            # generate thumbnail (ffmpeg reads the URL directly, 10s cap)
            png_bytes = subprocess.check_output([
                "timeout", "10s",
                "ffmpeg", "-loglevel", "panic", "-y", "-i", url,
                "-vf", "scale=8:-1", "-frames", "1",
                "-f", "image2", "-vcodec", "png", "-"])

            image = Image.open(BytesIO(png_bytes)).convert("RGB")
            width, height = image.size

            preview = bytearray()
            for r, g, b in image.getdata():
                # convert to rgb565
                pixel = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3)
                # big-endian: high byte first
                preview.append(pixel >> 8)
                preview.append(pixel & 0xff)

            with database, database.cursor() as cursor:
                cursor.execute("INSERT INTO item_previews VALUES (%s, %s, %s, %s) ON CONFLICT(id) DO NOTHING",
                               (item.id, width, height, preview))

        except KeyboardInterrupt:
            raise
        except:
            logger.exception()
            continue
def iter_item_tags(item):
    """Yield Tag tuples for *item* from the items/info API.

    Side effect: every commenter's name is enqueued on user_queue so
    their profiles get refreshed as well.
    """
    url = "http://pr0gramm.com/api/items/info?itemId=%d" % item.id

    # :type: requests.Response
    response = requests.get(url)
    response.raise_for_status()

    info = response.json()

    # enqueue the commenters names
    for comment in info.get("comments", []):
        user_queue.put(comment["name"])

    for tag in info.get("tags", []):
        yield Tag(tag["id"], item.id, tag["confidence"], tag["tag"])
def update_item_infos(database, items):
    """Fetch and upsert tags for each item; enqueue uploader names.

    Tag confidence is refreshed on conflict because it changes over time
    as votes come in. Failures are logged per item and skipped.
    """
    for item in items:
        # refresh the uploader's profile too
        user_queue.put(item.user)

        # noinspection PyBroadException
        try:
            with stats.timer(metric_name("request.info")):
                tags = tuple(iter_item_tags(item))
        except KeyboardInterrupt:
            raise
        except:
            logger.warn("Could not get tags for item {}", item.id)
            logger.exception()
            continue

        if tags:
            # confidence appended again for the ON CONFLICT UPDATE placeholder
            tags = [list(tag) + [tag.confidence] for tag in tags]
            stmt = "INSERT INTO tags (id, item_id, confidence, tag) VALUES (%s,%s,%s,%s)" \
                   " ON CONFLICT(id) DO UPDATE SET confidence=%s"
            with database, database.cursor() as cursor:
                cursor.executemany(stmt, tags)
@stats.timed(metric_name("db.store"))
def store_items(database, items):
"""
Stores the given items in the database. They will replace any previously stored items.
:param tuple[items] items: The items to process
"""
items = [list(item) + [item.up, item.down, item.mark] for item in items]
stmt = "INSERT INTO items VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)" \
" ON CONFLICT(id) DO UPDATE SET up=%s, down=%s, mark=%s"
with database, database.cursor() as cursor:
cursor.executemany(stmt, items)
def schedule(interval, name, func, *args, **kwargs):
    """Run *func* forever, sleeping *interval* seconds between completions.

    Any exception other than KeyboardInterrupt is logged and swallowed so
    a single failing run never kills the worker thread. KeyboardInterrupt
    exits the process.
    """
    while True:
        start = time.time()

        # noinspection PyBroadException
        try:
            logger.info("Calling scheduled function {} now", name)
            func(*args, **kwargs)

            duration = time.time() - start
            logger.info("{} took {:1.2f}s to complete", name, duration)
        except KeyboardInterrupt:
            sys.exit(1)
        except:
            duration = time.time() - start
            logger.exception("Ignoring error in scheduled function {} after {}", name, duration)

        try:
            time.sleep(interval)
        except KeyboardInterrupt:
            sys.exit(1)
def run(dbpool, *functions):
    """Walk the feed and apply age-windowed update functions per chunk.

    Each entry in *functions* is a (min_age, max_age, function) triple
    with ages in hours. The walk stops once a chunk is older than every
    window and no window lies further ahead (stop stays True).
    """
    for items in chunker(16, iterate_posts()):
        stop = True

        # age of the newest item in this chunk, in hours
        age = (time.time() - items[0].created) / 3600
        for min_age, max_age, function in functions:
            if age < min_age:
                # window still ahead of us — keep walking backwards
                stop = False
                continue

            if age > max_age:
                continue

            with dbpool.active() as database:
                store_items(database, items)
                function(database, items)

            stop = False

        if stop:
            break
def parse_arguments():
    """Parse command line arguments; --postgres (host name) is required."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--postgres", type=str, required=True, help="Postgres host")
    return parser.parse_args()
def start(dbpool):
    """Spawn all worker threads (as daemons) and yield each Thread.

    Item-info updates run in overlapping age windows so fresh items are
    rechecked frequently and older ones progressively more rarely.
    """
    def start_in_thread(func, *args):
        # daemon=True so the process can exit on Ctrl-C without joins
        thread = threading.Thread(target=func, args=args, daemon=True)
        thread.start()
        return thread

    yield start_in_thread(schedule, 1, "pr0gramm.meta.update.users", update_user_details, dbpool)

    yield start_in_thread(schedule, 60, "pr0gramm.meta.update.sizes",
                          run, dbpool,
                          (0, 0.5, update_item_previews),
                          (0, 0.5, update_item_sizes),
                          (0, 0.5, update_item_infos))

    yield start_in_thread(schedule, 600, "pr0gramm.meta.update.infos.new",
                          run, dbpool, (0, 6, update_item_infos))

    yield start_in_thread(schedule, 3600, "pr0gramm.meta.update.infos.more",
                          run, dbpool, (5, 48, update_item_infos))

    yield start_in_thread(schedule, 24 * 3600, "pr0gramm.meta.update.infos.week",
                          run, dbpool, (47, 24 * 7, update_item_infos))
def main():
    """Entry point: open the connection pool and run workers until Ctrl-C."""
    args = parse_arguments()

    logger.info("opening database at {}", args.postgres)
    # NOTE(review): credentials are hard-coded — acceptable for a local/dev
    # deployment only; confirm before exposing the database.
    pool = pcc.RefreshingConnectionCache(
        lifetime=600,
        host=args.postgres, user="postgres", password="password", dbname="postgres")

    try:
        threads = tuple(start(pool))
        for thread in threads:
            thread.join()
    except KeyboardInterrupt:
        sys.exit(1)
if __name__ == '__main__':
    # log to a file; bubble=True lets records also reach other handlers
    file_handler = logbook.FileHandler("logfile.log", bubble=True)
    with file_handler.applicationbound():
        main()
| mopsalarm/pr0gramm-meta | update/main.py | Python | apache-2.0 | 13,461 |
#! /usr/bin/env python
"""
Script to split a dataset into chunks of equal time
"""
import argparse
from argparse import RawTextHelpFormatter
import casacore.tables as pt
import numpy as np
import sys
import os
import shutil
def main(dataset, blockl, local_dir=None, clobber=True):
    """
    Split dataset into time chunks

    Parameters
    ----------
    dataset : str
        Name of MS file to split
    blockl : int
        Number of time slots per chunk
    local_dir : str, optional
        Path to local directory for output of t1.copy(). The file is then
        copied to the original output directory
    clobber : bool, optional
        If True, existing files are overwritten

    Returns
    -------
    dict
        {'files': '[name1,name2,...]'} listing the chunk MS files created
    """
    # Pipeline frameworks pass booleans as strings; coerce here.
    if type(clobber) is str:
        if clobber.lower() == 'true':
            clobber = True
        else:
            clobber = False
    blockl = int(blockl)
    if blockl < 1:
        blockl = 1

    # Get time per sample and number of samples from the first
    # cross-correlation baseline found.
    t = pt.table(dataset, readonly=True, ack=False)
    for t2 in t.iter(["ANTENNA1", "ANTENNA2"]):
        if (t2.getcell('ANTENNA1', 0)) < (t2.getcell('ANTENNA2', 0)):
            timepersample = t2[1]['TIME'] - t2[0]['TIME']  # sec
            nsamples = t2.nrows()
            break
    t.close()

    # Fix: np.float was a deprecated alias of the builtin float and was
    # removed in NumPy 1.24; use float() directly (identical semantics).
    nchunks = int(np.ceil(float(nsamples) / float(blockl)))

    # Don't allow more than 15 chunks for performance reasons
    while nchunks > 15:
        blockl *= 2
        nchunks = int(np.ceil(float(nsamples) / float(blockl)))

    tlen = timepersample * float(blockl) / 3600.0  # length of block in hours
    tobs = timepersample * nsamples / 3600.0  # length of obs in hours

    # Copy to local directory if needed
    dataset_original = dataset
    if local_dir is not None:
        dataset = os.path.join(local_dir, os.path.basename(dataset_original))
        os.system('/usr/bin/rsync -a {0} {1}'.format(dataset_original, local_dir))

    files = []
    for c in range(nchunks):
        chunk_file = '{0}_chunk{1}.ms'.format(os.path.splitext(dataset_original)[0], c)
        files.append(chunk_file)
        t0 = tlen * float(c)  # hours
        t1 = t0 + tlen  # hours
        if c == 0:
            t0 = -0.1  # make sure first chunk gets first slot
        if c == nchunks - 1 and t1 < tobs:
            t1 = tobs + 0.1  # make sure last chunk gets all that remains
        split_ms(dataset, chunk_file, t0, t1, local_dir, clobber=clobber)

    # Remove the local working copy (but never the original dataset)
    if local_dir is not None and not os.path.samefile(dataset, dataset_original):
        shutil.rmtree(dataset)

    return {'files': '[{0}]'.format(','.join(files))}
def split_ms(msin, msout, start_out, end_out, local_dir, clobber=True):
    """
    Splits an MS between start and end times in hours relative to first time

    Parameters
    ----------
    msin : str
        Name of MS file to split
    msout : str
        Name of output MS file
    start_out : float
        Start time in hours relative to first time
    end_out : float
        End time in hours relative to first time
    local_dir : str
        Path to local directory for output of t1.copy(). The file is then
        copied to the original output directory
    clobber : bool, optional
        If True, existing files are overwritten
    """
    if os.path.exists(msout):
        if clobber:
            # NOTE(review): deletes via the shell — assumes msout has no
            # spaces or shell metacharacters (paths are built internally)
            os.system('rm -rf {0}'.format(msout))
        else:
            return

    # Write to fast local storage first if requested
    msout_original = msout
    if local_dir is not None:
        msout = os.path.join(local_dir, os.path.basename(msout_original))
        if os.path.exists(msout):
            os.system('rm -rf {0}'.format(msout))

    t = pt.table(msin, ack=False)
    starttime = t[0]['TIME']
    # Select rows with TIME in [start_out, end_out) hours after the first
    # timestamp, sorted for a well-ordered output MS
    t1 = t.query('TIME >= ' + str(starttime+start_out*3600.0) + ' && '
        'TIME < ' + str(starttime+end_out*3600.0), sortlist='TIME,ANTENNA1,ANTENNA2')

    t1.copy(msout, True)
    t1.close()
    t.close()

    # Copy the result back to the requested location and drop the local copy
    if local_dir is not None:
        msout_destination_dir = os.path.dirname(msout_original)
        os.system('/usr/bin/rsync -a {0} {1}'.format(msout, msout_destination_dir))
        if not os.path.samefile(msout, msout_original):
            shutil.rmtree(msout)
if __name__ == '__main__':
    descriptiontext = "Chunk a dataset in time.\n"

    parser = argparse.ArgumentParser(description=descriptiontext, formatter_class=RawTextHelpFormatter)
    parser.add_argument('ms_filename', help='Dataset name')
    parser.add_argument('-w', '--width', help='width of chunks in number of samples', type=int, default=10)
    args = parser.parse_args()
    # width maps to blockl (samples per chunk); main() enforces a minimum of 1
    main(args.ms_filename, blockl=args.width)
| revoltek/factor | factor/scripts/chunk_by_time.py | Python | gpl-2.0 | 4,561 |
# -*- coding: utf-8 -*-
from datetime import date,datetime,timedelta
from osv import osv, fields
from tools.translate import _
class WizardInvestmentCreation(osv.osv):
    """Backoffice wizard that creates a Generation kWh investment from
    manually entered form data and reports the outcome (in Catalan) in
    the wizard's info field."""

    _name = 'wizard.generationkwh.investment.creation'

    def do_create(self, cursor, uid, ids, context=None):
        """Create the investment from the wizard values.

        On success the wizard moves to state 'done' and shows the new
        investment's name/id; on failure it stays in 'init' and shows the
        error plus the input data for debugging.
        """
        if context is None:
            context = {}

        Investment = self.pool.get('generationkwh.investment')
        Emission = self.pool.get('generationkwh.emission')

        wiz = self.browse(cursor, uid, ids[0], context=context)
        partner_id = int(wiz.partner_id_alt.id)
        amount_in_e = float(wiz.amount_in_euros)
        ip = str(wiz.ip)
        iban = str(wiz.iban)
        emission_id = int(wiz.emission_id.id)
        investment_id = []
        creation_errors = ''
        # timed so the report below shows how long creation took
        start = datetime.now()
        try:
            emission_code = Emission.read(cursor, uid, emission_id, ['code'])['code']
            #Compatibility 'emissio_apo'
            investment_id = Investment.create_from_form(cursor, uid,
                partner_id, wiz.order_date, amount_in_e, ip, iban, emission_code,
                context)
        except Exception as e:
            # keep the error text to display it in the wizard
            creation_errors = str(e)
        end = datetime.now()

        result = ""
        result += "Creació: \t\t{}\n".format(end - start)
        result += "\n"
        if investment_id:
            next_state = 'done'
            new_invest = Investment.read(cursor, uid, investment_id, ['name'])
            result += "Inversió creada amb nom: {} - id: {}\n".format(
                new_invest['name'],
                investment_id
            )
        else:
            next_state = 'init'
            result += "Error en creació:\t"+ creation_errors +"\n"

        result += "\n"
        result += "Dades d'entrada\n"
        result += "Partner_id : \t{}\n".format(partner_id)
        result += "Order date : \t{}\n".format(wiz.order_date)
        result += "Amount : \t\t{}\n".format(amount_in_e)
        result += "IP : \t\t\t\t{}\n".format(ip)
        result += "IBAN : \t\t\t{}\n".format(iban)
        result += "Emission : \t\t\t{}\n".format(emission_id)

        wiz.write({'info': result , 'state': next_state}, context=context)

    _columns = {
        # wizard workflow state: 'init' (form) or 'done' (result shown)
        'state': fields.char('State', size=16),
        # human-readable outcome report built by do_create()
        'info': fields.text('Info'),
        'partner_id_alt': fields.many2one(
            'res.partner',
            'Titular',
            domain=[('category_id.name','=','Soci')],
            required=True,
        ),
        'amount_in_euros': fields.float(
            'Quantitat aportada',
            required=True,
        ),
        'ip': fields.char("Ip d'origen",size=16,required=True),
        'iban': fields.char('Compte iban',size=35,required=True),
        'order_date': fields.date(
            'Data de comanda',
            required=True
        ),
        'emission_id': fields.many2one(
            'generationkwh.emission',
            'Emissió',
            domain=[],
            required=True,
        ),
    }

    _defaults = {
        'state': lambda *a: 'init',
        'info': lambda *a: '',
        'order_date': lambda *a: str(datetime.today()),
        'amount_in_euros': lambda *a: 0.0,
        'ip': lambda *a: "0.0.0.0",
        'iban': lambda *a: ""
    }

# register the model with the OpenERP ORM (pre-7.0 convention)
WizardInvestmentCreation()
| Som-Energia/somenergia-generationkwh | som_generationkwh/wizard/wizard_investment_creation.py | Python | agpl-3.0 | 3,362 |
# -*- coding: utf-8 -*-
# © 2016 Eficent Business and IT Consulting Services S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import mass_create
| factorlibre/stock-logistics-warehouse | stock_valuation_account_manual_adjustment/wizards/__init__.py | Python | agpl-3.0 | 180 |
#! /usr/bin/env python
"""GUI interface to webchecker.
This works as a Grail applet too! E.g.
<APPLET CODE=wcgui.py NAME=CheckerWindow></APPLET>
Checkpoints are not (yet??? ever???) supported.
User interface:
Enter a root to check in the text entry box. To enter more than one root,
enter them one at a time and press <Return> for each one.
Command buttons Start, Stop and "Check one" govern the checking process in
the obvious way. Start and "Check one" also enter the root from the text
entry box if one is present. There's also a check box (enabled by default)
to decide whether actually to follow external links (since this can slow
the checking down considerably). Finally there's a Quit button.
A series of checkbuttons determines whether the corresponding output panel
is shown. List panels are also automatically shown or hidden when their
status changes between empty to non-empty. There are six panels:
Log -- raw output from the checker (-v, -q affect this)
To check -- links discovered but not yet checked
Checked -- links that have been checked
Bad links -- links that failed upon checking
Errors -- pages containing at least one bad link
Details -- details about one URL; double click on a URL in any of
the above list panels (not in Log) will show details
for that URL
Use your window manager's Close command to quit.
Command line options:
-m bytes -- skip HTML pages larger than this size (default %(MAXPAGE)d)
-q -- quiet operation (also suppresses external links report)
-v -- verbose operation; repeating -v will increase verbosity
-t root -- specify root dir which should be treated as internal (can repeat)
-a -- don't check name anchors
Command line arguments:
rooturl -- URL to start checking
(default %(DEFROOT)s)
XXX The command line options (-m, -q, -v) should be GUI accessible.
XXX The roots should be visible as a list (?).
XXX The multipanel user interface is clumsy.
"""
# ' Emacs bait
import sys
import getopt
from Tkinter import *
import tktools
import webchecker
import random
# Override some for a weaker platform
if sys.platform == 'mac':
    # Classic MacPython: use the Grail site as default root, a smaller
    # page-size limit, and verbose logging by default.
    webchecker.DEFROOT = "http://grail.cnri.reston.va.us/"
    webchecker.MAXPAGE = 50000
    webchecker.verbose = 4
def main():
    """Parse command-line options, build the GUI and enter the Tk mainloop.

    Options mirror webchecker's: -m maxpage, -q quiet, -v verbose (repeatable),
    -t extra internal root (repeatable), -a toggle name-anchor checking.
    Remaining arguments are roots; the last one is only suggested in the
    entry box, not added yet.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 't:m:qva')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        print __doc__%vars(webchecker)
        sys.exit(2)
    # start from webchecker's defaults, then apply the flags
    webchecker.verbose = webchecker.VERBOSE
    webchecker.nonames = webchecker.NONAMES
    webchecker.maxpage = webchecker.MAXPAGE
    extra_roots = []
    for o, a in opts:
        if o == '-m':
            webchecker.maxpage = int(a)
        if o == '-q':
            webchecker.verbose = 0
        if o == '-v':
            webchecker.verbose = webchecker.verbose + 1
        if o == '-t':
            extra_roots.append(a)
        if o == '-a':
            webchecker.nonames = not webchecker.nonames
    root = Tk(className='Webchecker')
    root.protocol("WM_DELETE_WINDOW", root.quit)
    c = CheckerWindow(root)
    c.setflags(verbose=webchecker.verbose, maxpage=webchecker.maxpage,
               nonames=webchecker.nonames)
    if args:
        for arg in args[:-1]:
            c.addroot(arg)
        c.suggestroot(args[-1])
    # Usually conditioned on whether external links
    # will be checked, but since that's not a command
    # line option, just toss them in.
    for url_root in extra_roots:
        # Make sure it's terminated by a slash,
        # so that addroot doesn't discard the last
        # directory component.
        if url_root[-1] != "/":
            url_root = url_root + "/"
        c.addroot(url_root, add_to_do = 0)
    root.mainloop()
class CheckerWindow(webchecker.Checker):
    """Tk GUI wrapper around webchecker.Checker.

    Builds the control/status widgets and the multi-panel output area,
    and overrides Checker's reporting hooks (setbad, newlink, markdone,
    ...) to mirror state changes into the list panels.
    """

    def __init__(self, parent, root=webchecker.DEFROOT):
        self.__parent = parent

        # top row: root URL entry
        self.__topcontrols = Frame(parent)
        self.__topcontrols.pack(side=TOP, fill=X)
        self.__label = Label(self.__topcontrols, text="Root URL:")
        self.__label.pack(side=LEFT)
        self.__rootentry = Entry(self.__topcontrols, width=60)
        self.__rootentry.pack(side=LEFT)
        self.__rootentry.bind('<Return>', self.enterroot)
        self.__rootentry.focus_set()

        # second row: command buttons
        self.__controls = Frame(parent)
        self.__controls.pack(side=TOP, fill=X)
        self.__running = 0
        self.__start = Button(self.__controls, text="Run", command=self.start)
        self.__start.pack(side=LEFT)
        self.__stop = Button(self.__controls, text="Stop", command=self.stop,
                             state=DISABLED)
        self.__stop.pack(side=LEFT)
        self.__step = Button(self.__controls, text="Check one",
                             command=self.step)
        self.__step.pack(side=LEFT)
        self.__cv = BooleanVar(parent)
        self.__cv.set(self.checkext)
        self.__checkext = Checkbutton(self.__controls, variable=self.__cv,
                                      command=self.update_checkext,
                                      text="Check nonlocal links",)
        self.__checkext.pack(side=LEFT)
        self.__reset = Button(self.__controls, text="Start over", command=self.reset)
        self.__reset.pack(side=LEFT)
        if __name__ == '__main__': # No Quit button under Grail!
            self.__quit = Button(self.__controls, text="Quit",
                                 command=self.__parent.quit)
            self.__quit.pack(side=RIGHT)

        # status lines
        self.__status = Label(parent, text="Status: initial", anchor=W)
        self.__status.pack(side=TOP, fill=X)
        self.__checking = Label(parent, text="Idle", anchor=W)
        self.__checking.pack(side=TOP, fill=X)

        # output panels; the Log panel replaces sys.stdout so the checker's
        # normal print output lands in the GUI
        self.__mp = mp = MultiPanel(parent)
        sys.stdout = self.__log = LogPanel(mp, "Log")
        self.__todo = ListPanel(mp, "To check", self, self.showinfo)
        self.__done = ListPanel(mp, "Checked", self, self.showinfo)
        self.__bad = ListPanel(mp, "Bad links", self, self.showinfo)
        self.__errors = ListPanel(mp, "Pages w/ bad links", self, self.showinfo)
        self.__details = LogPanel(mp, "Details")
        self.root_seed = None
        webchecker.Checker.__init__(self)
        if root:
            root = str(root).strip()
            if root:
                self.suggestroot(root)
        self.newstatus()

    def reset(self):
        # Clear checker state and all list panels, then re-suggest the root.
        webchecker.Checker.reset(self)
        for p in self.__todo, self.__done, self.__bad, self.__errors:
            p.clear()
        if self.root_seed:
            self.suggestroot(self.root_seed)

    def suggestroot(self, root):
        # Pre-fill the root entry (selected) without adding the root yet.
        self.__rootentry.delete(0, END)
        self.__rootentry.insert(END, root)
        self.__rootentry.select_range(0, END)
        self.root_seed = root

    def enterroot(self, event=None):
        # Add the entry-box URL as a new root and highlight it in To-check.
        root = self.__rootentry.get()
        root = root.strip()
        if root:
            self.__checking.config(text="Adding root "+root)
            self.__checking.update_idletasks()
            self.addroot(root)
            self.__checking.config(text="Idle")
            try:
                i = self.__todo.items.index(root)
            except (ValueError, IndexError):
                pass
            else:
                self.__todo.list.select_clear(0, END)
                self.__todo.list.select_set(i)
                self.__todo.list.yview(i)
        self.__rootentry.delete(0, END)

    def start(self):
        # Run continuously until Stop is pressed or the to-do list empties.
        self.__start.config(state=DISABLED, relief=SUNKEN)
        self.__stop.config(state=NORMAL)
        self.__step.config(state=DISABLED)
        self.enterroot()
        self.__running = 1
        self.go()

    def stop(self):
        self.__stop.config(state=DISABLED, relief=SUNKEN)
        self.__running = 0

    def step(self):
        # Check exactly one page.
        self.__start.config(state=DISABLED)
        self.__step.config(state=DISABLED, relief=SUNKEN)
        self.enterroot()
        self.__running = 0
        self.dosomething()

    def go(self):
        # Keep the GUI responsive: schedule one unit of work per idle cycle.
        if self.__running:
            self.__parent.after_idle(self.dosomething)
        else:
            self.__checking.config(text="Idle")
            self.__start.config(state=NORMAL, relief=RAISED)
            self.__stop.config(state=DISABLED, relief=RAISED)
            self.__step.config(state=NORMAL, relief=RAISED)

    # re-entrancy guard for dosomething() (shared class default)
    __busy = 0

    def dosomething(self):
        # Check the selected (or first) to-do URL, then reschedule via go().
        if self.__busy: return
        self.__busy = 1
        if self.todo:
            l = self.__todo.selectedindices()
            if l:
                i = l[0]
            else:
                i = 0
                self.__todo.list.select_set(i)
                self.__todo.list.yview(i)
            url = self.__todo.items[i]
            self.__checking.config(text="Checking "+self.format_url(url))
            self.__parent.update()
            self.dopage(url)
        else:
            self.stop()
        self.__busy = 0
        self.go()

    def showinfo(self, url):
        # Fill the Details panel for a URL double-clicked in a list panel.
        d = self.__details
        d.clear()
        d.put("URL:    %s\n" % self.format_url(url))
        if self.bad.has_key(url):
            d.put("Error:  %s\n" % str(self.bad[url]))
        if url in self.roots:
            d.put("Note:   This is a root URL\n")
        if self.done.has_key(url):
            d.put("Status: checked\n")
            o = self.done[url]
        elif self.todo.has_key(url):
            d.put("Status: to check\n")
            o = self.todo[url]
        else:
            d.put("Status: unknown (!)\n")
            o = []
        if (not url[1]) and self.errors.has_key(url[0]):
            d.put("Bad links from this page:\n")
            for triple in self.errors[url[0]]:
                link, rawlink, msg = triple
                d.put("  HREF  %s" % self.format_url(link))
                if self.format_url(link) != rawlink: d.put(" (%s)" %rawlink)
                d.put("\n")
                d.put("  error %s\n" % str(msg))
        self.__mp.showpanel("Details")
        for source, rawlink in o:
            d.put("Origin: %s" % source)
            if rawlink != self.format_url(url):
                d.put(" (%s)" % rawlink)
            d.put("\n")
        d.text.yview("1.0")

    # --- Checker reporting hooks: mirror state into the panels ---

    def setbad(self, url, msg):
        webchecker.Checker.setbad(self, url, msg)
        self.__bad.insert(url)
        self.newstatus()

    def setgood(self, url):
        webchecker.Checker.setgood(self, url)
        self.__bad.remove(url)
        self.newstatus()

    def newlink(self, url, origin):
        webchecker.Checker.newlink(self, url, origin)
        if self.done.has_key(url):
            self.__done.insert(url)
        elif self.todo.has_key(url):
            self.__todo.insert(url)
        self.newstatus()

    def markdone(self, url):
        webchecker.Checker.markdone(self, url)
        self.__done.insert(url)
        self.__todo.remove(url)
        self.newstatus()

    def seterror(self, url, triple):
        webchecker.Checker.seterror(self, url, triple)
        # errors panel stores (url, fragment) pairs; fragment is empty here
        self.__errors.insert((url, ''))
        self.newstatus()

    def newstatus(self):
        self.__status.config(text="Status: "+self.status())
        self.__parent.update()

    def update_checkext(self):
        # Checkbutton callback: sync the checker flag with the widget state.
        self.checkext = self.__cv.get()
class ListPanel:
    """Panel with a scrolled listbox of URLs.

    Auto-shows itself when it gets its first item and hides when it
    becomes empty; double-click invokes the showinfo callback.
    """

    def __init__(self, mp, name, checker, showinfo=None):
        self.mp = mp
        self.name = name
        self.showinfo = showinfo
        self.checker = checker
        self.panel = mp.addpanel(name)
        self.list, self.frame = tktools.make_list_box(
            self.panel, width=60, height=5)
        # keep the selection when another widget grabs X selection
        self.list.config(exportselection=0)
        if showinfo:
            self.list.bind('<Double-Button-1>', self.doubleclick)
        # parallel list of URL objects backing the displayed strings
        self.items = []

    def clear(self):
        self.items = []
        self.list.delete(0, END)
        self.mp.hidepanel(self.name)

    def doubleclick(self, event):
        l = self.selectedindices()
        if l:
            self.showinfo(self.items[l[0]])

    def selectedindices(self):
        # curselection() returns strings; convert to ints
        l = self.list.curselection()
        if not l: return []
        return map(int, l)

    def insert(self, url):
        if url not in self.items:
            if not self.items:
                # first item: make the panel visible
                self.mp.showpanel(self.name)
            # (I tried sorting alphabetically, but the display is too jumpy)
            i = len(self.items)
            self.list.insert(i, self.checker.format_url(url))
            self.list.yview(i)
            self.items.insert(i, url)

    def remove(self, url):
        try:
            i = self.items.index(url)
        except (ValueError, IndexError):
            pass
        else:
            was_selected = i in self.selectedindices()
            self.list.delete(i)
            del self.items[i]
            if not self.items:
                self.mp.hidepanel(self.name)
            elif was_selected:
                # keep a selection near the removed row
                if i >= len(self.items):
                    i = len(self.items) - 1
                self.list.select_set(i)
class LogPanel:
    """Output panel wrapping a scrollable text box.

    Implements write(), so an instance can stand in for sys.stdout and
    capture the checker's print output.
    """

    def __init__(self, mp, name):
        self.mp = mp
        self.name = name
        self.panel = mp.addpanel(name)
        self.text, self.frame = tktools.make_text_box(self.panel, height=10)
        self.text.config(wrap=NONE)

    def clear(self):
        # Wipe everything and scroll back to the top.
        self.text.delete("1.0", END)
        self.text.yview("1.0")

    def put(self, s):
        # Append text; keep the newest output visible once a newline lands.
        self.text.insert(END, s)
        if '\n' in s:
            self.text.yview(END)

    def write(self, s):
        # File-like interface: same as put(), plus a repaint so output
        # appears while the checker is busy.
        self.put(s)
        self.panel.update()
class MultiPanel:
    """Stack of named show/hide panels controlled by a row of checkbuttons.

    Each panel's visibility is tied to a StringVar that holds the panel
    name when shown and "" when hidden.
    """

    def __init__(self, parent):
        self.parent = parent
        self.frame = Frame(self.parent)
        self.frame.pack(expand=1, fill=BOTH)
        # checkbutton strip on top, panel stack below
        self.topframe = Frame(self.frame, borderwidth=2, relief=RAISED)
        self.topframe.pack(fill=X)
        self.botframe = Frame(self.frame)
        self.botframe.pack(expand=1, fill=BOTH)
        self.panelnames = []
        self.panels = {}

    def addpanel(self, name, on=0):
        # Create the checkbutton + (initially hidden unless on) panel frame.
        v = StringVar(self.parent)
        if on:
            v.set(name)
        else:
            v.set("")
        check = Checkbutton(self.topframe, text=name,
                            offvalue="", onvalue=name, variable=v,
                            command=self.checkpanel)
        check.pack(side=LEFT)
        panel = Frame(self.botframe)
        label = Label(panel, text=name, borderwidth=2, relief=RAISED, anchor=W)
        label.pack(side=TOP, fill=X)
        t = v, check, panel
        self.panelnames.append(name)
        self.panels[name] = t
        if on:
            panel.pack(expand=1, fill=BOTH)
        return panel

    def showpanel(self, name):
        v, check, panel = self.panels[name]
        v.set(name)
        panel.pack(expand=1, fill=BOTH)

    def hidepanel(self, name):
        v, check, panel = self.panels[name]
        v.set("")
        panel.pack_forget()

    def checkpanel(self):
        # Repack all visible panels in definition order: unpack everything,
        # then pack those whose checkbutton variable is set.
        for name in self.panelnames:
            v, check, panel = self.panels[name]
            panel.pack_forget()
        for name in self.panelnames:
            v, check, panel = self.panels[name]
            if v.get():
                panel.pack(expand=1, fill=BOTH)
if __name__ == '__main__':
    # run standalone (under Grail this module is used as an applet instead)
    main()
| OS2World/APP-INTERNET-torpak_2 | Tools/webchecker/wcgui.py | Python | mit | 15,291 |
"""
Tests for the models that configures Edit LTI fields feature.
"""
from contextlib import contextmanager
import ddt
from django.test import TestCase
from edx_django_utils.cache import RequestCache
from opaque_keys.edx.locator import CourseLocator
from xblock_config.models import CourseEditLTIFieldsEnabledFlag
@contextmanager
def lti_consumer_fields_editing_flag(course_id, enabled_for_course=False):
    """
    Yields CourseEditLTIFieldsEnabledFlag record for unit tests

    Arguments:
        course_id (CourseLocator): course locator to control this feature for.
        enabled_for_course (bool): whether feature is enabled for 'course_id'
    """
    # Clear cached flag state so the record created below is actually read.
    RequestCache.clear_all_namespaces()
    CourseEditLTIFieldsEnabledFlag.objects.create(course_id=course_id, enabled=enabled_for_course)
    # NOTE(review): no explicit cleanup on exit — relies on the enclosing
    # TestCase transaction rollback to remove the record between tests.
    yield
@ddt.ddt
class TestLTIConsumerHideFieldsFlag(TestCase):
    """
    Tests the behavior of the flags for lti consumer fields' editing feature.

    These are set via Django admin settings.
    """
    def setUp(self):
        super(TestLTIConsumerHideFieldsFlag, self).setUp()
        # stable course key used by every test below
        self.course_id = CourseLocator(org="edx", course="course", run="run")

    @ddt.data(
        (True, True),
        (True, False),
        (False, True),
        (False, False),
    )
    @ddt.unpack
    def test_lti_fields_editing_feature_flags(self, enabled_for_course, is_already_sharing_learner_info):
        """
        Test that feature flag works correctly with course-specific configuration in combination with
        a boolean which indicates whether a course-run already sharing learner username/email - given
        the course-specific configuration record is present.
        """
        with lti_consumer_fields_editing_flag(
            course_id=self.course_id,
            enabled_for_course=enabled_for_course
        ):
            feature_enabled = CourseEditLTIFieldsEnabledFlag.lti_access_to_learners_editable(
                self.course_id,
                is_already_sharing_learner_info,
            )
            # with an explicit record present, the record alone decides
            self.assertEqual(feature_enabled, enabled_for_course)

    @ddt.data(True, False)
    def test_lti_fields_editing_is_backwards_compatible(self, is_already_sharing_learner_info):
        """
        Test that feature flag works correctly with a boolean which indicates whether a course-run already
        sharing learner username/email - given the course-specific configuration record is not set previously.

        This tests the backward compatibility which currently is: if an existing course run is already
        sharing learner information then this feature should be enabled for that course run by default.
        """
        feature_enabled = CourseEditLTIFieldsEnabledFlag.lti_access_to_learners_editable(
            self.course_id,
            is_already_sharing_learner_info,
        )
        # a flag record should be auto-created only for already-sharing runs
        feature_flag_created = CourseEditLTIFieldsEnabledFlag.objects.filter(course_id=self.course_id).exists()
        self.assertEqual(feature_flag_created, is_already_sharing_learner_info)
        self.assertEqual(feature_enabled, is_already_sharing_learner_info)

    def test_enable_disable_course_flag(self):
        """
        Ensures that the flag, once enabled for a course, can also be disabled.
        """
        with lti_consumer_fields_editing_flag(
            course_id=self.course_id,
            enabled_for_course=True
        ):
            self.assertTrue(CourseEditLTIFieldsEnabledFlag.lti_access_to_learners_editable(self.course_id, False))
        with lti_consumer_fields_editing_flag(
            course_id=self.course_id,
            enabled_for_course=False
        ):
            self.assertFalse(CourseEditLTIFieldsEnabledFlag.lti_access_to_learners_editable(self.course_id, False))
| cpennington/edx-platform | cms/djangoapps/xblock_config/tests/test_models.py | Python | agpl-3.0 | 3,776 |
###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
## don't touch: must be first import!
import choosereactor
import os, json, sys, pkg_resources
from twisted.internet import reactor
from twisted.python import log, usage
from twisted.internet.defer import Deferred
## for versions
import autobahn
import autobahntestsuite
from autobahn.utf8validator import Utf8Validator
from autobahn.xormasker import XorMaskerNull
## WebSocket testing modes
import testee
import fuzzing
## WAMP testing modes
import wamptestee
import wampfuzzing
## Misc testing modes
import echo
import broadcast
import massconnect
import wsperfcontrol
import wsperfmaster
from spectemplate import SPEC_FUZZINGSERVER, \
SPEC_FUZZINGCLIENT, \
SPEC_FUZZINGWAMPSERVER, \
SPEC_FUZZINGWAMPCLIENT, \
SPEC_WSPERFCONTROL, \
SPEC_MASSCONNECT
class WsTestOptions(usage.Options):
   """
   Reads options from the command-line and checks them for plausibility.

   Subclasses twisted.python.usage.Options: `optParameters`/`optFlags`
   declare the CLI surface and `postOptions` validates the parsed result.
   """
   # Available modes, specified with the --mode (or short: -m) flag.
   MODES = ['echoserver',
            'echoclient',
            'broadcastclient',
            'broadcastserver',
            'fuzzingserver',
            'fuzzingclient',
            'fuzzingwampserver',
            'fuzzingwampclient',
            'testeeserver',
            'testeeclient',
            'wsperfcontrol',
            'wsperfmaster',
            'wampserver',
            'wamptesteeserver',
            'wampclient',
            'massconnect',
            'web',
            'import',
            'export']
   # Modes that need a specification file
   MODES_NEEDING_SPEC = ['fuzzingclient',
                         'fuzzingserver',
                         'fuzzingwampserver',
                         'fuzzingwampclient',
                         'wsperfcontrol',
                         'massconnect',
                         'import']
   # Modes that need a Websocket URI
   MODES_NEEDING_WSURI = ['echoclient',
                          'echoserver',
                          'broadcastclient',
                          'broadcastserver',
                          'testeeclient',
                          'testeeserver',
                          'wsperfcontrol',
                          'wampserver',
                          'wampclient',
                          'wamptesteeserver']
   # Default content of specification files for various modes
   DEFAULT_SPECIFICATIONS = {'fuzzingclient':     SPEC_FUZZINGCLIENT,
                             'fuzzingserver':     SPEC_FUZZINGSERVER,
                             'wsperfcontrol':     SPEC_WSPERFCONTROL,
                             'massconnect':       SPEC_MASSCONNECT,
                             'fuzzingwampclient': SPEC_FUZZINGWAMPCLIENT,
                             'fuzzingwampserver': SPEC_FUZZINGWAMPSERVER}
   # Parameters taking a value: [long name, short name, default, help text]
   optParameters = [
      ['mode', 'm', None, 'Test mode, one of: %s [required]' % ', '.join(MODES)],
      ['testset', 't', None, 'Run a test set from an import test spec.'],
      ['spec', 's', None, 'Test specification file [required in some modes].'],
      ['wsuri', 'w', None, 'WebSocket URI [required in some modes].'],
      ['ident', 'i', None, ('Testee client identifier [optional for client testees].')],
      ['key', 'k', None, ('Server private key file for secure WebSocket (WSS) [required in server modes for WSS].')],
      ['cert', 'c', None, ('Server certificate file for secure WebSocket (WSS) [required in server modes for WSS].')]
   ]
   # Boolean flags (no value)
   optFlags = [
      ['debug', 'd', 'Debug output [default: off].'],
      ['autobahnversion', 'a', 'Print version information for Autobahn and AutobahnTestSuite.']
   ]

   def postOptions(self):
      """
      Process the given options. Perform plausibility checks, etc...

      Raises twisted.python.usage.UsageError on invalid/missing options;
      exits the process immediately when only version info was requested.
      """
      if self['autobahnversion']:
         # --autobahnversion short-circuits: print versions and exit.
         print "Autobahn %s" % autobahn.version
         print "AutobahnTestSuite %s" % autobahntestsuite.version
         sys.exit(0)
      if not self['mode']:
         raise usage.UsageError, "a mode must be specified to run!"
      if self['mode'] not in WsTestOptions.MODES:
         raise usage.UsageError, (
            "Mode '%s' is invalid.\nAvailable modes:\n\t- %s" % (
               self['mode'], "\n\t- ".join(sorted(WsTestOptions.MODES))))
      # Modes that talk to / listen on a WebSocket endpoint need --wsuri.
      if (self['mode'] in WsTestOptions.MODES_NEEDING_WSURI and not self['wsuri']):
         raise usage.UsageError, "mode needs a WebSocket URI!"
class WsTestRunner(object):
   """
   Testsuite driver.

   Maps the parsed --mode value onto the corresponding testing service
   (testee / fuzzing / echo / broadcast / wsperf / massconnect / ...).
   """

   def __init__(self, options, spec = None):
      # :param options: parsed command line options (dict-like).
      # :param spec: optional test specification (already JSON-decoded).
      self.options = options
      self.spec = spec
      self.debug = self.options.get('debug', False)
      if self.debug:
         log.startLogging(sys.stdout)
      self.mode = str(self.options['mode'])

   def startService(self):
      """
      Start mode specific services.

      Returns whatever the mode's start function returns (often a Deferred
      or a truthy value telling the caller to run the reactor).
      """
      # Print runtime environment info useful for debugging test runs.
      print
      print "Using Twisted reactor class %s" % str(reactor.__class__)
      print "Using UTF8 Validator class %s" % str(Utf8Validator)
      print "Using XOR Masker classes %s" % str(XorMaskerNull)
      print "Using JSON processor module '%s'" % str(autobahn.wamp.json_lib.__name__)
      print
      # Dispatch on mode.
      # NOTE(review): startImportSpec / startExportSpec / startFuzzingWampClient /
      # startWeb are not defined in this class here -- presumably mixed in or
      # patched elsewhere; confirm before refactoring.
      if self.mode == "import":
         return self.startImportSpec(self.options['spec'])
      elif self.mode == "export":
         return self.startExportSpec(self.options['testset'], self.options.get('spec', None))
      elif self.mode == "fuzzingwampclient":
         return self.startFuzzingWampClient(self.options['testset'])
      elif self.mode == "web":
         return self.startWeb(debug = self.debug)
      elif self.mode == "testeeclient":
         return testee.startClient(self.options['wsuri'], ident = self.options['ident'], debug = self.debug)
      elif self.mode == "testeeserver":
         return testee.startServer(self.options['wsuri'], debug = self.debug)
      elif self.mode == "broadcastclient":
         return broadcast.startClient(self.options['wsuri'], debug = self.debug)
      elif self.mode == "broadcastserver":
         return broadcast.startServer(self.options['wsuri'], debug = self.debug)
      elif self.mode == "echoclient":
         return echo.startClient(self.options['wsuri'], debug = self.debug)
      elif self.mode == "echoserver":
         return echo.startServer(self.options['wsuri'], debug = self.debug)
      elif self.mode == "fuzzingclient":
         return fuzzing.startClient(self.spec, debug = self.debug)
      elif self.mode == "fuzzingserver":
         return fuzzing.startServer(self.spec, debug = self.debug)
      elif self.mode == "wsperfcontrol":
         return wsperfcontrol.startClient(self.options['wsuri'], self.spec, debug = self.debug)
      elif self.mode == "wsperfmaster":
         return wsperfmaster.startServer(debug = self.debug)
      elif self.mode == "massconnect":
         return massconnect.startClient(self.spec, debug = self.debug)
      else:
         # postOptions should have rejected unknown modes already.
         raise Exception("no mode '%s'" % self.mode)
def start(options, spec = None):
   """
   Actually startup a wstest run.

   :param options: Global options controlling wstest.
   :type options: dict

   :param spec: Test specification needed for certain modes. If none is given, but
                a spec is needed, a default spec is used.
   :type spec: dict
   """
   # Fall back to the built-in default spec when the mode requires one.
   if options['mode'] in WsTestOptions.MODES_NEEDING_SPEC and spec is None:
      spec = json.loads(WsTestOptions.DEFAULT_SPECIFICATIONS[options['mode']])
   wstest = WsTestRunner(options, spec)
   res = wstest.startService()
   ## only start reactor for modes needing it
   ##
   if res:
      ## if mode wants to shutdown reactor after done (e.g. clients),
      ## hook up machinery to do so
      ##
      if isinstance(res, Deferred):
         def shutdown(_):
            reactor.stop()
         res.addBoth(shutdown)
      reactor.run()
def run():
   """
   Run wstest from command line. This parses command line args etc.

   Exits with status 1 on a usage error; otherwise resolves the test
   specification (explicit file, implicit canonical file, or auto-generated
   default) and hands off to start().
   """
   ## parse wstest command lines options
   ##
   cmdOpts = WsTestOptions()
   try:
      cmdOpts.parseOptions()
   except usage.UsageError, errortext:
      # Python 2 style except clause; prints usage hint and exits.
      print '%s %s\n' % (sys.argv[0], errortext)
      print 'Try %s --help for usage details\n' % sys.argv[0]
      sys.exit(1)
   else:
      options = cmdOpts.opts
      ## check if mode needs a spec ..
      ##
      if options['mode'] in WsTestOptions.MODES_NEEDING_SPEC:
         ## .. if none was given ..
         ##
         if not options['spec']:
            ## .. assume canonical specfile name ..
            ##
            filename = "%s.json" % options['mode']
            options['spec'] = filename
            if not os.path.isfile(filename):
               ## .. if file does not exist, autocreate a spec file
               ##
               content = WsTestOptions.DEFAULT_SPECIFICATIONS[options['mode']]
               print "Auto-generating spec file '%s'" % filename
               f = open(filename, 'w')
               f.write(content)
               f.close()
            else:
               ## .. use existing one
               ##
               print "Using implicit spec file '%s'" % filename
         else:
            ## use explicitly given specfile
            ##
            print "Using explicit spec file '%s'" % options['spec']
         ## now load the spec ..
         ##
         spec_filename = os.path.abspath(options['spec'])
         print "Loading spec from %s" % spec_filename
         spec = json.loads(open(spec_filename).read())
      else:
         ## mode does not rely on spec
         ##
         spec = None
      ## now start a wstest run ..
      ##
      start(options, spec)
## Entry point when executed as a script (not imported).
if __name__ == '__main__':
   run()
| normanmaurer/autobahntestsuite-maven-plugin | src/main/resources/autobahntestsuite/wstest.py | Python | apache-2.0 | 10,679 |
# encoding:utf-8
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# $Id $
#------------------------------------------------------------------------
#
# libaccess
#
#------------------------------------------------------------------------
# Gramps plugin registration. This .gpr.py file is exec'd by the Gramps
# plugin manager, which injects `register`, `GENERAL`, `STABLE` and the
# translation function `_` into the execution namespace -- they are not
# imported here.
register(GENERAL,
id    = 'libaccess',
name  = "Generic DB Access lib",
description =  _("Provides a library for generic access to "
                 "the database and gen.lib."),
version = '1.0.29',
gramps_target_version = "5.1",
status = STABLE, # not yet tested with python 3
fname = 'libaccess.py',
authors = ["Doug Blank"],
authors_email = ["doug.blank@gmail.com"],
load_on_reg = True
  )
| gramps-project/addons-source | libaccess/libaccess.gpr.py | Python | gpl-2.0 | 1,400 |
#!/usr/bin/python
# -*-coding:utf-8-*-
import random
# Pool of real desktop-browser User-Agent strings; RandomUserAgentMiddleware
# picks one at random for each outgoing request to reduce fingerprinting.
USER_AGENT_LIST = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0']
class RandomUserAgentMiddleware(object):
    ''' Use a random user-agent for each request '''

    def process_request(self, request, spider):
        # Draw the candidate UA up-front so the RNG advances exactly once
        # per request, regardless of which branch we take below.
        chosen_ua = random.choice(USER_AGENT_LIST)
        meta = request.meta
        headers = request.headers
        # When the request carries a 'payload' that matches the UA already
        # set on the request, leave the headers untouched.
        if 'payload' in meta and 'User-Agent' in headers \
                and meta['payload'] == headers['User-Agent']:
            return
        # setdefault: an explicitly pre-set User-Agent header wins.
        headers.setdefault('User-Agent', chosen_ua)
        meta['UA'] = chosen_ua
class CustomDownloaderMiddleware(object):
    """Downloader middleware hooks for crawling specific domains via Google's cache.

    Configure ``GOOGLE_CACHE_DOMAINS`` in the project settings as a list of
    domains whose pages should be fetched from the Google cache. A spider may
    instead define its own ``google_cache_domains`` attribute, which takes
    precedence. The actual URL-rewriting logic is currently disabled; the
    hooks below are pass-throughs kept so it can be re-enabled.
    """

    def __init__(self):
        super(CustomDownloaderMiddleware, self).__init__()

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware from the running crawler."""
        return cls()

    def _cache_domains(self, spider):
        """Resolve the effective cache-domain list for *spider* (placeholder)."""
        return ""

    def process_request(self, request, spider):
        """Optionally redirect *request* through the Google cache (disabled).

        Note on Scrapy semantics: although the documentation suggests the
        original callback is always preserved, returning a new Request here
        effectively drops the original one, so any replacement request must
        carry the original callback itself. Returning ``None`` (as we do)
        lets the request proceed unchanged.
        """
        return None

    def process_response(self, request, response, spider):
        """Pass the response through unchanged.

        The google-cache URL un-rewriting this hook once performed is
        disabled; removed an unused ``status`` local that served no purpose.
        """
        return response
| trujunzhang/djzhang-targets | cwpoliticl/cwpoliticl/middlewares.py | Python | mit | 3,217 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import six
from heat.common import exception
from heat.engine.clients.os import nova
from heat.engine.resources.openstack.nova import nova_keypair
from heat.engine import scheduler
from heat.tests import common
from heat.tests.nova import fakes as fakes_nova
from heat.tests import utils
class NovaKeyPairTest(common.HeatTestCase):
    """Tests for the OS::Nova::KeyPair resource type (mox-based mocks)."""

    # Minimal HOT template declaring the single keypair resource under test.
    kp_template = {
        "heat_template_version": "2013-05-23",
        "resources": {
            "kp": {
                "type": "OS::Nova::KeyPair",
                "properties": {
                    "name": "key_pair"
                }
            }
        }
    }

    def setUp(self):
        """Build mock nova client and its keypairs manager."""
        super(NovaKeyPairTest, self).setUp()
        self.fake_nova = self.m.CreateMockAnything()
        self.fake_keypairs = self.m.CreateMockAnything()
        self.fake_nova.keypairs = self.fake_keypairs

    def _mock_key(self, name, pub=None, priv=None):
        """Return a mock nova keypair with optional public/private key data."""
        mkey = self.m.CreateMockAnything()
        mkey.id = name
        mkey.name = name
        if pub:
            mkey.public_key = pub
        if priv:
            mkey.private_key = priv
        return mkey

    def _get_test_resource(self, template):
        """Parse *template* into a stack and return its 'kp' KeyPair resource."""
        self.stack = utils.parse_stack(template)
        definition = self.stack.t.resource_definitions(self.stack)['kp']
        kp_res = nova_keypair.KeyPair('kp', definition, self.stack)
        # Make the nova client plugin hand back our mocked client.
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().AndReturn(self.fake_nova)
        return kp_res

    def _get_mock_kp_for_create(self, key_name, public_key=None,
                                priv_saved=False):
        """Prepare a keypair resource plus the mock key create() will return."""
        template = copy.deepcopy(self.kp_template)
        template['resources']['kp']['properties']['name'] = key_name
        props = template['resources']['kp']['properties']
        if public_key:
            props['public_key'] = public_key
        gen_pk = public_key or "generated test public key"
        nova_key = self._mock_key(key_name, gen_pk)
        if priv_saved:
            nova_key.private_key = "private key for %s" % key_name
            props['save_private_key'] = True
        kp_res = self._get_test_resource(template)
        # Expect exactly one create() call with the chosen name/public key.
        self.fake_keypairs.create(key_name,
                                  public_key=public_key).AndReturn(nova_key)
        return kp_res, nova_key

    def test_create_key(self):
        """Test basic create."""
        key_name = "generate_no_save"
        tp_test, created_key = self._get_mock_kp_for_create(key_name)
        self.fake_keypairs.get(key_name).AndReturn(created_key)
        self.m.ReplayAll()
        scheduler.TaskRunner(tp_test.create)()
        # Private key was not saved, so the attribute is empty.
        self.assertTrue("", tp_test.FnGetAtt('private_key'))
        self.assertTrue("generated test public key",
                        tp_test.FnGetAtt('public_key'))
        self.assertEqual((tp_test.CREATE, tp_test.COMPLETE), tp_test.state)
        self.assertEqual(tp_test.resource_id, created_key.name)
        self.m.VerifyAll()

    def test_create_key_empty_name(self):
        """Test creation of a keypair whose name is of length zero."""
        key_name = ""
        template = copy.deepcopy(self.kp_template)
        template['resources']['kp']['properties']['name'] = key_name
        stack = utils.parse_stack(template)
        definition = stack.t.resource_definitions(stack)['kp']
        kp_res = nova_keypair.KeyPair('kp', definition, stack)
        self.m.ReplayAll()
        error = self.assertRaises(exception.StackValidationFailed,
                                  kp_res.validate)
        self.assertIn("Property error", six.text_type(error))
        self.assertIn("kp.properties.name: length (0) is out of "
                      "range (min: 1, max: 255)", six.text_type(error))
        self.m.VerifyAll()

    def test_create_key_excess_name_length(self):
        """Test creation of a keypair whose name is of excess length."""
        key_name = 'k' * 256
        template = copy.deepcopy(self.kp_template)
        template['resources']['kp']['properties']['name'] = key_name
        stack = utils.parse_stack(template)
        definition = stack.t.resource_definitions(stack)['kp']
        kp_res = nova_keypair.KeyPair('kp', definition, stack)
        self.m.ReplayAll()
        error = self.assertRaises(exception.StackValidationFailed,
                                  kp_res.validate)
        self.assertIn("Property error", six.text_type(error))
        self.assertIn("kp.properties.name: length (256) is out of "
                      "range (min: 1, max: 255)", six.text_type(error))
        self.m.VerifyAll()

    def test_delete_key(self):
        """Test basic delete."""
        test_res = self._get_test_resource(self.kp_template)
        test_res.resource_id = "key_name"
        test_res.state_set(test_res.CREATE, test_res.COMPLETE)
        self.fake_keypairs.delete("key_name").AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(test_res.delete)()
        self.assertEqual((test_res.DELETE, test_res.COMPLETE), test_res.state)
        self.m.VerifyAll()

    def test_check_key(self):
        """Successful check() transitions the resource to CHECK/COMPLETE."""
        res = self._get_test_resource(self.kp_template)
        res.nova = mock.Mock()
        scheduler.TaskRunner(res.check)()
        self.assertEqual((res.CHECK, res.COMPLETE), res.state)

    def test_check_key_fail(self):
        """A failing nova lookup moves the resource to CHECK/FAILED."""
        res = self._get_test_resource(self.kp_template)
        res.nova = mock.Mock()
        res.nova().keypairs.get.side_effect = Exception("boom")
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(res.check))
        self.assertIn("boom", six.text_type(exc))
        self.assertEqual((res.CHECK, res.FAILED), res.state)

    def test_delete_key_not_found(self):
        """Test delete non-existent key."""
        test_res = self._get_test_resource(self.kp_template)
        test_res.resource_id = "key_name"
        test_res.state_set(test_res.CREATE, test_res.COMPLETE)
        # A 404 from nova must be treated as successful deletion.
        (self.fake_keypairs.delete("key_name")
            .AndRaise(fakes_nova.fake_exception()))
        self.m.ReplayAll()
        scheduler.TaskRunner(test_res.delete)()
        self.assertEqual((test_res.DELETE, test_res.COMPLETE), test_res.state)
        self.m.VerifyAll()

    def test_create_pub(self):
        """Test create using existing pub key."""
        key_name = "existing_key"
        pk = "test_create_pub"
        tp_test, created_key = self._get_mock_kp_for_create(key_name,
                                                            public_key=pk)
        self.m.ReplayAll()
        scheduler.TaskRunner(tp_test.create)()
        self.assertEqual("", tp_test.FnGetAtt('private_key'))
        self.assertEqual("test_create_pub",
                         tp_test.FnGetAtt('public_key'))
        self.assertEqual((tp_test.CREATE, tp_test.COMPLETE), tp_test.state)
        self.assertEqual(tp_test.resource_id, created_key.name)
        self.m.VerifyAll()

    def test_save_priv_key(self):
        """Test a saved private key."""
        key_name = "save_private"
        tp_test, created_key = self._get_mock_kp_for_create(key_name,
                                                            priv_saved=True)
        self.fake_keypairs.get(key_name).AndReturn(created_key)
        self.m.ReplayAll()
        scheduler.TaskRunner(tp_test.create)()
        self.assertEqual("private key for save_private",
                         tp_test.FnGetAtt('private_key'))
        self.assertEqual("generated test public key",
                         tp_test.FnGetAtt('public_key'))
        self.assertEqual((tp_test.CREATE, tp_test.COMPLETE), tp_test.state)
        self.assertEqual(tp_test.resource_id, created_key.name)
        self.m.VerifyAll()
| rdo-management/heat | heat/tests/test_nova_keypair.py | Python | apache-2.0 | 8,308 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-present, CloudZero, Inc. All rights reserved.
# Licensed under the BSD-style license. See LICENSE file in the project root for full license information.
"""
Helper functions that reduce boilerplate in plugin tests
"""
import attrdict
from unittest.mock import PropertyMock, Mock, MagicMock
from reactor.aws.provider import Provider
def create_mock_resource(metadata):
    """Build a Mock resource whose ``meta.data`` attribute is *metadata*.

    Mimics the ``resource.meta.data`` attribute chain that boto3-style
    provider resources expose, so plugin tests can supply arbitrary metadata.

    :param metadata: value (typically a dict) to expose as ``resource.meta.data``
    :return: a ``unittest.mock.Mock`` with ``meta.data`` set
    """
    resource = Mock()
    resource.meta = PropertyMock()
    # Fix: the original assigned a throwaway PropertyMock() to meta.data and
    # immediately overwrote it with *metadata*; assign the metadata directly.
    resource.meta.data = metadata
    return resource
def create_mock_account_context():
    """Assemble an AttrMap standing in for a Reactor account context in tests.

    The context carries fixed test identifiers plus a mocked AWS provider
    whose ``resource`` and ``client`` attributes are PropertyMocks.
    """
    mocked_provider = MagicMock(spec=Provider)()
    mocked_provider.resource = PropertyMock()
    mocked_provider.client = PropertyMock()

    context = attrdict.AttrMap()
    context.namespace = 'test-ns'
    context.environment_id = 'my-env-id'
    context.reactor_account_id = '098765432109'
    context.connected_account_id = '123456789012'
    context.aws_provider = mocked_provider
    context.account_details = {
        'region': 'us-east-2'
    }
    return context
| Cloudzero/cloudzero-reactor-aws | test/unit/features/accounts/probe_plugins/plugin_test_helpers.py | Python | bsd-3-clause | 1,177 |
"""Augmentations."""
from pylint.checkers.base import DocStringChecker, NameChecker
from pylint.checkers.design_analysis import MisdesignChecker
from pylint.checkers.classes import ClassChecker
from pylint.checkers.newstyle import NewStyleConflictChecker
from pylint.checkers.variables import VariablesChecker
from astroid import InferenceError, Getattr
from astroid.nodes import Class, From
from astroid.scoped_nodes import Class as ScopedClass, Module
from pylint.checkers.typecheck import TypeChecker
from pylint_django.utils import node_is_subclass, PY3
from pylint_plugin_utils import augment_visit, suppress_message
def ignore_import_warnings_for_related_fields(orig_method, self, node):
    """
    Replaces the leave_module method on the VariablesChecker class to
    prevent unused-import warnings which are caused by the ForeignKey
    and OneToOneField transformations. By replacing the nodes in the
    AST with their type rather than the django field, imports of the
    form 'from django.db.models import OneToOneField' raise an unused-import
    warning
    """
    to_consume = self._to_consume[0] # pylint: disable=W0212
    # we can disable this warning ('Access to a protected member _to_consume of a client class')
    # as it's not actually a client class, but rather, this method is being monkey patched
    # onto the class and so the access is valid
    new_things = {}
    # Python 3 dicts expose items(); Python 2 dicts use iteritems().
    iterat = to_consume[0].items if PY3 else to_consume[0].iteritems
    for name, stmts in iterat():
        if isinstance(stmts[0], From):
            # Drop ForeignKey/OneToOneField names from the consumed imports
            # so pylint does not report them as unused.
            if any([n[0] in ('ForeignKey', 'OneToOneField') for n in stmts[0].names]):
                continue
        new_things[name] = stmts
    # Rebuild the consume tuple with the filtered name map in slot 0.
    new_consume = (new_things,) + to_consume[1:]
    self._to_consume = [new_consume] # pylint: disable=W0212
    return orig_method(self, node)
def foreign_key_sets(chain, node):
    """
    When a Django model has a ForeignKey to another model, the target
    of the foreign key gets a '<modelname>_set' attribute for accessing
    a queryset of the model owning the foreign key - eg:

    class ModelA(models.Model):
        pass

    class ModelB(models.Model):
        a = models.ForeignKey(ModelA)

    Now, ModelA instances will have a modelb_set attribute.

    It's also possible to explicitly name the relationship using the related_name argument
    to the ForeignKey constructor. As it's impossible to know this without inspecting all
    models before processing, we'll instead do a "best guess" approach and see if the attribute
    being accessed goes on to be used as a queryset. This is via 'duck typing': if the method
    called on the attribute being accessed is something we might find in a queryset, we'll
    warn.
    """
    # 'quack' becomes True when the attribute access looks queryset-like.
    quack = False

    # Note: it would have been nice to import the Manager object from Django and
    # get its attributes that way - and this used to be the method - but unfortunately
    # there's no guarantee that Django is properly configured at that stage, and importing
    # anything from the django.db package causes an ImproperlyConfigured exception.
    # Therefore we'll fall back on a hard-coded list of attributes which won't be as accurate,
    # but this is not 100% accurate anyway.
    manager_attrs = (
        'none',
        'all',
        'count',
        'dates',
        'distinct',
        'extra',
        'get',
        'get_or_create',
        'create',
        'bulk_create',
        'filter',
        'aggregate',
        'annotate',
        'complex_filter',
        'exclude',
        'in_bulk',
        'iterator',
        'latest',
        'order_by',
        'select_for_update',
        'select_related',
        'prefetch_related',
        'values',
        'values_list',
        'update',
        'reverse',
        'defer',
        'only',
        'using',
        'exists',
    )

    if node.attrname in manager_attrs or node.attrname.endswith('_set'):
        # if this is a X_set method, that's a pretty strong signal that this is the default
        # Django name, rather than one set by related_name
        quack = True
    else:
        # we will
        if isinstance(node.parent, Getattr):
            # Check whether the NEXT attribute in the chain is a manager
            # method (covers obj.related_name.filter(...) style access).
            func_name = getattr(node.parent, 'attrname', None)
            if func_name in manager_attrs:
                quack = True

    if quack:
        children = list(node.get_children())
        for child in children:
            try:
                inferred = child.infered()
            except InferenceError:
                pass
            else:
                for cls in inferred:
                    if (node_is_subclass(
                            cls, 'django.db.models.manager.Manager') or
                            node_is_subclass(cls, 'django.db.models.base.Model')):
                        # This means that we are looking at a subclass of models.Model
                        # and something is trying to access a <something>_set attribute.
                        # Since this could exist, we will return so as not to raise an
                        # error.
                        return
    # Not recognised as a queryset-like access: continue with the next
    # handler in the suppression chain (which may still raise the warning).
    chain()
def foreign_key_ids(chain, node):
    """Swallow warnings for attributes that look like FK id columns (<field>_id)."""
    if not node.attrname.endswith('_id'):
        chain()
def is_model_admin_subclass(node):
    """Checks that node is derivative of ModelAdmin class."""
    # Must be a top-level class whose name ends in 'Admin' (not nested in
    # another class) before the expensive subclass walk is attempted.
    looks_like_admin = node.name.endswith('Admin') and not isinstance(node.parent, Class)
    return looks_like_admin and node_is_subclass(node, 'django.contrib.admin.options.ModelAdmin')
def is_model_media_subclass(node):
    """Checks that node is a nested ``Media`` class of a Django component."""
    nested_media = node.name == 'Media' and isinstance(node.parent, Class)
    if not nested_media:
        return False
    # Classes that may legitimately own a Media inner class.
    owners = ('django.contrib.admin.options.ModelAdmin',
              'django.forms.widgets.Media',
              'django.db.models.base.Model',
              'django.forms.forms.Form',
              'django.forms.models.ModelForm')
    return any(node_is_subclass(node.parent, owner) for owner in owners)
def is_model_meta_subclass(node):
    """Checks that node is a nested ``Meta`` class of a Django/DRF component."""
    nested_meta = node.name == 'Meta' and isinstance(node.parent, Class)
    if not nested_meta:
        return False
    # Classes that may legitimately own a Meta inner class.
    owners = ('django.db.models.base.Model',
              'django.forms.forms.Form',
              'django.forms.models.ModelForm',
              'rest_framework.serializers.ModelSerializer',
              'rest_framework.generics.GenericAPIView',
              'rest_framework.viewsets.ReadOnlyModelViewSet',
              'rest_framework.viewsets.ModelViewSet',
              'django_filters.filterset.FilterSet',)
    return any(node_is_subclass(node.parent, owner) for owner in owners)
def is_model_mpttmeta_subclass(node):
    """Checks that node is a nested ``MPTTMeta`` class (django-mptt)."""
    nested_mpttmeta = node.name == 'MPTTMeta' and isinstance(node.parent, Class)
    if not nested_mpttmeta:
        return False
    owners = ('django.db.models.base.Model',
              'django.forms.forms.Form',
              'django.forms.models.ModelForm')
    return any(node_is_subclass(node.parent, owner) for owner in owners)
def is_model_test_case_subclass(node):
    """Checks that node is derivative of TestCase class."""
    # NOTE(review): the ``and`` here differs from the sibling predicates
    # (e.g. is_model_admin_subclass rejects with ``or``); as written, the
    # guard only rejects nodes that BOTH lack the 'Test' suffix AND are
    # top-level -- confirm this is intentional before changing it.
    if not node.name.endswith('Test') and not isinstance(node.parent, Class):
        return False
    return node_is_subclass(node, 'django.test.testcases.TestCase')
def is_model_view_subclass_method_shouldnt_be_function(node):
    """Checks that node is get or post method of the View class."""
    if node.name not in ('get', 'post'):
        return False
    # Climb the AST until the enclosing class scope (or the tree root).
    enclosing = node.parent
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent
    if enclosing is None:
        return False
    return enclosing.name.endswith('View') and node_is_subclass(enclosing, '.View')
def is_model_view_subclass_unused_argument(node):
    """
    Checks that node is get or post method of the View class and it has valid arguments.

    TODO: Bad checkings, need to be more smart.
    """
    return (is_model_view_subclass_method_shouldnt_be_function(node)
            and 'request' in node.argnames())
def is_model_field_display_method(node):
    """Accept model's fields with get_*_display names."""
    attr = node.attrname
    # Bare ``return`` (None) deliberately preserved for non-matching names;
    # callers only rely on truthiness.
    if not (attr.startswith('get_') and attr.endswith('_display')):
        return
    target = node.last_child()
    if target:
        # TODO: could validate the names of the fields on the model rather than
        # blindly accepting get_*_display
        try:
            for candidate in target.infered():
                if node_is_subclass(candidate, 'django.db.models.base.Model'):
                    return True
        except InferenceError:
            return False
    return False
def is_model_media_valid_attributes(node):
    """Suppress warnings for valid attributes of Media class."""
    if node.name not in ('js', ):
        return False
    # Climb to the enclosing class scope, if any.
    enclosing = node.parent
    while enclosing and not isinstance(enclosing, ScopedClass):
        enclosing = enclosing.parent
    return enclosing is not None and enclosing.name == "Media"
def is_templatetags_module_valid_constant(node):
    """Suppress warnings for valid constants in templatetags module."""
    if node.name not in ('register', ):
        return False
    # Walk up to the owning module (assumed to exist on any real AST).
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return "templatetags." in module.name
def is_urls_module_valid_constant(node):
    """Suppress warnings for valid constants in urls module."""
    if node.name not in ('urlpatterns', ):
        return False
    # Walk up to the owning module (assumed to exist on any real AST).
    module = node.parent
    while not isinstance(module, Module):
        module = module.parent
    return module.name.endswith('urls')
def is_class(class_name):
    """Shortcut for node_is_subclass."""
    def check(node):
        return node_is_subclass(node, class_name)
    return check
def wrap(orig_method, with_method):
    """Wrap ``orig_method`` so that ``with_method`` runs in its place.

    ``with_method`` is invoked with ``orig_method`` prepended to the call
    arguments, letting it decide whether/how to delegate to the original.

    Fix: the wrapper previously discarded ``with_method``'s return value;
    it is now propagated to the caller.
    """
    def wrap_func(*args, **kwargs):
        return with_method(orig_method, *args, **kwargs)
    return wrap_func
def apply_augmentations(linter):
    """Apply augmentation and suppression rules.

    Wires the predicates above into pylint via pylint_plugin_utils:
    augment_visit runs extra logic around a checker visit, while
    suppress_message disables a specific message id when the predicate
    matches the visited node.
    """
    augment_visit(linter, TypeChecker.visit_getattr, foreign_key_sets)
    augment_visit(linter, TypeChecker.visit_getattr, foreign_key_ids)
    suppress_message(linter, TypeChecker.visit_getattr, 'E1101', is_model_field_display_method)
    # formviews have too many ancestors, there's nothing the user of the library can do about that
    suppress_message(linter, MisdesignChecker.visit_class, 'R0901', is_class('django.views.generic.edit.FormView'))
    # model forms have no __init__ method anywhere in their bases
    suppress_message(linter, ClassChecker.visit_class, 'W0232', is_class('django.forms.models.ModelForm'))
    # forms implement __getitem__ but not __len__, thus raising a "Badly implemented container" warning which
    # we will suppress.
    suppress_message(linter, MisdesignChecker.leave_class, 'R0924', is_class('django.forms.forms.Form'))
    suppress_message(linter, MisdesignChecker.leave_class, 'R0924', is_class('django.forms.models.ModelForm'))
    # Meta
    suppress_message(linter, DocStringChecker.visit_class, 'C0111', is_model_meta_subclass)
    suppress_message(linter, NewStyleConflictChecker.visit_class, 'C1001', is_model_meta_subclass)
    suppress_message(linter, ClassChecker.visit_class, 'W0232', is_model_meta_subclass)
    suppress_message(linter, MisdesignChecker.leave_class, 'R0903', is_model_meta_subclass)
    # Media
    suppress_message(linter, NameChecker.visit_assname, 'C0103', is_model_media_valid_attributes)
    suppress_message(linter, DocStringChecker.visit_class, 'C0111', is_model_media_subclass)
    suppress_message(linter, NewStyleConflictChecker.visit_class, 'C1001', is_model_media_subclass)
    suppress_message(linter, ClassChecker.visit_class, 'W0232', is_model_media_subclass)
    suppress_message(linter, MisdesignChecker.leave_class, 'R0903', is_model_media_subclass)
    # Too few public methods started appearing for Views and Models as part of Pylint>=1.4 / astroid>=1.3.3
    # Not sure why, suspect this is a failure to get the parent classes somewhere
    # For now, just suppress it on models and views
    suppress_message(linter, MisdesignChecker.leave_class, 'R0903', is_class('django.db.models.base.Model'))
    # TODO: why does this not work with the fqn of 'View'? Must be something to do with the overriding and transforms
    suppress_message(linter, MisdesignChecker.leave_class, 'R0903', is_class('.View'))
    # Admin
    # Too many public methods (40+/20)
    # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase
    # MisdesignChecker.config.max_public_methods to this value to count only user' methods.
    #nb_public_methods = 0
    #for method in node.methods():
    #    if not method.name.startswith('_'):
    #        nb_public_methods += 1
    suppress_message(linter, MisdesignChecker.leave_class, 'R0904', is_model_admin_subclass)
    # Tests
    suppress_message(linter, MisdesignChecker.leave_class, 'R0904', is_model_test_case_subclass)
    # View
    # Method could be a function (get, post)
    suppress_message(linter, ClassChecker.leave_function, 'R0201', is_model_view_subclass_method_shouldnt_be_function)
    # Unused argument 'request' (get, post)
    suppress_message(linter, VariablesChecker.leave_function, 'W0613', is_model_view_subclass_unused_argument)
    # django-mptt
    suppress_message(linter, DocStringChecker.visit_class, 'C0111', is_model_mpttmeta_subclass)
    suppress_message(linter, NewStyleConflictChecker.visit_class, 'C1001', is_model_mpttmeta_subclass)
    suppress_message(linter, ClassChecker.visit_class, 'W0232', is_model_mpttmeta_subclass)
    suppress_message(linter, MisdesignChecker.leave_class, 'R0903', is_model_mpttmeta_subclass)
    # ForeignKey and OneToOneField
    # Monkey-patch leave_module so FK/O2O imports are not flagged as unused.
    VariablesChecker.leave_module = wrap(VariablesChecker.leave_module, ignore_import_warnings_for_related_fields)
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/pylint_django/augmentations/__init__.py | Python | agpl-3.0 | 14,036 |
""" Simple module to add InsertableOrderedDict, variant of OrderedDict with insertafter method """
# pylint: disable=E1101
# disable error while accessing private member of OrderedDict
from collections import OrderedDict
class InsertableOrderedDict(OrderedDict):
    """ OrderedDict extending class which adds method to insert new key at arbitary position """
    # NOTE(review): this reaches into the pure-Python OrderedDict internals
    # of CPython 2.7 via name-mangled attributes (_OrderedDict__map,
    # _OrderedDict__root, doubly-linked list nodes stored as 3-lists).
    # The C-accelerated OrderedDict in Python 3 does not expose these
    # attributes -- confirm the target interpreter before reuse.
    def insertafter(self, afterkey, key, value, dict_setitem=dict.__setitem__):
        # Each link is stored as a list of length three: [0=PREV, 1=NEXT, 2=KEY].
        if afterkey is not None:
            if afterkey not in self:
                raise KeyError('Cannot insert new value after not-existing key \'{0}\''.format(afterkey))
            # Insert after the given key's link node.
            node = self._OrderedDict__map[afterkey]
        else:
            # afterkey=None means insert at the very front (after the root sentinel).
            node = self._OrderedDict__root
        node_next = node[1]
        # Re-inserting an existing key removes it first so it moves position.
        if key in self:
            del self[key]
        # Splice the new link between node and node_next, register it in the
        # key map, then store the value via the plain dict item setter.
        node[1] = node_next[0] = self._OrderedDict__map[key] = [node, node_next, key]
        dict_setitem(self, key, value)
| pecet/pytosg | TwitterStatsLib/InsertableOrderedDict.py | Python | mit | 1,030 |
"""
A decorator for caching properties in classes.
Examples:
>>> class Foo(object):
... @cached_property
... def bar(self):
... print("This message only print once")
... return None
>>> foo = Foo()
>>> foo.bar
This message only print once
>>> foo.bar
"""
import functools
def cached_property(func):
    """Decorator turning a method into a property whose value is computed
    once and then memoized on the instance under ``_cached_<name>``."""
    cache_name = '_cached_' + func.__name__

    @property
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return getattr(self, cache_name)
        except AttributeError:
            value = func(self, *args, **kwargs)
            setattr(self, cache_name, value)
            return value
    return wrapper
| wp-lai/xmachinelearning | utilities/cached_property.py | Python | mit | 638 |
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared Response Model (SRM)
The implementations are based on the following publications:
.. [Chen2015] "A Reduced-Dimension fMRI Shared Response Model",
P.-H. Chen, J. Chen, Y. Yeshurun-Dishon, U. Hasson, J. Haxby, P. Ramadge
Advances in Neural Information Processing Systems (NIPS), 2015.
http://papers.nips.cc/paper/5855-a-reduced-dimension-fmri-shared-response-model
.. [Anderson2016] "Enabling Factor Analysis on Thousand-Subject Neuroimaging
Datasets",
Michael J. Anderson, Mihai Capotă, Javier S. Turek, Xia Zhu, Theodore L.
Willke, Yida Wang, Po-Hsuan Chen, Jeremy R. Manning, Peter J. Ramadge,
Kenneth A. Norman,
IEEE International Conference on Big Data, 2016.
https://doi.org/10.1109/BigData.2016.7840719
"""
# Authors: Po-Hsuan Chen (Princeton Neuroscience Institute) and Javier Turek
# (Intel Labs), 2015
import logging
import numpy as np
import scipy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite
from sklearn.exceptions import NotFittedError
from mpi4py import MPI
import sys
__all__ = [
"SRM", "DetSRM"
]
logger = logging.getLogger(__name__)
def _init_w_transforms(data, features, random_states, comm=MPI.COMM_SELF):
    """Initialize each subject's mapping W_i with a random orthogonal matrix.

    Parameters
    ----------
    data : list of 2D arrays, element i has shape=[voxels_i, samples]
        fMRI data; entries may be None for subjects held on other MPI ranks.
    features : int
        Number of latent features K.
    random_states : list of `RandomState`s
        One per subject, so initialization is reproducible per subject.
    comm : mpi4py.MPI.Intracomm
        Communicator over which voxel counts are summed.

    Returns
    -------
    w : list of array, element i has shape=[voxels_i, features]
        Random orthogonal transforms (None where data[i] is None).
    voxels : list of int
        Voxel count per subject, combined across all ranks.
    """
    subjects = len(data)
    w = []
    voxels = np.empty(subjects, dtype=int)
    for idx in range(subjects):
        subject_data = data[idx]
        if subject_data is None:
            # Subject lives on another rank; allreduce fills in its count.
            voxels[idx] = 0
            w.append(None)
        else:
            voxels[idx] = subject_data.shape[0]
            rnd = random_states[idx].random_sample((voxels[idx], features))
            # QR of a random matrix yields a random orthonormal basis Q.
            q, _ = np.linalg.qr(rnd)
            w.append(q)
    voxels = comm.allreduce(voxels, op=MPI.SUM)
    return w, voxels
class SRM(BaseEstimator, TransformerMixin):
    """Probabilistic Shared Response Model (SRM)
    Given multi-subject data, factorize it as a shared response S among all
    subjects and an orthogonal transform W per subject:
    .. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
    Parameters
    ----------
    n_iter : int, default: 10
        Number of iterations to run the algorithm.
    features : int, default: 50
        Number of features to compute.
    rand_seed : int, default: 0
        Seed for initializing the random number generator.
    comm : mpi4py.MPI.Intracomm
        The MPI communicator containing the data
    Attributes
    ----------
    w_ : list of array, element i has shape=[voxels_i, features]
        The orthogonal transforms (mappings) for each subject.
    s_ : array, shape=[features, samples]
        The shared response.
    sigma_s_ : array, shape=[features, features]
        The covariance of the shared response Normal distribution.
    mu_ : list of array, element i has shape=[voxels_i]
        The voxel means over the samples for each subject.
    rho2_ : array, shape=[subjects]
        The estimated noise variance :math:`\\rho_i^2` for each subject
    comm : mpi4py.MPI.Intracomm
        The MPI communicator containing the data
    random_state_: `RandomState`
        Random number generator initialized using rand_seed
    Note
    ----
    The number of voxels may be different between subjects. However, the
    number of samples must be the same across subjects.
    The probabilistic Shared Response Model is approximated using the
    Expectation Maximization (EM) algorithm proposed in [Chen2015]_. The
    implementation follows the optimizations published in [Anderson2016]_.
    This is a single node version.
    The run-time complexity is :math:`O(I (V T K + V K^2 + K^3))` and the
    memory complexity is :math:`O(V T)` with I - the number of iterations,
    V - the sum of voxels from all subjects, T - the number of samples, and
    K - the number of features (typically, :math:`V \\gg T \\gg K`).
    """
    def __init__(self, n_iter=10, features=50, rand_seed=0,
                 comm=MPI.COMM_SELF):
        self.n_iter = n_iter
        self.features = features
        self.rand_seed = rand_seed
        self.comm = comm
        return
    def fit(self, X, y=None):
        """Compute the probabilistic Shared Response Model
        Parameters
        ----------
        X : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.
            Entries may be None for subjects held by other MPI ranks.
        y : not used
        """
        logger.info('Starting Probabilistic SRM')
        # Check the number of subjects
        if len(X) <= 1:
            raise ValueError("There are not enough subjects "
                             "({0:d}) to train the model.".format(len(X)))
        # Check for input data sizes: every rank must see the same number
        # of (possibly None) subject slots.
        number_subjects = len(X)
        number_subjects_vec = self.comm.allgather(number_subjects)
        for rank in range(self.comm.Get_size()):
            if number_subjects_vec[rank] != number_subjects:
                raise ValueError(
                    "Not all ranks have same number of subjects")
        # Collect size information; each rank fills only its local subjects
        # and the allreduce combines them.
        # NOTE(review): np.int is deprecated and removed in NumPy >= 1.24;
        # consider plain int here.
        shape0 = np.zeros((number_subjects,), dtype=np.int)
        shape1 = np.zeros((number_subjects,), dtype=np.int)
        for subject in range(number_subjects):
            if X[subject] is not None:
                assert_all_finite(X[subject])
                shape0[subject] = X[subject].shape[0]
                shape1[subject] = X[subject].shape[1]
        shape0 = self.comm.allreduce(shape0, op=MPI.SUM)
        shape1 = self.comm.allreduce(shape1, op=MPI.SUM)
        # Check if all subjects have same number of TRs
        number_trs = np.min(shape1)
        for subject in range(number_subjects):
            if shape1[subject] < self.features:
                raise ValueError(
                    "There are not enough samples to train the model with "
                    "{0:d} features.".format(self.features))
            if shape1[subject] != number_trs:
                raise ValueError("Different number of samples between subjects"
                                 ".")
        # Run SRM
        self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X)
        return self
    def transform(self, X, y=None):
        """Use the model to transform matrix to Shared Response space
        Parameters
        ----------
        X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
            Each element in the list contains the fMRI data of one subject
            note that number of voxels and samples can vary across subjects
        y : not used (as it is unsupervised learning)
        Returns
        -------
        s : list of 2D arrays, element i has shape=[features_i, samples_i]
            Shared responses from input data (X).  Entries stay None for
            subjects whose data is None on this rank.
        """
        # Check if the model exist
        if hasattr(self, 'w_') is False:
            raise NotFittedError("The model fit has not been run yet.")
        # Check the number of subjects
        if len(X) != len(self.w_):
            raise ValueError("The number of subjects does not match the one"
                             " in the model.")
        s = [None] * len(X)
        for subject in range(len(X)):
            if X[subject] is not None:
                # Project onto the shared space: S_i = W_i^T X_i
                s[subject] = self.w_[subject].T.dot(X[subject])
        return s
    def _init_structures(self, data, subjects):
        """Initializes data structures for SRM and preprocess the data.
        Parameters
        ----------
        data : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.
        subjects : int
            The total number of subjects in `data`.
        Returns
        -------
        x : list of array, element i has shape=[voxels_i, samples]
            Demeaned data for each subject.
        mu : list of array, element i has shape=[voxels_i]
            Voxel means over samples, per subject.
        rho2 : array, shape=[subjects]
            Noise variance :math:`\\rho^2` per subject, initialized to 1.
        trace_xtx : array, shape=[subjects]
            The squared Frobenius norm of the demeaned data in `x`.
        """
        x = []
        mu = []
        rho2 = np.zeros(subjects)
        trace_xtx = np.zeros(subjects)
        for subject in range(subjects):
            rho2[subject] = 1
            if data[subject] is not None:
                mu.append(np.mean(data[subject], 1))
                trace_xtx[subject] = np.sum(data[subject] ** 2)
                # Center each voxel's time series.
                x.append(data[subject] - mu[subject][:, np.newaxis])
            else:
                # Subject held on another rank: keep placeholders.
                mu.append(None)
                trace_xtx[subject] = 0
                x.append(None)
        return x, mu, rho2, trace_xtx
    def _likelihood(self, chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
                    trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
                    samples):
        """Calculate the log-likelihood function
        Parameters
        ----------
        chol_sigma_s_rhos : array, shape=[features, features]
            Cholesky factorization of the matrix (Sigma_S + sum_i(1/rho_i^2)
            * I)
        log_det_psi : float
            Determinant of diagonal matrix Psi (containing the rho_i^2 value
            voxels_i times).
        chol_sigma_s : array, shape=[features, features]
            Cholesky factorization of the matrix Sigma_S
        trace_xt_invsigma2_x : float
            Trace of :math:`\\sum_i (||X_i||_F^2/\\rho_i^2)`
        inv_sigma_s_rhos : array, shape=[features, features]
            Inverse of :math:`(\\Sigma_S + \\sum_i(1/\\rho_i^2) * I)`
        wt_invpsi_x : array, shape=[features, samples]
        samples : int
            The total number of samples in the data.
        Returns
        -------
        loglikehood : float
            The log-likelihood value (up to an additive constant).
        """
        # log-determinants come from the Cholesky factors: log|A| = sum(log diag(L)^2)
        log_det = (np.log(np.diag(chol_sigma_s_rhos) ** 2).sum() + log_det_psi
                   + np.log(np.diag(chol_sigma_s) ** 2).sum())
        loglikehood = -0.5 * samples * log_det - 0.5 * trace_xt_invsigma2_x
        loglikehood += 0.5 * np.trace(
            wt_invpsi_x.T.dot(inv_sigma_s_rhos).dot(wt_invpsi_x))
        # + const --> -0.5*nTR*nvoxel*subjects*math.log(2*math.pi)
        return loglikehood
    def _srm(self, data):
        """Expectation-Maximization algorithm for fitting the probabilistic SRM.
        Parameters
        ----------
        data : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.
        Returns
        -------
        sigma_s : array, shape=[features, features]
            The covariance :math:`\\Sigma_s` of the shared response Normal
            distribution.
        w : list of array, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.
        mu : list of array, element i has shape=[voxels_i]
            The voxel means :math:`\\mu_i` over the samples for each subject.
        rho2 : array, shape=[subjects]
            The estimated noise variance :math:`\\rho_i^2` for each subject
        s : array, shape=[features, samples]
            The shared response.
        """
        # Sample count: minimum over all ranks; ranks with no local data
        # contribute sys.maxsize, which MPI.MIN discards.
        local_min = min([d.shape[1] for d in data if d is not None],
                        default=sys.maxsize)
        samples = self.comm.allreduce(local_min, op=MPI.MIN)
        subjects = len(data)
        self.random_state_ = np.random.RandomState(self.rand_seed)
        # One child RandomState per subject for reproducible per-subject init.
        random_states = [
            np.random.RandomState(self.random_state_.randint(2**32))
            for i in range(len(data))]
        # Initialization step: initialize the outputs with initial values,
        # voxels with the number of voxels in each subject, and trace_xtx with
        # the ||X_i||_F^2 of each subject.
        w, voxels = _init_w_transforms(data, self.features, random_states,
                                       self.comm)
        x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
        shared_response = np.zeros((self.features, samples))
        sigma_s = np.identity(self.features)
        rank = self.comm.Get_rank()
        # Main loop of the algorithm
        for iteration in range(self.n_iter):
            logger.info('Iteration %d' % (iteration + 1))
            # E-step:
            # Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W
            if rank == 0:
                rho0 = (1 / rho2).sum()
                # Invert Sigma_s using Cholesky factorization
                (chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
                    sigma_s, check_finite=False)
                inv_sigma_s = scipy.linalg.cho_solve(
                    (chol_sigma_s, lower_sigma_s), np.identity(self.features),
                    check_finite=False)
                # Invert (Sigma_s + rho_0 * I) using Cholesky factorization
                sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
                chol_sigma_s_rhos, lower_sigma_s_rhos = \
                    scipy.linalg.cho_factor(sigma_s_rhos,
                                            check_finite=False)
                inv_sigma_s_rhos = scipy.linalg.cho_solve(
                    (chol_sigma_s_rhos, lower_sigma_s_rhos),
                    np.identity(self.features), check_finite=False)
            # Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
            # of X_i^T * rho_i^-2 * X_i
            wt_invpsi_x = np.zeros((self.features, samples))
            trace_xt_invsigma2_x = 0.0
            for subject in range(subjects):
                if data[subject] is not None:
                    wt_invpsi_x += (w[subject].T.dot(x[subject])) \
                        / rho2[subject]
                    trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]
            # Partial sums from every rank are combined on rank 0.
            wt_invpsi_x = self.comm.reduce(wt_invpsi_x, op=MPI.SUM)
            trace_xt_invsigma2_x = self.comm.reduce(trace_xt_invsigma2_x,
                                                    op=MPI.SUM)
            trace_sigma_s = None
            if rank == 0:
                log_det_psi = np.sum(np.log(rho2) * voxels)
                # Update the shared response
                shared_response = sigma_s.dot(
                    np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot(
                        wt_invpsi_x)
                # M-step
                # Update Sigma_s and compute its trace
                sigma_s = (inv_sigma_s_rhos
                           + shared_response.dot(shared_response.T) / samples)
                trace_sigma_s = samples * np.trace(sigma_s)
            # Rank 0 owns the global E/M updates above; share the results.
            shared_response = self.comm.bcast(shared_response)
            trace_sigma_s = self.comm.bcast(trace_sigma_s)
            # Update each subject's mapping transform W_i and error variance
            # rho_i^2
            for subject in range(subjects):
                if x[subject] is not None:
                    a_subject = x[subject].dot(shared_response.T)
                    # Tiny diagonal perturbation keeps the SVD well-defined
                    # when a_subject is (near) rank-deficient.
                    perturbation = np.zeros(a_subject.shape)
                    np.fill_diagonal(perturbation, 0.001)
                    u_subject, s_subject, v_subject = np.linalg.svd(
                        a_subject + perturbation, full_matrices=False)
                    # Orthogonal Procrustes solution: W_i = U V^T
                    w[subject] = u_subject.dot(v_subject)
                    rho2[subject] = trace_xtx[subject]
                    rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum()
                    rho2[subject] += trace_sigma_s
                    rho2[subject] /= samples * voxels[subject]
                else:
                    rho2[subject] = 0
            # Subjects are partitioned across ranks, so the sum fills in the
            # rho2 entries computed elsewhere.
            rho2 = self.comm.allreduce(rho2, op=MPI.SUM)
            if rank == 0:
                if logger.isEnabledFor(logging.INFO):
                    # Calculate and log the current log-likelihood for checking
                    # convergence
                    loglike = self._likelihood(
                        chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
                        trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
                        samples)
                    logger.info('Objective function %f' % loglike)
            sigma_s = self.comm.bcast(sigma_s)
        return sigma_s, w, mu, rho2, shared_response
class DetSRM(BaseEstimator, TransformerMixin):
    """Deterministic Shared Response Model (DetSRM)
    Given multi-subject data, factorize it as a shared response S among all
    subjects and an orthogonal transform W per subject:
    .. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
    Parameters
    ----------
    n_iter : int, default: 10
        Number of iterations to run the algorithm.
    features : int, default: 50
        Number of features to compute.
    rand_seed : int, default: 0
        Seed for initializing the random number generator.
    Attributes
    ----------
    w_ : list of array, element i has shape=[voxels_i, features]
        The orthogonal transforms (mappings) for each subject.
    s_ : array, shape=[features, samples]
        The shared response.
    random_state_: `RandomState`
        Random number generator initialized using rand_seed
    Note
    ----
    The number of voxels may be different between subjects. However, the
    number of samples must be the same across subjects.
    The Deterministic Shared Response Model is approximated using the
    Block Coordinate Descent (BCD) algorithm proposed in [Chen2015]_.
    This is a single node version.
    The run-time complexity is :math:`O(I (V T K + V K^2))` and the memory
    complexity is :math:`O(V T)` with I - the number of iterations, V - the
    sum of voxels from all subjects, T - the number of samples, K - the
    number of features (typically, :math:`V \\gg T \\gg K`), and N - the
    number of subjects.
    """
    def __init__(self, n_iter=10, features=50, rand_seed=0):
        self.n_iter = n_iter
        self.features = features
        self.rand_seed = rand_seed
        return
    def fit(self, X, y=None):
        """Compute the Deterministic Shared Response Model
        Parameters
        ----------
        X : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.
        y : not used
        """
        logger.info('Starting Deterministic SRM')
        # Check the number of subjects
        if len(X) <= 1:
            raise ValueError("There are not enough subjects "
                             "({0:d}) to train the model.".format(len(X)))
        # Check for input data sizes
        if X[0].shape[1] < self.features:
            raise ValueError(
                "There are not enough samples to train the model with "
                "{0:d} features.".format(self.features))
        # Check if all subjects have same number of TRs
        number_trs = X[0].shape[1]
        number_subjects = len(X)
        for subject in range(number_subjects):
            assert_all_finite(X[subject])
            if X[subject].shape[1] != number_trs:
                raise ValueError("Different number of samples between subjects"
                                 ".")
        # Run SRM
        self.w_, self.s_ = self._srm(X)
        return self
    def transform(self, X, y=None):
        """Use the model to transform data to the Shared Response subspace
        Parameters
        ----------
        X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
            Each element in the list contains the fMRI data of one subject.
        y : not used
        Returns
        -------
        s : list of 2D arrays, element i has shape=[features_i, samples_i]
            Shared responses from input data (X)
        """
        # Check if the model exist
        if hasattr(self, 'w_') is False:
            raise NotFittedError("The model fit has not been run yet.")
        # Check the number of subjects
        if len(X) != len(self.w_):
            raise ValueError("The number of subjects does not match the one"
                             " in the model.")
        s = [None] * len(X)
        for subject in range(len(X)):
            # Project each subject's data: S_i = W_i^T X_i
            s[subject] = self.w_[subject].T.dot(X[subject])
        return s
    def _objective_function(self, data, w, s):
        """Calculate the objective function
        Parameters
        ----------
        data : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.
        w : list of 2D arrays, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.
        s : array, shape=[features, samples]
            The shared response
        Returns
        -------
        objective : float
            The objective function value:
            :math:`\\frac{1}{2T} \\sum_i ||X_i - W_i S||_F^2`.
        """
        subjects = len(data)
        objective = 0.0
        for m in range(subjects):
            objective += \
                np.linalg.norm(data[m] - w[m].dot(s), 'fro')**2
        return objective * 0.5 / data[0].shape[1]
    def _compute_shared_response(self, data, w):
        """ Compute the shared response S
        Parameters
        ----------
        data : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.
        w : list of 2D arrays, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.
        Returns
        -------
        s : array, shape=[features, samples]
            The shared response for the subjects data with the mappings in w:
            the average of the projections :math:`W_i^T X_i`.
        """
        s = np.zeros((w[0].shape[1], data[0].shape[1]))
        for m in range(len(w)):
            s = s + w[m].T.dot(data[m])
        s /= len(w)
        return s
    def _srm(self, data):
        """Block Coordinate Descent algorithm for fitting the deterministic
        SRM (alternates between updating each W_i and the shared response S).
        Parameters
        ----------
        data : list of 2D arrays, element i has shape=[voxels_i, samples]
            Each element in the list contains the fMRI data of one subject.
        Returns
        -------
        w : list of array, element i has shape=[voxels_i, features]
            The orthogonal transforms (mappings) :math:`W_i` for each subject.
        s : array, shape=[features, samples]
            The shared response.
        """
        subjects = len(data)
        self.random_state_ = np.random.RandomState(self.rand_seed)
        # One child RandomState per subject for reproducible per-subject init.
        random_states = [
            np.random.RandomState(self.random_state_.randint(2**32))
            for i in range(len(data))]
        # Initialization step: initialize the outputs with initial values,
        # voxels with the number of voxels in each subject.
        w, _ = _init_w_transforms(data, self.features, random_states)
        shared_response = self._compute_shared_response(data, w)
        if logger.isEnabledFor(logging.INFO):
            # Calculate the current objective function value
            objective = self._objective_function(data, w, shared_response)
            logger.info('Objective function %f' % objective)
        # Main loop of the algorithm
        for iteration in range(self.n_iter):
            logger.info('Iteration %d' % (iteration + 1))
            # Update each subject's mapping transform W_i:
            for subject in range(subjects):
                a_subject = data[subject].dot(shared_response.T)
                # Tiny diagonal perturbation keeps the SVD well-defined
                # when a_subject is (near) rank-deficient.
                perturbation = np.zeros(a_subject.shape)
                np.fill_diagonal(perturbation, 0.001)
                u_subject, _, v_subject = np.linalg.svd(
                    a_subject + perturbation, full_matrices=False)
                # Orthogonal Procrustes solution: W_i = U V^T
                w[subject] = u_subject.dot(v_subject)
            # Update the shared response:
            shared_response = self._compute_shared_response(data, w)
            if logger.isEnabledFor(logging.INFO):
                # Calculate the current objective function value
                objective = self._objective_function(data, w, shared_response)
                logger.info('Objective function %f' % objective)
        return w, shared_response
| yidawang/brainiak | brainiak/funcalign/srm.py | Python | apache-2.0 | 25,902 |
#!/usr/bin/env python
# Rewrites the GUA properties file, overriding 'gatewayHost' with the CCO VM
# address passed on the command line.  All other existing properties are kept.
ENCODING='utf-8'
# NOTE(review): configobj and json appear unused below -- confirm before
# removing, in case another deployment script imports this module.
import configobj
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("gatewayHost", help="gatewayHost is IP address of CCO VM")
args = parser.parse_args()
inputFile = '/usr/local/tomcatgua/webapps/access/WEB-INF/gua.properties'
# The file is rewritten in place.
outputFile = inputFile
#http://www.programiz.com/python-programming/dictionary
# Overrides applied on top of the existing properties.
myDict = {
 'gatewayHost': args.gatewayHost
}
propertyDict = {}
#http://stackoverflow.com/questions/11555468/how-should-i-read-a-file-line-by-line-in-python
# Parse 'key=value' lines; split on the first '=' only so values may
# themselves contain '='.
with open(inputFile) as fp:
    for line in fp:
        if '=' in line:
            key,value = line.split("=", 1)
            propertyDict[key] = value.rstrip()
propertyDict.update(myDict)
with open(outputFile, 'w') as f:
    f.writelines('{}={}\n'.format(k,v) for k, v in propertyDict.items())
    f.write('\n')
| HybridCloudAutomation/cloudcenter-automated-installation | cc-install-automation/cc-property-scripts/02_cca-gua.properties.py | Python | gpl-3.0 | 821 |
from __future__ import print_function
# Demonstrates solving a symmetric sparse linear system with the LIS library
# (via the project-local lis_wrapper): builds an 8x8 symmetric matrix, runs
# a preconditioned CG solve, and verifies the solution.
import lis_wrapper
import numpy as np
import scipy.sparse
# Define a symmetric 8 x 8 dense upper triangular matrix first.
# This matrix is part of the examples which come with Intel's MKL library
# and is used here for historical reasons.
# A:
# 7.0, 1.0, 2.0, 7.0,
# -4.0, 8.0, 2.0,
# 1.0, 5.0,
# 7.0, 9.0,
# 5.0, 1.0, 5.0,
# -1.0, 5.0,
# 11.0,
# 5.0
A = np.zeros((8, 8), dtype=np.float64)
A[0, 0] = 7.0
A[0, 2] = 1.0
A[0, 5] = 2.0
A[0, 6] = 7.0
A[1, 1] = -4.0
A[1, 2] = 8.0
A[1, 4] = 2.0
A[2, 2] = 1.0
A[2, 7] = 5.0
A[3, 3] = 7.0
A[3, 6] = 9.0
A[4, 4] = 5.0
A[4, 5] = 1.0
A[4, 6] = 5.0
A[5, 5] = -1.0
A[5, 7] = 5.0
A[6, 6] = 11.0
A[7, 7] = 5.0
# Dense matrix:
print(A)
# Dense matrix to sparse matrix in CSR format
Acsr = scipy.sparse.csr_matrix(A)
print("Sparse upper triangular CSR matrix:")
print("values: ", Acsr.data)
# Indices are 0 based
print("index: ", Acsr.indices)
print("pointer: ", Acsr.indptr)
# LIS Manual: Appendix File Formats
# "Note that both the upper and lower triangular entries need to be stored
# irrespective of whether the matrix is symmetric or not."
# Convert the upper triangular CSR matrix Acsr to 'full' CSR matrix Acsr_full
# (A + A^T double-counts the diagonal, so subtract it once).
Acsr_full = Acsr + Acsr.T - scipy.sparse.diags(Acsr.diagonal())
print()
print("Sparse 'full' CSR matrix:")
print("values: ", Acsr_full.data)
# Indices are 0 based
print("index: ", Acsr_full.indices)
print("pointer: ", Acsr_full.indptr)
# initial guess for solution x
x = np.zeros(8)
# right hand side
b = np.ones(8)
info = 1 # make LIS more verbose
tol = 1e-6 # convergence tolerance
max_iter = 10000 # maximum number of iterations
logfname = "residuals.log" # log
# in lis_cmd following parameters are set:
# -i cg : conjugate gradient solver
# -p ssor : SSOR preconditioner
# -tol : convergence tolerance
# -maxiter : maximum number of iterations
# -p ssor : SSOR preconditioner
# -ssor_w 1.0 : relaxation coefficient w (0 < w < 2)
# -initx_zeros 0 : don't set initial values for x to 0. The initial guess is passed by x to LIS
# -print mem : Save the residual history to logfile
lis_cmd = "-i cg -tol %e -maxiter %d -p ssor -ssor_w 1.0 -initx_zeros 0 -print mem" % (tol, max_iter)
# x is updated in place with the solution.
lis_wrapper.lis(Acsr_full.data, Acsr_full.indices, Acsr_full.indptr, x, b, info, lis_cmd, logfname)
# check solution x with original dense matrix A first
# convert upper triangular matrix AA to 'full' matrix
y = (A + A.T - np.eye(A.shape[0]) * A.diagonal()).dot(x)
assert (np.allclose(b, y))
# check solution with sparse matrix Acsr_full
y = Acsr_full.dot(x)
assert (np.allclose(b, y))
print("Solution x: ", x)
print()
print("A * x:", y)
print("b :", b)
| Kalle0x12/Test2 | csr_test.py | Python | gpl-3.0 | 2,910 |
#!/usr/bin/env python3
# -*- coding : utf-8 -*-
# ====================================
# Paul Cacheux <paulcacheux@gmail.com>
# MPSI 1
# DM informatique
# 13/12/2014
# ====================================
from random import randint # Module importé
N = 6
HISTORIQUE = [] # Variables globales
def afficher(last_move=(0, 0)):
    """Draw the n x n board with the knight's visited squares.

    Each visited square shows its move number; the latest square is
    highlighted in red via ANSI escape codes.  When ``last_move`` is a
    non-zero (dx, dy) offset, the square it points to is marked with ':'.
    """
    step = len(str(N**2))  # cell width: digits of the largest move number
    liste = [" "*(step-1) + "."] * (N**2)
    for i in range(len(HISTORIQUE)):
        (x, y) = HISTORIQUE[i]
        if i == len(HISTORIQUE)-1:
            # Highlight the current (last) position in red.
            liste[x + y * N] = "\033[91m" + str(i) + "\033[0m"
        else:
            liste[x + y * N] = str(i)
    # NOTE(review): x and y leak out of the loop above; calling afficher()
    # with an empty HISTORIQUE raises NameError -- callers append first.
    if last_move != (0, 0):
        last_x = x + last_move[0]
        last_y = y + last_move[1]
        liste[last_x + last_y * N] = " "*(step-1) + ":"
    Espace = "\n" * 15
    Titre = "TOUR {}. Cavalier en {}.\n\n".format(len(HISTORIQUE), (x, y))
    # Row template (L1) and horizontal separator (L2) for the ASCII grid.
    L1 = " " + (("|{:" + str(step) + "}") * N)[1:] + "\n"
    L2 = " " + (("+" + "-"*step) * N)[1:] + "\n"
    Grille = (L1 + (L2 + L1) * (N-1)).format(*liste)
    print(Espace + Titre + Grille)
def proposer(x, y, n=None, historique=None):
    """Return the list of legal knight moves (dx, dy) from square (x, y).

    A move is legal when the target square lies on the n x n board and has
    not been visited yet.  ``n`` and ``historique`` default to the module
    globals ``N`` and ``HISTORIQUE`` so existing callers are unchanged, but
    they may be passed explicitly (e.g. for testing or reuse).
    """
    if n is None:
        n = N
    if historique is None:
        historique = HISTORIQUE
    poss = []
    # The eight knight moves, in a fixed order so the UI numbering is stable.
    for dx, dy in [
        (1, 2), (1, -2), (-1, 2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)
    ]:
        if 0 <= x+dx < n and 0 <= y+dy < n and (x+dx, y+dy) not in historique:
            poss.append((dx, dy))
    return poss
def presenter(poss, last_move):
    """Print the available moves and input options to the user.

    The move equal to ``last_move`` (the move just undone) is flagged
    with '*'.  The trailing newlines pad the menu to a constant height.
    """
    prop = "CHOIX :" + "\n"
    prop += " espace : arrière" + "\n"
    prop += " entrée : automatique" + "\n"
    prop += " autre : hasard" + "\n"
    for i, p in enumerate(poss):
        star = " "
        if last_move == p:
            star = "*"
        prop += " {}{}:{}\n".format(star, i, p)
    # Pad so the menu always occupies the same number of lines (max 8 moves).
    prop += "\n" * (8 - len(poss))
    print(prop)
def choisir(poss, last_move):
    """Ask the user for the next move and return it as a (dx, dy) tuple.

    Empty input steps automatically (next alternative after ``last_move``),
    '_' or ' ' requests a backtrack (returned as (0, 0)), 'f...' raises
    StopIteration to end the game, a number picks that move, and anything
    else picks a random move.

    Fix: the original bare ``except:`` clauses also swallowed
    KeyboardInterrupt/SystemExit; they are narrowed to the exceptions the
    lookups can actually raise.
    """
    reponse = input(" ? ")
    if reponse == "":
        if last_move == (0, 0):
            return poss[0]  # automatic choice: first alternative
        try:
            # Try the alternative after the move we just backtracked from.
            return poss[poss.index(last_move) + 1]
        except (ValueError, IndexError):
            return (0, 0)
    if reponse in "_ ":
        return (0, 0)  # user asked to backtrack
    if reponse[0].lower() == "f":
        raise StopIteration
    try:
        return poss[int(reponse)]  # user picked a move by number
    except (ValueError, IndexError):
        return poss[randint(0, len(poss)-1)]  # random choice
def course_intelligente(x=0, y=0, manuel=False):
    """
    Advance the knight automatically as far as possible (Warnsdorff-style:
    always move to the square with the fewest onward moves).
    With manuel=True the board is shown and the user confirms each step.
    """
    HISTORIQUE.append((x, y))
    while len(HISTORIQUE) != N**2:
        if manuel:
            afficher()
            input()
        x, y = HISTORIQUE[-1]
        possibilities = proposer(x, y)
        best_score = 10 # more than the 8 possible knight moves
        new_x, new_y = 0, 0
        # Pick the successor with the fewest onward possibilities
        # (ties resolved in favour of the last candidate, because of <=).
        # NOTE(review): if possibilities is empty the fallback (0, 0) is
        # appended, which can loop forever on a dead end -- confirm intended.
        for poss in possibilities:
            score = len(proposer(x + poss[0], y + poss[1]))
            if score <= best_score:
                best_score = score
                new_x = x + poss[0]
                new_y = y + poss[1]
        HISTORIQUE.append((new_x, new_y))
    afficher()
    print("Tour fini !!")
def course(x=0, y=0):
    """Interactive game loop: advance the knight as far as possible,
    letting the user choose, backtrack, or quit at each step."""
    HISTORIQUE.append((x, y))
    last_move = (0, 0)
    afficher(last_move)
    while True:
        (x, y) = HISTORIQUE[-1]
        poss = proposer(x, y)
        if poss == []:
            input("BLOQUE ! Seul choix possible : arrière." + "\n" * 13)
            (dx, dy) = (0, 0) # stuck: the only option is to backtrack
        else:
            presenter(poss, last_move)
            try:
                (dx, dy) = choisir(poss, last_move)
            except StopIteration:
                # choisir() raises StopIteration when the user quits ('f').
                break
        if (dx, dy) == (0, 0): # backtrack
            if len(HISTORIQUE) > 1: # only if there is something to undo
                rem_x, rem_y = HISTORIQUE.pop()
                # Remember the undone move so it can be flagged/skipped.
                new_x = rem_x - HISTORIQUE[-1][0]
                new_y = rem_y - HISTORIQUE[-1][1]
                last_move = (new_x, new_y)
        else:
            HISTORIQUE.append((x + dx, y + dy))
            last_move = (0, 0)
        afficher(last_move)
    print("Fin")
if __name__ == '__main__': # ==============MAIN==============
    # Ask for an optional board size; keep the default N on empty or
    # invalid input (values must be > 2 for a knight to move at all).
    print("Par défaut, la taille de la grille est de 6 * 6.")
    print(" - pour garder cette valeur, appuyer sur entrée,")
    p = input(" - sinon, proposer une valeur : ")
    try:
        assert 2 < int(p)
        N = int(p)
    except:
        # Deliberate: any bad input (non-numeric, too small) keeps N = 6.
        pass
    course_intelligente(manuel=False)
    # course()
| M0n0xy2/mpsi_python_cours | python/knight_tour/DM/paul_cacheux_cavalier.py | Python | mit | 4,772 |
# Compare the siblings' ages and report who is older and by how much.
brothers_age = 27
sisters_age = 28

if brothers_age == sisters_age:
    print("They have the same age!")
else:
    # Order the pair as (older, younger) so one code path handles both cases.
    if brothers_age > sisters_age:
        older, younger = ("Brother", brothers_age), ("Sister", sisters_age)
    else:
        older, younger = ("Sister", sisters_age), ("Brother", brothers_age)
    print(older[0] + " is older!")
    print(older[0] + " is " + str(older[1]))
    print(younger[0] + " is " + str(younger[1]))
    print("They have a difference of " + str(older[1] - younger[1]) + " years.")
# -*- coding: utf-8 -*-
"""
args
~~~~
"""
import os
import sys
from sys import argv
from glob import glob
from collections import OrderedDict
# Python 3
if sys.version_info[0] == 3:
string_type = str
else:
string_type = basestring
def _expand_path(path):
"""Expands directories and globs in given path."""
paths = []
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if os.path.isdir(path):
for (dir, dirs, files) in os.walk(path):
for file in files:
paths.append(os.path.join(dir, file))
else:
paths.extend(glob(path))
return paths
def _is_collection(obj):
    """Tests if an object is a collection. Strings don't count."""
    return (not isinstance(obj, string_type)) and hasattr(obj, '__getitem__')
class ArgsList(object):
    """CLI argument management.

    Wraps a list of command-line arguments (``sys.argv[1:]`` unless an
    explicit list is given) and provides lookup, membership, filtering
    and grouping helpers over it.
    """

    def __init__(self, args=None, no_argv=False):
        """Initialize from *args*, or fall back to ``sys.argv[1:]``.

        :param args: explicit argument list.  When falsy, ``sys.argv[1:]``
            is used unless *no_argv* is true, in which case the list
            starts empty.
        :param no_argv: suppress the ``sys.argv`` fallback.
        """
        if not args:
            if not no_argv:
                self._args = argv[1:]
            else:
                self._args = []
        else:
            self._args = args

    def __len__(self):
        return len(self._args)

    def __repr__(self):
        return '<args %s>' % (repr(self._args))

    def __getitem__(self, i):
        try:
            return self.all[i]
        except IndexError:
            return None

    def __contains__(self, x):
        return self.first(x) is not None

    def get(self, x):
        """Returns argument at given index, else none."""
        try:
            return self.all[x]
        except IndexError:
            return None

    def get_with(self, x):
        """Returns first argument that contains given string.

        NOTE: raises TypeError (``list[None]``) when nothing matches,
        mirroring the historical behavior.
        """
        return self.all[self.first_with(x)]

    def remove(self, x):
        """Removes given arg (or list thereof) from Args object."""
        def _remove(x):
            found = self.first(x)
            if found is not None:
                self._args.pop(found)
        if _is_collection(x):
            for item in x:
                # BUGFIX: previously removed ``x`` (the whole collection)
                # repeatedly instead of each individual item.
                _remove(item)
        else:
            _remove(x)

    def pop(self, x):
        """Removes and Returns value at given index, else none."""
        try:
            return self._args.pop(x)
        except IndexError:
            return None

    def any_contain(self, x):
        """Tests if given string is contained in any stored argument."""
        # BUGFIX: ``bool(index)`` treated a match at index 0 as "no match".
        return self.first_with(x) is not None

    def contains(self, x):
        """Tests if given object is in arguments list.
        Accepts strings and lists of strings."""
        return self.__contains__(x)

    def first(self, x):
        """Returns first found index of given value (or list of values)."""
        def _find(x):
            try:
                return self.all.index(str(x))
            except ValueError:
                return None
        if _is_collection(x):
            for item in x:
                found = _find(item)
                if found is not None:
                    return found
            return None
        else:
            return _find(x)

    def first_with(self, x):
        """Returns first found index containing value (or list of values)."""
        def _find(x):
            try:
                for arg in self.all:
                    if x in arg:
                        return self.all.index(arg)
            except ValueError:
                return None
        if _is_collection(x):
            for item in x:
                found = _find(item)
                # BUGFIX: ``if found:`` skipped legitimate matches at index 0.
                if found is not None:
                    return found
            return None
        else:
            return _find(x)

    def first_without(self, x):
        """Returns first found index not containing value
        (or list of values).
        """
        def _find(x):
            try:
                for arg in self.all:
                    if x not in arg:
                        return self.all.index(arg)
            except ValueError:
                return None
        if _is_collection(x):
            for item in x:
                found = _find(item)
                # BUGFIX: ``if found:`` skipped legitimate matches at index 0.
                if found is not None:
                    return found
            return None
        else:
            return _find(x)

    def start_with(self, x):
        """Returns all arguments beginning with given string
        (or list thereof).
        """
        _args = []
        for arg in self.all:
            if _is_collection(x):
                for _x in x:
                    # BUGFIX: previously tested ``arg.startswith(x)`` (the
                    # whole collection) instead of the current candidate.
                    if arg.startswith(_x):
                        _args.append(arg)
                        break
            else:
                if arg.startswith(x):
                    _args.append(arg)
        return ArgsList(_args, no_argv=True)

    def contains_at(self, x, index):
        """Tests if given [list of] string is at given index."""
        try:
            if _is_collection(x):
                # BUGFIX: previously returned False after inspecting only
                # the first candidate; now every candidate is checked.
                for _x in x:
                    if (_x in self.all[index]) or (_x == self.all[index]):
                        return True
                return False
            else:
                return (x in self.all[index])
        except IndexError:
            return False

    def has(self, x):
        """Returns true if argument exists at given index.
        Accepts: integer.
        """
        try:
            self.all[x]
            return True
        except IndexError:
            return False

    def value_after(self, x):
        """Returns value of argument after given found argument
        (or list thereof).
        """
        try:
            try:
                i = self.all.index(x)
            except ValueError:
                return None
            return self.all[i + 1]
        except IndexError:
            return None

    @property
    def grouped(self):
        """Extracts --flag groups from argument list.
        Returns {format: Args, ...}; positional args land under '_'.
        """
        collection = OrderedDict(_=ArgsList(no_argv=True))
        _current_group = None
        for arg in self.all:
            if arg.startswith('-'):
                _current_group = arg
                collection.setdefault(arg, ArgsList(no_argv=True))
            else:
                if _current_group:
                    collection[_current_group]._args.append(arg)
                else:
                    collection['_']._args.append(arg)
        return collection

    @property
    def last(self):
        """Returns last argument."""
        try:
            return self.all[-1]
        except IndexError:
            return None

    @property
    def all(self):
        """Returns all arguments."""
        return self._args

    def all_with(self, x):
        """Returns all arguments containing given string (or list thereof)."""
        _args = []
        for arg in self.all:
            if _is_collection(x):
                for _x in x:
                    if _x in arg:
                        _args.append(arg)
                        break
            else:
                if x in arg:
                    _args.append(arg)
        return ArgsList(_args, no_argv=True)

    def all_without(self, x):
        """Returns all arguments not containing given string
        (or list thereof).
        """
        _args = []
        for arg in self.all:
            if _is_collection(x):
                for _x in x:
                    if _x not in arg:
                        _args.append(arg)
                        break
            else:
                if x not in arg:
                    _args.append(arg)
        return ArgsList(_args, no_argv=True)

    @property
    def flags(self):
        """Returns Arg object including only flagged arguments."""
        return self.start_with('-')

    @property
    def not_flags(self):
        """Returns Arg object excluding flagged arguments."""
        return self.all_without('-')

    @property
    def files(self):
        """Returns an expanded list of all valid paths that were passed in.

        NOTE: the previous signature declared an ``absolute`` parameter,
        but properties cannot receive arguments, so that branch was dead
        code; paths are always returned as given (relative).
        """
        _paths = []
        for arg in self.all:
            for path in _expand_path(arg):
                if os.path.exists(path):
                    _paths.append(path)
        return _paths

    @property
    def not_files(self):
        """Returns a list of all arguments that aren't files/globs."""
        _args = []
        for arg in self.all:
            if not len(_expand_path(arg)):
                if not os.path.exists(arg):
                    _args.append(arg)
        return ArgsList(_args, no_argv=True)

    @property
    def copy(self):
        """Returns a copy of Args object for temporary manipulation."""
        # BUGFIX: previously shared the underlying list (mutating the
        # "copy" mutated the original) and, for an empty list, fell back
        # to re-reading sys.argv.  Copy the list and suppress the fallback.
        return ArgsList(list(self.all), no_argv=True)

    @property
    def assignments(self):
        """Extracts assignment values from assignments."""
        collection = OrderedDict()
        for arg in self.all:
            if '=' in arg:
                collection.setdefault(
                    arg.split('=', 1)[0], ArgsList(no_argv=True))
                collection[arg.split('=', 1)[0]]._args.append(
                    arg.split('=', 1)[1])
        return collection
# Module-level singleton wrapping the real ``sys.argv`` so callers can do
# ``from args import get, flags, files`` without instantiating anything.
args = ArgsList()
# Bound-method aliases: these remain live views onto ``args``.
get = args.get
get_with = args.get_with
remove = args.remove
pop = args.pop
any_contain = args.any_contain
contains = args.contains
first = args.first
first_with = args.first_with
first_without = args.first_without
start_with = args.start_with
contains_at = args.contains_at
has = args.has
value_after = args.value_after
# NOTE: grouped, last, all, flags, not_flags, files, not_files, copy and
# assignments alias *properties*, so they are evaluated exactly once here
# at import time (snapshots); all_with / all_without stay bound methods.
grouped = args.grouped
last = args.last
all = args.all
all_with = args.all_with
all_without = args.all_without
flags = args.flags
not_flags = args.not_flags
files = args.files
not_files = args.not_files
copy = args.copy
assignments = args.assignments
| kennethreitz/args | args.py | Python | bsd-2-clause | 9,854 |
"""
Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
"""
import datetime
import logging
import math
import re
import zlib
from contextlib import contextmanager
from time import time
import pymongo
import pytz
import six
from six.moves import cPickle as pickle
from contracts import check, new_contract
from mongodb_proxy import autoretry_read
# Import this just to export it
from pymongo.errors import DuplicateKeyError # pylint: disable=unused-import
from xmodule.exceptions import HeartbeatFailure
from xmodule.modulestore import BlockData
from xmodule.modulestore.split_mongo import BlockKey
from xmodule.mongo_utils import connect_to_mongodb, create_collection_index
try:
from django.core.cache import caches, InvalidCacheBackendError
DJANGO_AVAILABLE = True
except ImportError:
DJANGO_AVAILABLE = False
# Register BlockData with the ``contracts`` library so string contracts such
# as 'dict(BlockKey: BlockData)' below can reference it by name.
new_contract('BlockData', BlockData)
log = logging.getLogger(__name__)
def get_cache(alias):
    """Look up the Django cache configured under *alias*.

    Kept as a standalone helper primarily so tests
    (test_split_modulestore.py) can patch the cache out.
    """
    selected = caches[alias]
    return selected
def round_power_2(value):
    """
    Return value rounded up to the nearest power of 2 (0 stays 0).

    Integral inputs are handled with exact bit arithmetic, because
    ``math.ceil(math.log(value, 2))`` is subject to floating point error
    and could bump an exact power of two (e.g. ``2 ** 29``) up to the
    *next* power.  Non-integral (or negative) inputs keep the original
    logarithm-based behavior.
    """
    if value == 0:
        return 0
    if value > 0 and float(value).is_integer():
        # Smallest power of two >= value, computed exactly for integers.
        return float(1 << max(0, (int(value) - 1).bit_length()))
    return math.pow(2, math.ceil(math.log(value, 2)))
class Tagger(object):
    """
    Collects tags and size measurements for a timed code block.

    Handed out by :class:`QueryTimer` so the timed block can record
    measurements and tags while it runs.
    """

    def __init__(self, default_sample_rate):
        self.added_tags = []
        self.measures = []
        self.sample_rate = default_sample_rate

    def measure(self, name, size):
        """
        Record a named size measurement of the timed data.

        Arguments:
            name: The name of the measurement.
            size (float): The size of the measurement.
        """
        self.measures.append((name, size))

    def tag(self, **kwargs):
        """
        Attach tags to the timer: each keyword becomes a tag name and its
        argument the tag value.
        """
        for pair in kwargs.items():
            self.added_tags.append(pair)

    @property
    def tags(self):
        """
        All tags: the measurements bucketed into powers of 2, followed by
        any tags added via :meth:`tag`.
        """
        bucketed = [
            '{}:{}'.format(name, round_power_2(size))
            for name, size in self.measures
        ]
        explicit = [
            '{}:{}'.format(name, value)
            for name, value in self.added_tags
        ]
        return bucketed + explicit
class QueryTimer(object):
    """
    Times a block of code while letting it record measurements about
    itself via a :class:`Tagger`.
    """

    def __init__(self, metric_base, sample_rate=1):
        """
        Arguments:
            metric_base: The prefix to be used for all queries captured
                with this :class:`QueryTimer`.
            sample_rate: default sampling rate handed to each Tagger.
        """
        self._metric_base = metric_base
        self._sample_rate = sample_rate

    @contextmanager
    def timer(self, metric_name, course_context):
        """
        Context manager that times the wrapped block under
        ``metric_name`` and yields a :class:`Tagger` so the block can
        attach tags and quantity measurements.

        Arguments:
            metric_name: The name used to aggregate all of these metrics.
            course_context: The course which the query is being made for.
        """
        tagger = Tagger(self._sample_rate)
        full_name = "{}.{}".format(self._metric_base, metric_name)
        started = time()
        try:
            yield tagger
        finally:
            # Elapsed time and tags are assembled here; actual metric
            # emission has been removed upstream, so they are currently
            # computed but not reported.
            finished = time()
            collected = tagger.tags
            collected.append('course:{}'.format(course_context))
TIMER = QueryTimer(__name__, 0.01)
def structure_from_mongo(structure, course_context=None):
    """
    Converts the 'blocks' key from a list [block_data] to a map
    {BlockKey: block_data}.
    Converts 'root' from [block_type, block_id] to BlockKey.
    Converts 'blocks.*.fields.children' from [[block_type, block_id]] to [BlockKey].
    N.B. Does not convert any other ReferenceFields (because we don't know which fields they are at this level).
    The conversion mutates *structure* in place and also returns it.
    Arguments:
        structure: The document structure to convert
        course_context (CourseKey): For metrics gathering, the CourseKey
            for the course that this data is being processed for.
    """
    with TIMER.timer('structure_from_mongo', course_context) as tagger:
        tagger.measure('blocks', len(structure['blocks']))
        # Validate the raw document's shape (via pycontracts) before any
        # in-place conversion happens.
        check('seq[2]', structure['root'])
        check('list(dict)', structure['blocks'])
        for block in structure['blocks']:
            if 'children' in block['fields']:
                check('list(list[2])', block['fields']['children'])
        # 'root' is stored as [block_type, block_id]; promote to BlockKey.
        structure['root'] = BlockKey(*structure['root'])
        new_blocks = {}
        for block in structure['blocks']:
            if 'children' in block['fields']:
                block['fields']['children'] = [BlockKey(*child) for child in block['fields']['children']]
            # block_id is popped so it becomes part of the dict key rather
            # than remaining a field on the BlockData itself.
            new_blocks[BlockKey(block['block_type'], block.pop('block_id'))] = BlockData(**block)
        structure['blocks'] = new_blocks
        return structure
def structure_to_mongo(structure, course_context=None):
    """
    Flatten a structure's 'blocks' mapping for mongo storage.

    The {BlockKey: block_data} map becomes a list of block dicts, with
    BlockKey.type inserted as 'block_type' and BlockKey.id as 'block_id'.
    'root' is left alone, since namedtuples can be inserted directly
    into mongo.  A new dict is returned; the input is not modified.
    """
    with TIMER.timer('structure_to_mongo', course_context) as tagger:
        tagger.measure('blocks', len(structure['blocks']))
        # Validate the in-memory shape (via pycontracts) up front.
        check('BlockKey', structure['root'])
        check('dict(BlockKey: BlockData)', structure['blocks'])
        for block_data in six.itervalues(structure['blocks']):
            if 'children' in block_data.fields:
                check('list(BlockKey)', block_data.fields['children'])
        storable = dict(structure)
        flattened = []
        for key, block_data in six.iteritems(structure['blocks']):
            entry = dict(block_data.to_storable())
            # setdefault: an explicit 'block_type' already present on the
            # stored form wins over the key's type.
            entry.setdefault('block_type', key.type)
            entry['block_id'] = key.id
            flattened.append(entry)
        storable['blocks'] = flattened
        return storable
class CourseStructureCache(object):
    """
    Wrapper around django cache object to cache course structure objects.
    The course structures are pickled and compressed when cached.
    If the 'course_structure_cache' doesn't exist, then don't do anything
    for set and get.
    """
    def __init__(self, ):
        # self.cache stays None when Django isn't importable or the
        # 'course_structure_cache' backend isn't configured; get/set then
        # degrade to no-ops.
        self.cache = None
        if DJANGO_AVAILABLE:
            try:
                self.cache = get_cache('course_structure_cache')
            except InvalidCacheBackendError:
                pass
    def get(self, key, course_context=None):
        """Pull the compressed, pickled struct data from cache and deserialize."""
        if self.cache is None:
            return None
        with TIMER.timer("CourseStructureCache.get", course_context) as tagger:
            try:
                compressed_pickled_data = self.cache.get(key)
                tagger.tag(from_cache=str(compressed_pickled_data is not None).lower())
                if compressed_pickled_data is None:
                    # Always log cache misses, because they are unexpected
                    tagger.sample_rate = 1
                    return None
                tagger.measure('compressed_size', len(compressed_pickled_data))
                pickled_data = zlib.decompress(compressed_pickled_data)
                tagger.measure('uncompressed_size', len(pickled_data))
                if six.PY2:
                    return pickle.loads(pickled_data)
                else:
                    # latin-1 round-trips py2-pickled byte strings on py3.
                    return pickle.loads(pickled_data, encoding='latin-1')
            except Exception:
                # Deliberately broad: any failure (decompress, unpickle,
                # backend error) is treated as corrupt cache data.
                # The cached data is corrupt in some way, get rid of it.
                log.warning("CourseStructureCache: Bad data in cache for %s", course_context)
                self.cache.delete(key)
                return None
    def set(self, key, structure, course_context=None):
        """Given a structure, will pickle, compress, and write to cache."""
        if self.cache is None:
            return None
        with TIMER.timer("CourseStructureCache.set", course_context) as tagger:
            pickled_data = pickle.dumps(structure, 4)  # Protocol can't be incremented until cache is cleared
            tagger.measure('uncompressed_size', len(pickled_data))
            # 1 = Fastest (slightly larger results)
            compressed_pickled_data = zlib.compress(pickled_data, 1)
            tagger.measure('compressed_size', len(compressed_pickled_data))
            # Stuctures are immutable, so we set a timeout of "never"
            self.cache.set(key, compressed_pickled_data, None)
class MongoConnection(object):
    """
    Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
    """
    def __init__(
        self, db, collection, host, port=27017, tz_aware=True, user=None, password=None,
        asset_collection=None, retry_wait_time=0.1, **kwargs
    ):
        """
        Create & open the connection, authenticate, and provide pointers to the collections
        """
        # Set a write concern of 1, which makes writes complete successfully to the primary
        # only before returning. Also makes pymongo report write errors.
        kwargs['w'] = 1
        self.database = connect_to_mongodb(
            db, host,
            port=port, tz_aware=tz_aware, user=user, password=password,
            retry_wait_time=retry_wait_time, **kwargs
        )
        # Three collections share the configured base name: the course
        # index, the structure documents, and the definition documents.
        self.course_index = self.database[collection + '.active_versions']
        self.structures = self.database[collection + '.structures']
        self.definitions = self.database[collection + '.definitions']
    def heartbeat(self):
        """
        Check that the db is reachable.
        Returns True on success; raises HeartbeatFailure if the server
        cannot be contacted.
        """
        try:
            # The ismaster command is cheap and does not require auth.
            self.database.client.admin.command('ismaster')
            return True
        except pymongo.errors.ConnectionFailure:
            raise HeartbeatFailure("Can't connect to {}".format(self.database.name), 'mongo')
    def get_structure(self, key, course_context=None):
        """
        Get the structure from the persistence mechanism whose id is the given key.
        This method will use a cached version of the structure if it is available.
        Returns None if no document exists for the key.
        """
        with TIMER.timer("get_structure", course_context) as tagger_get_structure:
            # Cache-aside: try the course structure cache first, fall back
            # to mongo on a miss, then repopulate the cache.
            cache = CourseStructureCache()
            structure = cache.get(key, course_context)
            tagger_get_structure.tag(from_cache=str(bool(structure)).lower())
            if not structure:
                # Always log cache misses, because they are unexpected
                tagger_get_structure.sample_rate = 1
                with TIMER.timer("get_structure.find_one", course_context) as tagger_find_one:
                    doc = self.structures.find_one({'_id': key})
                    if doc is None:
                        log.warning(
                            "doc was None when attempting to retrieve structure for item with key %s",
                            six.text_type(key)
                        )
                        return None
                    tagger_find_one.measure("blocks", len(doc['blocks']))
                    structure = structure_from_mongo(doc, course_context)
                    tagger_find_one.sample_rate = 1
                cache.set(key, structure, course_context)
            return structure
    @autoretry_read()
    def find_structures_by_id(self, ids, course_context=None):
        """
        Return all structures that specified in ``ids``.
        Arguments:
            ids (list): A list of structure ids
        """
        with TIMER.timer("find_structures_by_id", course_context) as tagger:
            tagger.measure("requested_ids", len(ids))
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({'_id': {'$in': ids}})
            ]
            tagger.measure("structures", len(docs))
            return docs
    @autoretry_read()
    def find_courselike_blocks_by_id(self, ids, block_type, course_context=None):
        """
        Find all structures that specified in `ids`. Among the blocks only return block whose type is `block_type`.
        Arguments:
            ids (list): A list of structure ids
            block_type: type of block to return
        """
        with TIMER.timer("find_courselike_blocks_by_id", course_context) as tagger:
            tagger.measure("requested_ids", len(ids))
            # $elemMatch projection keeps only the first matching block of
            # the requested type (plus 'root') in each returned document.
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find(
                    {'_id': {'$in': ids}},
                    {'blocks': {'$elemMatch': {'block_type': block_type}}, 'root': 1}
                )
            ]
            tagger.measure("structures", len(docs))
            return docs
    @autoretry_read()
    def find_structures_derived_from(self, ids, course_context=None):
        """
        Return all structures that were immediately derived from a structure listed in ``ids``.
        Arguments:
            ids (list): A list of structure ids
        """
        with TIMER.timer("find_structures_derived_from", course_context) as tagger:
            tagger.measure("base_ids", len(ids))
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({'previous_version': {'$in': ids}})
            ]
            tagger.measure("structures", len(docs))
            return docs
    @autoretry_read()
    def find_ancestor_structures(self, original_version, block_key, course_context=None):
        """
        Find all structures that originated from ``original_version`` that contain ``block_key``.
        Arguments:
            original_version (str or ObjectID): The id of a structure
            block_key (BlockKey): The id of the block in question
        """
        with TIMER.timer("find_ancestor_structures", course_context) as tagger:
            # Only structures where the block carries an
            # edit_info.update_version are considered.
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({
                    'original_version': original_version,
                    'blocks': {
                        '$elemMatch': {
                            'block_id': block_key.id,
                            'block_type': block_key.type,
                            'edit_info.update_version': {
                                '$exists': True,
                            },
                        },
                    },
                })
            ]
            tagger.measure("structures", len(docs))
            return docs
    def insert_structure(self, structure, course_context=None):
        """
        Insert a new structure into the database.
        """
        with TIMER.timer("insert_structure", course_context) as tagger:
            tagger.measure("blocks", len(structure["blocks"]))
            self.structures.insert_one(structure_to_mongo(structure, course_context))
    def get_course_index(self, key, ignore_case=False):
        """
        Get the course_index from the persistence mechanism whose id is the given key
        Returns the raw mongo document, or None if not found.
        """
        with TIMER.timer("get_course_index", key):
            if ignore_case:
                # Anchored, escaped, case-insensitive match on each of
                # org/course/run.
                query = {
                    key_attr: re.compile(u'^{}$'.format(re.escape(getattr(key, key_attr))), re.IGNORECASE)
                    for key_attr in ('org', 'course', 'run')
                }
            else:
                query = {
                    key_attr: getattr(key, key_attr)
                    for key_attr in ('org', 'course', 'run')
                }
            return self.course_index.find_one(query)
    def find_matching_course_indexes(
            self,
            branch=None,
            search_targets=None,
            org_target=None,
            course_context=None,
            course_keys=None
    ):
        """
        Find the course_index matching particular conditions.
        Returns a pymongo cursor over the matching index documents.
        Arguments:
            branch: If specified, this branch must exist in the returned courses
            search_targets: If specified, this must be a dictionary specifying field values
                that must exist in the search_targets of the returned courses
            org_target: If specified, this is an ORG filter so that only course_indexs are
                returned for the specified ORG
            course_keys: If specified, the other filters are ignored and an
                $or query over these course keys (plus branch) is used instead
        """
        with TIMER.timer("find_matching_course_indexes", course_context):
            query = {}
            if course_keys:
                courses_queries = self._generate_query_from_course_keys(branch, course_keys)
                query['$or'] = courses_queries
            else:
                if branch is not None:
                    query['versions.{}'.format(branch)] = {'$exists': True}
                if search_targets:
                    for key, value in six.iteritems(search_targets):
                        query['search_targets.{}'.format(key)] = value
                if org_target:
                    query['org'] = org_target
            return self.course_index.find(query)
    def _generate_query_from_course_keys(self, branch, course_keys):
        """
        Generate query for courses using course keys
        Returns a list of per-course query dicts suitable for $or.
        """
        courses_queries = []
        query = {}
        if branch:
            query = {'versions.{}'.format(branch): {'$exists': True}}
        for course_key in course_keys:
            course_query = {
                key_attr: getattr(course_key, key_attr)
                for key_attr in ('org', 'course', 'run')
            }
            course_query.update(query)
            courses_queries.append(course_query)
        return courses_queries
    def insert_course_index(self, course_index, course_context=None):
        """
        Create the course_index in the db
        Stamps the document with a 'last_update' timestamp (UTC) first.
        """
        with TIMER.timer("insert_course_index", course_context):
            course_index['last_update'] = datetime.datetime.now(pytz.utc)
            self.course_index.insert_one(course_index)
    def update_course_index(self, course_index, from_index=None, course_context=None):
        """
        Update the db record for course_index.
        Arguments:
            from_index: If set, only update an index if it matches the one specified in `from_index`.
        """
        with TIMER.timer("update_course_index", course_context):
            if from_index:
                query = {"_id": from_index["_id"]}
                # last_update not only tells us when this course was last updated but also helps
                # prevent collisions
                if 'last_update' in from_index:
                    query['last_update'] = from_index['last_update']
            else:
                query = {
                    'org': course_index['org'],
                    'course': course_index['course'],
                    'run': course_index['run'],
                }
            course_index['last_update'] = datetime.datetime.now(pytz.utc)
            # upsert=False: a stale last_update (lost race) means no
            # document matches and nothing is written.
            self.course_index.replace_one(query, course_index, upsert=False,)
    def delete_course_index(self, course_key):
        """
        Delete the course_index from the persistence mechanism whose id is the given course_index
        """
        with TIMER.timer("delete_course_index", course_key):
            query = {
                key_attr: getattr(course_key, key_attr)
                for key_attr in ('org', 'course', 'run')
            }
            # NOTE(review): Collection.remove() is deprecated in pymongo 3
            # (removed in 4) in favor of delete_one/delete_many -- confirm
            # the pinned pymongo version before modernizing.
            return self.course_index.remove(query)
    def get_definition(self, key, course_context=None):
        """
        Get the definition from the persistence mechanism whose id is the given key
        """
        with TIMER.timer("get_definition", course_context) as tagger:
            definition = self.definitions.find_one({'_id': key})
            tagger.measure("fields", len(definition['fields']))
            tagger.tag(block_type=definition['block_type'])
            return definition
    def get_definitions(self, definitions, course_context=None):
        """
        Retrieve all definitions listed in `definitions`.
        Returns the result of ``find`` (a pymongo cursor), not a list.
        """
        with TIMER.timer("get_definitions", course_context) as tagger:
            tagger.measure('definitions', len(definitions))
            # Rebinds the parameter: 'definitions' goes from id-list to cursor.
            definitions = self.definitions.find({'_id': {'$in': definitions}})
            return definitions
    def insert_definition(self, definition, course_context=None):
        """
        Create the definition in the db
        """
        with TIMER.timer("insert_definition", course_context) as tagger:
            tagger.measure('fields', len(definition['fields']))
            tagger.tag(block_type=definition['block_type'])
            self.definitions.insert_one(definition)
    def ensure_indexes(self):
        """
        Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
        an exception if unable to.
        This method is intended for use by tests and administrative commands, and not
        to be run during server startup.
        """
        # Unique compound index guarantees one active_versions document
        # per (org, course, run).
        create_collection_index(
            self.course_index,
            [
                ('org', pymongo.ASCENDING),
                ('course', pymongo.ASCENDING),
                ('run', pymongo.ASCENDING)
            ],
            unique=True,
            background=True
        )
    def close_connections(self):
        """
        Closes any open connections to the underlying databases
        """
        self.database.client.close()
    def _drop_database(self, database=True, collections=True, connections=True):
        """
        A destructive operation to drop the underlying database and close all connections.
        Intended to be used by test code for cleanup.
        If database is True, then this should drop the entire database.
        Otherwise, if collections is True, then this should drop all of the collections used
        by this modulestore.
        Otherwise, the modulestore should remove all data from the collections.
        If connections is True, then close the connection to the database as well.
        """
        connection = self.database.client
        if database:
            connection.drop_database(self.database.name)
        elif collections:
            self.course_index.drop()
            self.structures.drop()
            self.definitions.drop()
        else:
            # NOTE(review): remove({}) is the deprecated pymongo 2/3 API
            # for "delete all documents" -- confirm before modernizing.
            self.course_index.remove({})
            self.structures.remove({})
            self.definitions.remove({})
        if connections:
            connection.close()
| msegado/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/mongo_connection.py | Python | agpl-3.0 | 23,404 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Converts a mixture / source lists from DESED format to ssdata format.
Usage example:
SCRIPT_PATH=/data/src && \
LISTS_PATH=/data/lists && \
BASE_OUT=DESED_synthetic_2020_audio_train_synthetic20_soundscapes && \
python3 ${SCRIPT_PATH}/convert_desed_list.py \
--mixtures ${LISTS_PATH}/${BASE_OUT}_train.txt \
--sources ${LISTS_PATH}/${BASE_OUT}_sources_train.txt \
--output ${LISTS_PATH}/${BASE_OUT}_sslist_train.txt
"""
import argparse
import collections
def main():
    """Join DESED mixture and source lists into one tab-separated list.

    Each output line contains a mixture filename followed by all of its
    source filenames, joined by tabs.  Entries are matched on the leading
    key of the filename: the text before the first '.' for mixtures, and
    before the first '_' for sources.  Output preserves the order in
    which mixtures appear in the mixture list.
    """
    parser = argparse.ArgumentParser(
        description='Make mixing list.')
    parser.add_argument(
        '-m', '--mixtures', help='DESED mixture list', required=True)
    parser.add_argument(
        # BUGFIX: help text previously said 'desed mixture list' (copy-paste).
        '-s', '--sources', help='DESED source list', required=True)
    parser.add_argument(
        '-o', '--output', help='Output list file.', required=True)
    args = parser.parse_args()

    with open(args.sources, 'r') as f:
        sourcelines = f.read().splitlines()
    with open(args.mixtures, 'r') as f:
        mixlines = f.read().splitlines()

    # Group mixtures and their sources under a shared key; key_list keeps
    # the mixtures' original order for the output.
    key_list = []
    source_dict = collections.defaultdict(list)
    for line in mixlines:
        key = line.split('.')[0]
        source_dict[key].append(line)
        key_list.append(key)
    for line in sourcelines:
        key = line.split('_')[0]
        source_dict[key].append(line)

    with open(args.output, 'w') as f:
        for key in key_list:
            f.write('\t'.join(source_dict[key]) + '\n')


if __name__ == '__main__':
    main()
| google-research/sound-separation | models/dcase2020_desed_fuss_baseline/convert_desed_lists.py | Python | apache-2.0 | 2,047 |
#!/usr/bin/env python
# Foundations of Python Network Programming - Chapter 15 - folder_info.py
# Opening an IMAP connection with IMAPClient and listing folder information.
import getpass, sys
from imapclient import IMAPClient
# NOTE: This is Python 2 code ('print' statements, 'except X, e' syntax).
# Require exactly two CLI arguments: IMAP hostname and account username.
try:
    hostname, username = sys.argv[1:]
except ValueError:
    print 'usage: %s hostname username' % sys.argv[0]
    sys.exit(2)
# Connect over SSL and prompt interactively for the password.
c = IMAPClient(hostname, ssl=True)
try:
    c.login(username, getpass.getpass())
except c.Error, e:
    print 'Could not log in:', e
    sys.exit(1)
else:
    # Read-only SELECT returns a dict of folder attributes (EXISTS,
    # RECENT, FLAGS, ...) which we print verbatim.
    select_dict = c.select_folder('INBOX', readonly=True)
    for k, v in select_dict.items():
        print '%s: %r' % (k, v)
c.logout()
| jac2130/BayesGame | foundations-of-python-network-programming/python2/15/folder_info.py | Python | mit | 659 |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_utils import uuidutils
from testtools.matchers import HasLength
from magnum.common import exception
from magnum import objects
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils
class TestClusterObject(base.DbTestCase):
def setUp(self):
super(TestClusterObject, self).setUp()
self.fake_cluster = utils.get_test_cluster()
self.fake_nodegroups = utils.get_nodegroups_for_cluster()
self.fake_cluster['trust_id'] = 'trust_id'
self.fake_cluster['trustee_username'] = 'trustee_user'
self.fake_cluster['trustee_user_id'] = 'trustee_user_id'
self.fake_cluster['trustee_password'] = 'password'
self.fake_cluster['coe_version'] = 'fake-coe-version'
self.fake_cluster['container_version'] = 'fake-container-version'
cluster_template_id = self.fake_cluster['cluster_template_id']
self.fake_cluster_template = objects.ClusterTemplate(
uuid=cluster_template_id)
self.fake_cluster['keypair'] = 'keypair1'
self.fake_cluster['docker_volume_size'] = 3
self.fake_cluster['labels'] = {}
self.fake_cluster['health_status'] = 'HEALTHY'
self.fake_cluster['health_status_reason'] = {}
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
def test_get_by_id(self, mock_cluster_template_get):
cluster_id = self.fake_cluster['id']
with mock.patch.object(self.dbapi, 'get_cluster_by_id',
autospec=True) as mock_get_cluster:
mock_cluster_template_get.return_value = self.fake_cluster_template
mock_get_cluster.return_value = self.fake_cluster
cluster = objects.Cluster.get(self.context, cluster_id)
mock_get_cluster.assert_called_once_with(self.context, cluster_id)
self.assertEqual(self.context, cluster._context)
self.assertEqual(cluster.cluster_template_id,
cluster.cluster_template.uuid)
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
def test_get_by_uuid(self, mock_cluster_template_get):
uuid = self.fake_cluster['uuid']
with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
autospec=True) as mock_get_cluster:
mock_cluster_template_get.return_value = self.fake_cluster_template
mock_get_cluster.return_value = self.fake_cluster
cluster = objects.Cluster.get(self.context, uuid)
mock_get_cluster.assert_called_once_with(self.context, uuid)
self.assertEqual(self.context, cluster._context)
self.assertEqual(cluster.cluster_template_id,
cluster.cluster_template.uuid)
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
def test_get_by_name(self, mock_cluster_template_get):
name = self.fake_cluster['name']
with mock.patch.object(self.dbapi, 'get_cluster_by_name',
autospec=True) as mock_get_cluster:
mock_cluster_template_get.return_value = self.fake_cluster_template
mock_get_cluster.return_value = self.fake_cluster
cluster = objects.Cluster.get_by_name(self.context, name)
mock_get_cluster.assert_called_once_with(self.context, name)
self.assertEqual(self.context, cluster._context)
self.assertEqual(cluster.cluster_template_id,
cluster.cluster_template.uuid)
def test_get_bad_id_and_uuid(self):
self.assertRaises(exception.InvalidIdentity,
objects.Cluster.get, self.context, 'not-a-uuid')
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
def test_list(self, mock_cluster_template_get):
with mock.patch.object(self.dbapi, 'get_cluster_list',
autospec=True) as mock_get_list:
mock_get_list.return_value = [self.fake_cluster]
mock_cluster_template_get.return_value = self.fake_cluster_template
clusters = objects.Cluster.list(self.context)
self.assertEqual(1, mock_get_list.call_count)
self.assertThat(clusters, HasLength(1))
self.assertIsInstance(clusters[0], objects.Cluster)
self.assertEqual(self.context, clusters[0]._context)
self.assertEqual(clusters[0].cluster_template_id,
clusters[0].cluster_template.uuid)
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
def test_list_all(self, mock_cluster_template_get):
with mock.patch.object(self.dbapi, 'get_cluster_list',
autospec=True) as mock_get_list:
mock_get_list.return_value = [self.fake_cluster]
mock_cluster_template_get.return_value = self.fake_cluster_template
self.context.all_tenants = True
clusters = objects.Cluster.list(self.context)
mock_get_list.assert_called_once_with(
self.context, limit=None, marker=None, filters=None,
sort_dir=None, sort_key=None)
self.assertEqual(1, mock_get_list.call_count)
self.assertThat(clusters, HasLength(1))
self.assertIsInstance(clusters[0], objects.Cluster)
self.assertEqual(self.context, clusters[0]._context)
mock_cluster_template_get.assert_not_called()
def test_list_with_filters(self):
with mock.patch.object(self.dbapi, 'get_cluster_list',
autospec=True) as mock_get_list:
mock_get_list.return_value = [self.fake_cluster]
filters = {'name': 'cluster1'}
clusters = objects.Cluster.list(self.context, filters=filters)
mock_get_list.assert_called_once_with(self.context, sort_key=None,
sort_dir=None,
filters=filters, limit=None,
marker=None)
self.assertEqual(1, mock_get_list.call_count)
self.assertThat(clusters, HasLength(1))
self.assertIsInstance(clusters[0], objects.Cluster)
self.assertEqual(self.context, clusters[0]._context)
def test_create(self):
with mock.patch.object(self.dbapi, 'create_cluster',
autospec=True) as mock_create_cluster:
mock_create_cluster.return_value = self.fake_cluster
cluster = objects.Cluster(self.context, **self.fake_cluster)
cluster.create()
mock_create_cluster.assert_called_once_with(self.fake_cluster)
self.assertEqual(self.context, cluster._context)
def test_destroy(self):
uuid = self.fake_cluster['uuid']
with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
autospec=True) as mock_get_cluster:
mock_get_cluster.return_value = self.fake_cluster
with mock.patch.object(self.dbapi, 'destroy_cluster',
autospec=True) as mock_destroy_cluster:
cluster = objects.Cluster.get_by_uuid(self.context, uuid)
cluster.destroy()
mock_get_cluster.assert_called_once_with(self.context, uuid)
mock_destroy_cluster.assert_called_once_with(uuid)
self.assertEqual(self.context, cluster._context)
def test_save(self):
uuid = self.fake_cluster['uuid']
with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
autospec=True) as mock_get_cluster:
mock_get_cluster.return_value = self.fake_cluster
with mock.patch.object(self.dbapi, 'update_cluster',
autospec=True) as mock_update_cluster:
cluster = objects.Cluster.get_by_uuid(self.context, uuid)
cluster.status = 'DELETE_IN_PROGRESS'
cluster.save()
mock_get_cluster.assert_called_once_with(self.context, uuid)
mock_update_cluster.assert_called_once_with(
uuid, {'status': 'DELETE_IN_PROGRESS'})
self.assertEqual(self.context, cluster._context)
    def test_refresh(self):
        """refresh() re-reads the record and replaces in-memory fields."""
        uuid = self.fake_cluster['uuid']
        new_uuid = uuidutils.generate_uuid()
        # First lookup returns the original uuid; the refresh lookup returns
        # a record whose uuid changed -- proving fields were reloaded.
        returns = [dict(self.fake_cluster, uuid=uuid),
                   dict(self.fake_cluster, uuid=new_uuid)]
        expected = [mock.call(self.context, uuid),
                    mock.call(self.context, uuid)]
        with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
                               side_effect=returns,
                               autospec=True) as mock_get_cluster:
            cluster = objects.Cluster.get_by_uuid(self.context, uuid)
            self.assertEqual(uuid, cluster.uuid)
            cluster.refresh()
            self.assertEqual(new_uuid, cluster.uuid)
            self.assertEqual(expected, mock_get_cluster.call_args_list)
            self.assertEqual(self.context, cluster._context)
| ArchiFleKs/magnum | magnum/tests/unit/objects/test_cluster.py | Python | apache-2.0 | 9,842 |
# Copyright 2016 Andreas Riegg - t-h-i-n-x.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Shared value store populated at runtime by the two example scripts.
VALUES = {}
| ariegg/webiopi-examples | twoscripts/twoscriptscommon.py | Python | apache-2.0 | 626 |
"""Tests the test handler"""
from logbook import Logger, TestHandler
log = Logger('Test logger')
def run():
    """Benchmark: log 500 warnings into a TestHandler, which captures them."""
    with TestHandler() as handler:
        # range() instead of xrange(): xrange does not exist on Python 3
        # (NameError); for 500 iterations the difference on 2.x is negligible.
        for x in range(500):
            log.warning('this is not handled')
| ayvazj/logbook | benchmark/bench_test_handler.py | Python | bsd-3-clause | 224 |
# -*- coding: utf-8 -*-
"""
Tests for IBM Model 2 training methods
"""
import unittest
from collections import defaultdict
from nltk.translate import AlignedSent
from nltk.translate import IBMModel
from nltk.translate import IBMModel2
from nltk.translate.ibm_model import AlignmentInfo
class TestIBMModel2(unittest.TestCase):
    """Unit tests for IBM Model 2 probability initialization and scoring."""

    def test_set_uniform_alignment_probabilities(self):
        """Alignment probabilities initialize to 1 / (source length + 1)."""
        # arrange
        corpus = [
            AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
            AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
        ]
        model2 = IBMModel2(corpus, 0)
        # act
        model2.set_uniform_probabilities(corpus)
        # assert
        # expected_prob = 1.0 / (length of source sentence + 1)
        self.assertEqual(model2.alignment_table[0][1][3][2], 1.0 / 4)
        self.assertEqual(model2.alignment_table[2][4][2][4], 1.0 / 3)

    def test_set_uniform_alignment_probabilities_of_non_domain_values(self):
        """Positions outside the training domain fall back to MIN_PROB."""
        # arrange
        corpus = [
            AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
            AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
        ]
        model2 = IBMModel2(corpus, 0)
        # act
        model2.set_uniform_probabilities(corpus)
        # assert
        # examine i and j values that are not in the training data domain
        self.assertEqual(model2.alignment_table[99][1][3][2], IBMModel.MIN_PROB)
        self.assertEqual(model2.alignment_table[2][99][2][4], IBMModel.MIN_PROB)

    def test_prob_t_a_given_s(self):
        """P(t,a|s) is the product of lexical and alignment probabilities."""
        # arrange
        src_sentence = ["ich", 'esse', 'ja', 'gern', 'räucherschinken']
        trg_sentence = ['i', 'love', 'to', 'eat', 'smoked', 'ham']
        corpus = [AlignedSent(trg_sentence, src_sentence)]
        # Alignment tuple maps each 1-based target position to a source
        # position (0 = NULL word).
        alignment_info = AlignmentInfo((0, 1, 4, 0, 2, 5, 5),
                                       [None] + src_sentence,
                                       ['UNUSED'] + trg_sentence,
                                       None)
        translation_table = defaultdict(lambda: defaultdict(float))
        translation_table['i']['ich'] = 0.98
        translation_table['love']['gern'] = 0.98
        translation_table['to'][None] = 0.98
        translation_table['eat']['esse'] = 0.98
        translation_table['smoked']['räucherschinken'] = 0.98
        translation_table['ham']['räucherschinken'] = 0.98
        alignment_table = defaultdict(
            lambda: defaultdict(lambda: defaultdict(
                lambda: defaultdict(float))))
        alignment_table[0][3][5][6] = 0.97  # None -> to
        alignment_table[1][1][5][6] = 0.97  # ich -> i
        alignment_table[2][4][5][6] = 0.97  # esse -> eat
        alignment_table[4][2][5][6] = 0.97  # gern -> love
        alignment_table[5][5][5][6] = 0.96  # räucherschinken -> smoked
        alignment_table[5][6][5][6] = 0.96  # räucherschinken -> ham
        model2 = IBMModel2(corpus, 0)
        model2.translation_table = translation_table
        model2.alignment_table = alignment_table
        # act
        probability = model2.prob_t_a_given_s(alignment_info)
        # assert
        lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
        alignment = 0.97 * 0.97 * 0.97 * 0.97 * 0.96 * 0.96
        expected_probability = lexical_translation * alignment
        self.assertEqual(round(probability, 4), round(expected_probability, 4))
| sdoran35/hate-to-hugs | venv/lib/python3.6/site-packages/nltk/test/unit/translate/test_ibm2.py | Python | mit | 3,433 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add the rango UserProfile model: a one-to-one extension of auth User."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('rango', '0003_category_slug'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('website', models.URLField(blank=True)),
                ('picture', models.ImageField(upload_to='profile_images', blank=True)),
                # NOTE(review): OneToOneField without on_delete is valid only
                # on Django < 2.0; on_delete is required from 2.0 onward.
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| wangjie1492/tango_with_django_project | rango/migrations/0004_userprofile.py | Python | apache-2.0 | 791 |
# Copyright (c) 2017-2019 The University of Manchester
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast
import re
import sys
import time

from collections import defaultdict
class Counter(object):
    """Accumulates a job count and total elapsed seconds, optionally labelled."""

    def __init__(self, name=None):
        self._name = name
        self._jobs = 0
        self._time = 0

    def register(self, time_increment):
        """Record one job that took ``time_increment`` seconds."""
        self._jobs += 1
        self._time += time_increment

    def __str__(self):
        # Insert "<name> " before "jobs" only when a label was given.
        label = "" if self._name is None else "{} ".format(self._name)
        return "Total of {} {}jobs took {}".format(
            self._jobs, label, get_time_in_hours(self._time))
def get_time(time_str):
    """Parse a log timestamp ('%Y-%m-%d %H:%M:%S,%f') into local epoch seconds."""
    parsed = time.strptime(time_str, '%Y-%m-%d %H:%M:%S,%f')
    return time.mktime(parsed)
def get_time_in_hours(seconds):
    """Format a duration in seconds as 'H:M:S' (no zero padding)."""
    total = int(round(seconds))
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return "{}:{}:{}".format(hours, minutes, secs)
# ip address -> list of (owner, create-timestamp) for jobs awaiting power-on
create_job = defaultdict(list)
# job id -> (owner, create-timestamp) for jobs powered on but not yet shut down
power_on = dict()
total = Counter()
total_NMPI = Counter("NMPI")
total_machine_test = Counter("Machine test")
total_other = Counter("Other")
# distinct owners other than NMPI / machine tests, for the final count
other_users = set()
def process_create(match):
    """Record a create_job log event: remember (owner, timestamp) per client IP."""
    date = match.group(1)
    # ast.literal_eval instead of eval: the job dict is read back from a log
    # file, i.e. untrusted text -- eval would execute arbitrary expressions
    # found there. literal_eval only accepts Python literals (the dict repr
    # the server logged) and raises on anything else.
    dict_object = ast.literal_eval(match.group(2))
    ip_address = match.group(3)
    create_job[ip_address].append((dict_object["owner"], get_time(date)))
def process_power_on(match):
    """On an 'On' power event, move a pending create for that IP into
    power_on, keyed by job id."""
    # date isn't used
    # date = match.group(1)
    job_id = match.group(2)
    state = match.group(3)
    ip_address = match.group(4)
    if state == "On" and len(create_job[ip_address]) > 0:
        # NOTE(review): .pop() takes the most recent create for this IP, not
        # the oldest -- assumes one outstanding create per IP; confirm.
        owner, start_time = create_job[ip_address].pop()
        if len(create_job[ip_address]) == 0:
            del create_job[ip_address]
        power_on[job_id] = (owner, start_time)
def process_completed(match):
    """On shutdown completion, add the job's wall-clock time to the overall
    tally and to the matching per-owner-category tally."""
    date = match.group(1)
    job_id = match.group(2)
    if job_id in power_on:
        owner, start_time = power_on[job_id]
        del power_on[job_id]
        end_time = get_time(date)
        time_in_seconds = end_time - start_time
        total.register(time_in_seconds)
        if owner == 'NMPI':
            total_NMPI.register(time_in_seconds)
        elif owner == 'machine tests':
            total_machine_test.register(time_in_seconds)
        else:
            total_other.register(time_in_seconds)
            other_users.add(owner)
def scan_logfile(log_file_name):
    """Parse one log file line by line, dispatching each recognised event
    (create_job / power_job / completed shutdown) to its handler.

    destroy_job lines are recognised but deliberately ignored; any other
    line format is reported to stdout.
    """
    with open(log_file_name, "r") as log:
        for line in log:
            match_create = re.search(
                r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}):"
                r" create_job\(\(.*\),(\{.*\})\) from (.*)", line)
            if match_create is not None:
                process_create(match_create)
                continue
            match_power_on = re.search(
                r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}):"
                r" power_job\((\d+),(On|Off)\) from (.*)", line)
            if match_power_on is not None:
                process_power_on(match_power_on)
                continue
            match_completed = re.search(
                r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}):"
                r" completed shutdown of job (\d+)", line)
            if match_completed is not None:
                process_completed(match_completed)
                continue
            match_destroyed = re.search(
                r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}):"
                " destroy_job", line)
            if match_destroyed is None:
                print("Unknown line format for line {}".format(line))
# Process every log file named on the command line, then print a summary.
for filename in sys.argv[1:]:
    scan_logfile(filename)
# Jobs still queued in create_job never saw an "On" power event.
print("Missing power on for {} jobs".format(len(create_job)))
# Jobs still in power_on never saw a completed shutdown.
print("Missing shutdown for {} jobs".format(len(power_on)))
print(str(total))
print(str(total_NMPI))
print(str(total_machine_test))
print(str(total_other))
print("Total of {} Other users".format(len(other_users)))
| project-rig/spalloc_server | count_times.py | Python | gpl-2.0 | 4,487 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from nova.conf import utils as confutils
# Service type used to look the identity service up in the service catalog.
DEFAULT_SERVICE_TYPE = 'identity'

keystone_group = cfg.OptGroup(
    'keystone',
    title='Keystone Options',
    help='Configuration options for the identity service')
def register_opts(conf):
    """Register the [keystone] group and its ksa adapter options on conf."""
    conf.register_group(keystone_group)
    # include_auth=False: nova authenticates to keystone with the service
    # user elsewhere; only session/adapter options are registered here.
    confutils.register_ksa_opts(conf, keystone_group.name,
                                DEFAULT_SERVICE_TYPE, include_auth=False)
def list_opts():
    """Return {group: options} consumed by oslo.config's sample generator."""
    return {
        keystone_group: (
            ks_loading.get_session_conf_options() +
            confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)
        )
    }
| rahulunair/nova | nova/conf/keystone.py | Python | apache-2.0 | 1,257 |
import os
import fnmatch
import locale
import subprocess
# doit configuration: a plain `doit` run executes flake8 + tests, and the
# reporter only prints tasks that actually executed.
DOIT_CONFIG = {
    'default_tasks': ['flake8', 'test'],
    'reporter': 'executed-only',
}
def recursive_glob(path, pattern):
    """recursively walk path directories and return files matching the pattern"""
    for dirpath, _dirnames, files in os.walk(path, followlinks=True):
        for name in fnmatch.filter(files, pattern):
            yield os.path.join(dirpath, name)
def task_flake8():
    """flake8 - static check for python files"""
    target = os.path.join(os.getcwd(), 'nikola')
    yield {'name': target, 'actions': ['flake8 nikola/']}
def task_pydocstyle():
    """pydocstyle -- static check for docstring style"""
    target = os.path.join(os.getcwd(), 'nikola')
    yield {
        'name': target,
        'actions': ["pydocstyle --count --match-dir='(?!^\.)(?!data).*' nikola/"],
    }
def task_locale():
    """set environ locale vars used in nikola tests"""
    def set_nikola_test_locales():
        """Find two installed UTF-8 locales with distinct languages and
        export them as NIKOLA_LOCALE_DEFAULT / NIKOLA_LOCALE_OTHER
        ("lang,locale" pairs). Returns False (task failure) when fewer
        than two suitable locales exist.
        """
        try:
            out = subprocess.check_output(['locale', '-a'])
            out = out.decode('utf-8')
            locales = []
            languages = set()
            for line in out.splitlines():
                if (line.endswith('.utf8') or line.endswith('.UTF-8')) and '_' in line:
                    lang = line.split('_')[0]
                    if lang not in languages:
                        try:
                            locale.setlocale(locale.LC_ALL, str(line))
                        except locale.Error:
                            # Listed but not activatable -> skip. Was a bare
                            # `except:`, which also swallowed SystemExit and
                            # KeyboardInterrupt.
                            continue
                        languages.add(lang)
                        locales.append((lang, line))
                        if len(locales) == 2:
                            break
            if len(locales) != 2:
                return False  # task failed
            else:
                os.environ['NIKOLA_LOCALE_DEFAULT'] = ','.join(locales[0])
                os.environ['NIKOLA_LOCALE_OTHER'] = ','.join(locales[1])
        finally:
            # restore to default locale
            # NOTE(review): locale.resetlocale() was removed in Python 3.13;
            # replace with locale.setlocale(locale.LC_ALL, '') when upgrading.
            locale.resetlocale()
    return {'actions': [set_nikola_test_locales], 'verbosity': 2}
def task_test():
    """run unit-tests using py.test"""
    return dict(task_dep=['locale'], actions=['py.test tests/'])
def task_coverage():
    """run unit-tests using py.test, with coverage reporting"""
    return dict(
        task_dep=['locale'],
        actions=['py.test --cov nikola --cov-report term-missing tests/'],
        verbosity=2,
    )
def task_gen_completion():
    """generate tab-completion scripts"""
    template = 'nikola tabcompletion --shell {0} --hardcode-tasks > _nikola_{0}'
    for shell_name in ('bash', 'zsh'):
        yield {
            'name': shell_name,
            'actions': [template.format(shell_name)],
            'targets': ['_nikola_{0}'.format(shell_name)],
        }
| xuhdev/nikola | dodo.py | Python | mit | 2,829 |
# Django settings for wayf project.
import os
def here(x):
    """Return an absolute path for *x* relative to this settings file's directory."""
    # def instead of an assigned lambda (PEP 8 E731): identical behavior,
    # but tracebacks and repr() show the function name.
    return os.path.join(os.path.abspath(os.path.dirname(__file__)), x)
# NOTE(review): DEBUG must be False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# NOTE(review): the single DATABASE_* settings are the pre-Django-1.2 style;
# modern Django reads the DATABASES dict instead.
DATABASE_ENGINE = ''           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Athens'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = here('static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded SECRET_KEY committed to version control is a
# security risk -- load it from the environment or a secrets store instead.
SECRET_KEY = 'w70vxdbe(^&92@^a)b%jm=8p0@-o$ykbfal2)tn%ssky(t*z5l'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    #'filesystem_multilingual.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.locale.LocaleMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    here('templates'),
)
INSTALLED_APPS = (
    'django.contrib.staticfiles',
    'wayf',
)
# Absolute path of the directory containing this settings module.
# (Was a duplicated "SITE_ROOT = SITE_ROOT = ..." assignment.)
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
STATIC_URL = 'static/'
STATIC_ROOT = 'static'
# NOTE(review): Django's staticfiles app reads STATICFILES_DIRS (plural);
# this STATICFILES_DIR name is never consulted by Django. Confirm intent
# before renaming, since that changes the module's public names.
STATICFILES_DIR = (
    os.path.join(SITE_ROOT, 'static/'),
)
# --- WAYF / Shibboleth discovery-service specific settings -----------------
IDP_COOKIE = 'grnet_selected_idp'        # cookie storing the user's saved IdP choice
SHIB_METADATA = 'federation-metadata.xml'  # local federation metadata file
LAST_IDP_COOKIE = 'grnet_last_idp'       # cookie remembering the last-used IdP
COOKIE_DOMAIN = '.grnet.gr'
LANGUAGE_COOKIE_NAME = 'grnet_aai_language'
WAYF_SITENAME='localhost:8000'
# Categories used to group IdPs in the discovery UI; the second element is
# the display label (not wrapped for translation here).
INSTITUTION_CATEGORIES = (
    ('university', ("Universities")),
    ('tei', ("Technological educational institutes")),
    ('school', ("Other academic institutions")),
    ('institute', ("Research institutes")),
    ('other', ("Please select your institute")),
    ('test', ("Testing")),
)
# P3P compact privacy policy header sent with cookie-setting responses.
P3P_HEADER = 'CP="NOI CUR DEVa OUR IND COM NAV PRE"'
| JensTimmerman/django-wayf | settings.py | Python | gpl-3.0 | 3,496 |
class Solution:
    # @param {integer[]} nums
    # @param {integer} target
    # @return {integer}
    def search(self, nums, target):
        """Binary-search a rotated sorted array; return target's index or -1.

        O(log n): at each step decide whether target can lie in the half
        containing the midpoint, accounting for the rotation point.
        """
        l, r = 0, len(nums) - 1
        while l <= r:
            # // (floor division) instead of /: on Python 3, `/` produces a
            # float and nums[m] would raise TypeError.
            m = (l + r) // 2
            if nums[m] == target:
                return m
            elif nums[m] > target:
                if nums[m] > nums[r] and target < nums[l]:
                    # mid sits in the higher (left) run and target is below
                    # everything in it -> search the wrapped right half
                    l = m + 1
                else:
                    r = m - 1
            else:
                if nums[m] < nums[r] and target > nums[r]:
                    # mid sits in the lower (right) run and target is above
                    # everything in it -> search the left half
                    r = m - 1
                else:
                    l = m + 1
        return -1
| Chasego/codi | leetcode/033-Search-in-Rotated-Sorted-Array/SearchinRSArr_001.py | Python | mit | 632 |
"""
GRAMMAR
=======
documentation: header param_list code
header: TEXT*
param_list: param_doc*
code: CODE_FENCE TEXT CODE_FENCE
param_doc: LEFT_BRACE param_id? RIGHT_BRACE type_hint? value_hint? TEXT*
param_id: identifier | flag
identifier: IDENTIFIER | INTEGER
flag: FLAG
type_hint: LEFT_PAREN IDENTIFIER RIGHT_PAREN
value_hint: LEFT_BRACKET (value_list | value_range) RIGHT_BRACKET
value_list: value (COMMA value)*
value_range: DIGIT COLON digit (COLON digit)? (STAR digit)?
value: STAR? (STRING | digit)
digit: INTEGER | FLOAT
"""
from clisnips.exceptions import ParsingError
from clisnips.syntax.documentation.lexer import Tokens
from clisnips.syntax.documentation.nodes import *
from clisnips.syntax.llk_parser import LLkParser
def _to_number(string):
if '.' in string:
return float(string)
return int(string)
class Parser(LLkParser):
def __init__(self, lexer):
super().__init__(lexer, 2)
self._auto_field_count = -1
self._has_numeric_field = False
self._ast = None
def parse(self):
self.reset()
self._auto_field_count = -1
self._has_numeric_field = False
self._ast = Documentation()
self._ast.header = self._text()
for param in self._param_list():
self._ast.parameters[param.name] = param
for block in self._code():
self._ast.code_blocks.append(block)
return self._ast
def _text(self):
"""
TEXT*
"""
text = []
while True:
t = self._lookahead()
if t.type is Tokens.TEXT:
self._consume()
text.append(t.value)
else:
break
return ''.join(text)
def _param_list(self):
"""
param_doc*
"""
while True:
t = self._lookahead()
if t.type is Tokens.LEFT_BRACE:
yield self._param_doc()
else:
break
def _code(self):
"""
CODEBLOCK*
"""
code_blocks = []
while self._lookahead_type() is Tokens.CODE_FENCE:
self._match(Tokens.CODE_FENCE)
code = self._match(Tokens.TEXT).value
try:
block = CodeBlock(code)
except SyntaxError as err:
raise ParsingError(f'Syntax error in code block: {code!r} \n{err!s}')
except TypeError as err:
raise ParsingError(f'Null bytes in code block: {code!r}')
else:
code_blocks.append(block)
self._match(Tokens.CODE_FENCE)
return code_blocks
def _param_doc(self):
"""
LEFT_BRACE param_id RIGHT_BRACE
type_hint? value_hint? TEXT*
"""
typehint, valuehint, text = None, None, None
self._match(Tokens.LEFT_BRACE)
param = self._param_id()
self._match(Tokens.RIGHT_BRACE)
token = self._lookahead()
if token.type is Tokens.LEFT_PAREN:
if param.type_hint == 'flag':
raise ParsingError('A flag cannot have a type hint.')
param.type_hint = self._typehint()
token = self._lookahead()
if token.type is Tokens.LEFT_BRACKET:
if param.type_hint == 'flag':
raise ParsingError('A flag cannot have a value hint.')
param.value_hint = self._valuehint()
token = self._lookahead()
if token.type is Tokens.TEXT:
param.text = self._text()
return param
def _param_id(self):
# no identifier, try automatic numbering
if self._lookahead_type() is Tokens.RIGHT_BRACE:
if self._has_numeric_field:
raise ParsingError('Cannot switch from manual to automatic field numbering')
self._auto_field_count += 1
return Parameter(str(self._auto_field_count))
token = self._match(Tokens.IDENTIFIER, Tokens.INTEGER, Tokens.FLAG)
# it's a flag
if token.type is Tokens.FLAG:
param = Parameter(token.value)
param.type_hint = 'flag'
return param
# it's an integer, check that numbering is correct
if token.type is Tokens.INTEGER:
if self._auto_field_count > -1:
raise ParsingError('Cannot switch from automatic to manual field numbering')
self._has_numeric_field = True
return Parameter(token.value)
def _typehint(self):
"""
LEFT_PAREN IDENTIFIER RIGHT_PAREN
"""
self._match(Tokens.LEFT_PAREN)
hint = self._match(Tokens.IDENTIFIER).value
self._match(Tokens.RIGHT_PAREN)
return hint
def _valuehint(self):
"""
LEFT_BRACKET (value_list | value_range) RIGHT_BRACKET
"""
self._match(Tokens.LEFT_BRACKET)
token = self._lookahead()
if (
token.type in (Tokens.INTEGER, Tokens.FLOAT)
and self._lookahead_type(2) is Tokens.COLON
):
valuehint = self._value_range()
else:
valuehint = self._value_list()
self._match(Tokens.RIGHT_BRACKET)
return valuehint
def _value_list(self):
"""
value (COMMA value)*
"""
values = []
default, count = 0, 0
initial = self._value()
values.append(initial['value'])
while True:
count += 1
t = self._lookahead()
if t.type is Tokens.COMMA:
self._consume()
value = self._value()
values.append(value['value'])
if value['default']:
default = count
else:
break
return ValueList(values, default)
def _value(self):
"""
STAR? (STRING | digit)
"""
is_default = False
token = self._match(Tokens.DEFAULT_MARKER, Tokens.STRING, Tokens.INTEGER, Tokens.FLOAT)
if token.type is Tokens.DEFAULT_MARKER:
is_default = True
token = self._match(Tokens.STRING, Tokens.INTEGER, Tokens.FLOAT)
if token.type is Tokens.STRING:
return {'value': token.value, 'default': is_default}
else:
return {'value': _to_number(token.value), 'default': is_default}
def _value_range(self):
"""
digit COLON digit (COLON digit)? (STAR digit)?
"""
start = self._digit().value
self._match(Tokens.COLON)
end = self._digit().value
step, default = None, None
token = self._lookahead()
if token.type is Tokens.COLON:
self._consume()
step = self._digit().value
token = self._lookahead()
if token.type is Tokens.DEFAULT_MARKER:
self._consume()
default = self._digit().value
return ValueRange(
_to_number(start),
_to_number(end),
_to_number(step) if step is not None else step,
_to_number(default) if default is not None else default,
)
def _digit(self):
return self._match(Tokens.INTEGER, Tokens.FLOAT)
| ju1ius/clisnips | clisnips/syntax/documentation/parser.py | Python | gpl-3.0 | 7,291 |
from Screens.Wizard import WizardSummary
from Screens.WizardLanguage import WizardLanguage
from Screens.Wizard import wizardManager
from Screens.Rc import Rc
from Screens.Screen import Screen
from Components.Label import Label
from Components.MenuList import MenuList
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS
from Components.Pixmap import Pixmap, MovingPixmap, MultiPixmap
from os import popen, path, makedirs, listdir, access, stat, rename, remove, W_OK, R_OK
from enigma import eEnv
from boxbranding import getBoxType
from Components.config import config, getConfigListEntry, ConfigSubsection, ConfigText, ConfigLocations, ConfigBoolean
from Components.Harddisk import harddiskmanager
boxtype = getBoxType()
# True only until the first-run wizard chain has completed.
config.misc.firstrun = ConfigBoolean(default = True)
config.plugins.configurationbackup = ConfigSubsection()
# Older Odin/Mara models keep backups on an internal /media/backup partition;
# every other box defaults to the hard disk.
if boxtype == "odinm9" or boxtype == "maram9" or boxtype == "odinm7" or boxtype == "odinm6":
	config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/backup/', visible_width = 50, fixed_size = False)
else:
	config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False)
# Files and directories included in a settings backup archive.
config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/CCcam.cfg', '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname'])
backupfile = "enigma2settingsbackup.tar.gz"
def checkConfigBackup():
	"""Search every mounted partition for a saved settings backup.

	When found, persist that mountpoint as the configured backup location
	and return the (description, mountpoint) tuple; otherwise return None.
	"""
	parts = [ (r.description, r.mountpoint) for r in harddiskmanager.getMountedPartitions(onlyhotplug = False)]
	if boxtype in ("odinm9", "maram9", "odinm7", "odinm6"):
		parts.append(('mtd backup','/media/backup'))
	# Filter with a comprehension: the original removed entries from the
	# list while iterating it, which can skip the element after a removal.
	parts = [x for x in parts if x[1] != '/']
	for x in parts:
		base = x[1] if x[1].endswith('/') else x[1] + '/'
		# Box-specific backup dir is preferred over the generic one.
		for subdir in ('backup_' + boxtype, 'backup'):
			if fileExists(base + subdir + '/' + backupfile):
				config.plugins.configurationbackup.backuplocation.setValue(str(x[1]))
				config.plugins.configurationbackup.backuplocation.save()
				config.plugins.configurationbackup.save()
				return x
	return None
def checkBackupFile():
	"""Return True when a saved settings backup exists at the configured
	backup location (box-specific 'backup_<boxtype>' dir checked first,
	then the generic 'backup' dir)."""
	location = config.plugins.configurationbackup.backuplocation.getValue()
	if not location.endswith('/'):
		location += '/'
	for folder in ('backup_' + boxtype, 'backup'):
		if fileExists(location + folder + '/' + backupfile):
			return True
	return False
# 1 when a saved settings backup was found on any mounted device (passed to
# wizardManager below to decide whether the restore wizard runs), else 0.
if checkConfigBackup() is None:
	backupAvailable = 0
else:
	backupAvailable = 1
class ImageWizard(WizardLanguage, Rc):
	"""First-boot wizard offering to restore a settings backup from a mounted device."""
	skin = """
		<screen name="ImageWizard" position="0,0" size="720,576" title="Welcome..." flags="wfNoBorder" >
			<widget name="text" position="153,40" size="340,330" font="Regular;22" />
			<widget source="list" render="Listbox" position="43,340" size="490,180" scrollbarMode="showOnDemand" >
				<convert type="StringList" />
			</widget>
			<widget name="config" position="53,340" zPosition="1" size="440,180" transparent="1" scrollbarMode="showOnDemand" />
			<ePixmap pixmap="buttons/button_red.png" position="40,225" zPosition="0" size="15,16" transparent="1" alphatest="on" />
			<widget name="languagetext" position="55,225" size="95,30" font="Regular;18" />
			<widget name="wizard" pixmap="wizard.png" position="40,50" zPosition="10" size="110,174" alphatest="on" />
			<widget name="rc" pixmaps="rc.png,rcold.png" position="530,50" zPosition="10" size="154,500" alphatest="on" />
			<widget name="arrowdown" pixmap="arrowdown.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
			<widget name="arrowdown2" pixmap="arrowdown.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
			<widget name="arrowup" pixmap="arrowup.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
			<widget name="arrowup2" pixmap="arrowup.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
		</screen>"""

	def __init__(self, session):
		self.xmlfile = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/SoftwareManager/imagewizard.xml")
		WizardLanguage.__init__(self, session, showSteps = False, showStepSlider = False)
		Rc.__init__(self)
		self.session = session
		self["wizard"] = Pixmap()
		Screen.setTitle(self, _("Welcome..."))
		self.selectedDevice = None

	def markDone(self):
		# Wizard-completion hook required by the base class; nothing to persist.
		pass

	def listDevices(self):
		"""Return (description, mountpoint) pairs for devices usable as a backup source."""
		candidates = [(r.description, r.mountpoint)
			for r in harddiskmanager.getMountedPartitions(onlyhotplug = False)]
		# Filter with a comprehension instead of list.remove() during
		# iteration: the original could skip the entry following a removed
		# one (and shadowed the builtin name `list`).
		return [x for x in candidates
			if x[1] != '/'
			and not x[1].startswith('/autofs/')
			and access(x[1], W_OK) and access(x[1], R_OK)]

	def deviceSelectionMade(self, index):
		self.deviceSelect(index)

	def deviceSelectionMoved(self):
		self.deviceSelect(self.selection)

	def deviceSelect(self, device):
		# Persist the chosen mountpoint as the configured backup location.
		self.selectedDevice = device
		config.plugins.configurationbackup.backuplocation.setValue(self.selectedDevice)
		config.plugins.configurationbackup.backuplocation.save()
		config.plugins.configurationbackup.save()
# Offer the restore wizard only on first boot; backupAvailable gates whether
# it actually runs in the wizard chain.
if config.misc.firstrun.getValue():
	wizardManager.registerWizard(ImageWizard, backupAvailable, priority = 10)
| Mariusz1970/enigma2 | lib/python/Plugins/SystemPlugins/SoftwareManager/ImageWizard.py | Python | gpl-2.0 | 6,666 |
"""
Migration script to add an inheritable column to the following tables:
library_info_association, library_folder_info_association.
Also, in case of sqlite check if the previous migration script deleted the
request table and if so, restore the table.
"""
from sqlalchemy import *
from migrate import *
from migrate.changeset import *
from galaxy.model.custom_types import *
import datetime
now = datetime.datetime.utcnow
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
def get_false_value(migrate_engine):
    """Return the SQL literal for boolean false on the given engine.

    SQLite stores booleans as integers, so it needs '0'; every other
    supported engine accepts the keyword 'false'.
    """
    return '0' if migrate_engine.name == 'sqlite' else 'false'
def upgrade(migrate_engine):
    # NOTE: Python 2 only syntax ("print" statement, "except Exception, e").
    metadata.bind = migrate_engine
    print __doc__
    #
    # In case of sqlite, check if the previous migration script deleted the
    # request table and if so, restore the table.
    #
    if migrate_engine.name == 'sqlite':
        if not migrate_engine.has_table('request'):
            # load the tables referenced in foreign keys
            metadata.reflect(only=['form_values', 'request_type', 'galaxy_user'])
            # create a temporary table
            Request_table = Table( 'request', metadata,
                Column( "id", Integer, primary_key=True),
                Column( "create_time", DateTime, default=now ),
                Column( "update_time", DateTime, default=now, onupdate=now ),
                Column( "name", TrimmedString( 255 ), nullable=False ),
                Column( "desc", TEXT ),
                Column( "form_values_id", Integer, ForeignKey( "form_values.id" ), index=True ),
                Column( "request_type_id", Integer, ForeignKey( "request_type.id" ), index=True ),
                Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
                Column( "deleted", Boolean, index=True, default=False ) )
            try:
                Request_table.create()
            except Exception, e:
                log.debug( "Creating request table failed: %s" % str( e ) )
    metadata.reflect()
    # Add the "inheritable" column to library_info_association and default
    # existing rows to false.
    LibraryInfoAssociation_table = Table( "library_info_association", metadata, autoload=True )
    c = Column( "inheritable", Boolean, index=True, default=False )
    c.create( LibraryInfoAssociation_table, index_name='ix_library_info_association_inheritable')
    assert c is LibraryInfoAssociation_table.c.inheritable
    cmd = "UPDATE library_info_association SET inheritable = %s" % get_false_value(migrate_engine)
    try:
        migrate_engine.execute( cmd )
    except Exception, e:
        log.debug( "Setting value of column inheritable to false in library_info_association failed: %s" % ( str( e ) ) )
    # Same column on library_folder_info_association.
    LibraryFolderInfoAssociation_table = Table( "library_folder_info_association", metadata, autoload=True )
    c = Column( "inheritable", Boolean, index=True, default=False )
    c.create( LibraryFolderInfoAssociation_table, index_name='ix_library_folder_info_association_inheritable')
    assert c is LibraryFolderInfoAssociation_table.c.inheritable
    cmd = "UPDATE library_folder_info_association SET inheritable = %s" % get_false_value(migrate_engine)
    try:
        migrate_engine.execute( cmd )
    except Exception, e:
        log.debug( "Setting value of column inheritable to false in library_folder_info_association failed: %s" % ( str( e ) ) )
def downgrade(migrate_engine):
    """Schema downgrade for this migration: intentionally a no-op.

    The upgrade's additions (the 'request' table and the 'inheritable'
    columns) are left in place; only the metadata binding is refreshed.
    """
    metadata.bind = migrate_engine
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/migrate/versions/0038_add_inheritable_column_to_library_template_assoc_tables.py | Python | gpl-3.0 | 3,569 |
"""This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket
from thrift.transport import TTransport
from osquery.extensions.ExtensionManager import Client
DEFAULT_SOCKET_PATH = "/var/osquery/osquery.em"
"""The default path for osqueryd sockets"""
class ExtensionClient(object):
    """A client for connecting to an existing extension manager socket."""

    _protocol = None
    _transport = None

    def __init__(self, path=DEFAULT_SOCKET_PATH, uuid=None):
        """
        Keyword arguments:
        path -- the path of the extension socket to connect to
        uuid -- the additional UUID to use when constructing the socket path
        """
        if uuid:
            path = "%s.%s" % (path, str(uuid))
        self.path = path
        unix_sock = TSocket.TSocket(unix_socket=self.path)
        self._transport = TTransport.TBufferedTransport(unix_sock)
        self._protocol = TBinaryProtocol.TBinaryProtocol(self._transport)

    def open(self):
        """Attempt to open the UNIX domain socket"""
        self._transport.open()

    def close(self):
        """Close the extension client connection"""
        if self._transport:
            self._transport.close()

    def extension_manager_client(self):
        """Return an extension manager (osquery core) client."""
        return Client(self._protocol)

    def extension_client(self):
        """Return an extension (osquery extension) client."""
        return Client(self._protocol)
| glensc/osquery-python | osquery/extension_client.py | Python | bsd-3-clause | 1,845 |
import discord
from discord.ext import commands
class Mycog:
    """My custom cog that does stuff!"""
    def __init__(self, bot):
        # Keep a reference to the bot so commands can reply through it.
        self.bot = bot
    @commands.command()
    async def mycom(self):
        """This does stuff!"""
        #Your code will go here
        # NOTE(review): uses the legacy (pre-1.0) discord.py bot.say API.
        await self.bot.say("I can do stuff!")
def setup(bot):
    """Entry point used by the bot's cog loader: register Mycog."""
    cog = Mycog(bot)
    bot.add_cog(cog)
| heartsdale/heartsdale-cogs | limin.py | Python | mit | 363 |
# by amounra 0413 : http://www.aumhaa.com
import Live
from _Framework.ButtonElement import ButtonElement
from _Framework.InputControlElement import InputControlElement
from _Framework.NotifyingControlElement import NotifyingControlElement
# MIDI message-type selectors used by InputControlElement subclasses.
MIDI_NOTE_TYPE = 0
MIDI_CC_TYPE = 1
MIDI_PB_TYPE = 2
MIDI_MSG_TYPES = (MIDI_NOTE_TYPE,
 MIDI_CC_TYPE,
 MIDI_PB_TYPE)
# MIDI status-byte bases for channel 0; the channel number is added on send.
MIDI_NOTE_ON_STATUS = 144
MIDI_NOTE_OFF_STATUS = 128
MIDI_CC_STATUS = 176
MIDI_PB_STATUS = 224
class MonoButtonElement(ButtonElement):
    __module__ = __name__
    __doc__ = ' Special button class that can be configured with custom on- and off-values, some of which flash at specified intervals called by _Update_Display'

    def __init__(self, is_momentary, msg_type, channel, identifier, name, cs, *a, **k):
        super(MonoButtonElement, self).__init__(is_momentary, msg_type, channel, identifier, *a, **k)
        self.name = name
        self._script = cs
        # NOTE(review): set_color_map() asserts a tuple, but the default here
        # is a list -- confirm which is intended.
        self._color_map = [2, 64, 4, 8, 16, 127, 32]
        self._num_colors = 7
        self._num_flash_states = 18
        self._flash_state = 0
        self._color = 0
        self._on_value = 127
        self._off_value = 0
        self._darkened = 0
        self._is_enabled = True
        self._is_notifying = False
        self._force_next_value = False
        self._parameter = None
        self._report_input = True
        # Fix: send_value() tests self._report_output, which was never
        # initialized and raised AttributeError the first time a value
        # report was attempted.
        self._report_output = True

    def set_color_map(self, color_map):
        """Install a custom color map; flash-state count derives from its size."""
        assert isinstance(color_map, tuple)
        assert len(color_map) > 0
        self._num_colors = len(color_map)
        self._num_flash_states = int(127/len(color_map))
        self._color_map = color_map

    def set_on_off_values(self, on_value, off_value):
        """Set both the lit and unlit values (0..127) used by turn_on/turn_off."""
        assert (on_value in range(128))
        assert (off_value in range(128))
        self._last_sent_message = None
        self._on_value = on_value
        self._off_value = off_value

    def set_on_value(self, value):
        """Set the value sent by turn_on()."""
        assert (value in range(128))
        self._last_sent_message = None
        self._on_value = value

    def set_off_value(self, value):
        """Set the value sent by turn_off()."""
        assert (value in range(128))
        self._last_sent_message = None
        self._off_value = value

    def set_force_next_value(self):
        """Force the next send_value() to emit MIDI even if unchanged."""
        self._last_sent_message = None
        self._force_next_value = True

    def set_enabled(self, enabled):
        self._is_enabled = enabled
        self._request_rebuild()

    def turn_on(self, force = False):
        self.force_next_send()
        self.send_value(self._on_value)

    def turn_off(self, force = False):
        self.force_next_send()
        self.send_value(self._off_value)

    def reset(self, force = False):
        self.force_next_send()
        self.send_value(0)

    def receive_value(self, value):
        self._last_sent_message = None
        ButtonElement.receive_value(self, value)

    def send_value(self, value, force = False): #commented this because of ButtonElement==NoneType errors in log
        """Translate a logical value into a mapped color byte and send it.

        Values 1..126 index the color map cyclically; 127 uses the last
        color; 0 sends the darkened value. Also records the flash state
        used later by flash().
        """
        if(type(self) != type(None)):
            assert (value != None)
            assert isinstance(value, int)
            assert (value in range(128))
            if (force or self._force_next_send or ((value != self._last_sent_value) and self._is_being_forwarded)):
                data_byte1 = self._original_identifier
                if value in range(1, 127):
                    data_byte2 = self._color_map[(value - 1) % (self._num_colors)]
                elif value == 127:
                    data_byte2 = self._color_map[self._num_colors-1]
                else:
                    data_byte2 = self._darkened
                self._color = data_byte2
                status_byte = self._original_channel
                if (self._msg_type == MIDI_NOTE_TYPE):
                    status_byte += MIDI_NOTE_ON_STATUS
                elif (self._msg_type == MIDI_CC_TYPE):
                    status_byte += MIDI_CC_STATUS
                else:
                    assert False
                self.send_midi(tuple([status_byte,
                 data_byte1,
                 data_byte2]))
                self._last_sent_message = [value]
                if self._report_output:
                    is_input = True
                    self._report_value(value, (not is_input))
                self._flash_state = round((value -1)/self._num_colors)
                self._force_next_value = False

    def script_wants_forwarding(self):
        if not self._is_enabled:
            return False
        else:
            return InputControlElement.script_wants_forwarding(self)

    def flash(self, timer):
        """Toggle the LED between its color and off, at a period set by the
        flash state recorded in send_value(); called from the display timer."""
        if (self._is_being_forwarded and self._flash_state in range(1, self._num_flash_states) and (timer % self._flash_state) == 0):
            data_byte1 = self._original_identifier
            data_byte2 = self._color * int((timer % (self._flash_state * 2)) > 0)
            status_byte = self._original_channel
            if (self._msg_type == MIDI_NOTE_TYPE):
                status_byte += MIDI_NOTE_ON_STATUS
            elif (self._msg_type == MIDI_CC_TYPE):
                status_byte += MIDI_CC_STATUS
            else:
                assert False
            self.send_midi((status_byte,
             data_byte1,
             data_byte2))
| jim-cooley/abletonremotescripts | remote-scripts/samples/Twister Ableton Script v1.2.2/_Mono_Framework/MonoButtonElement.py | Python | apache-2.0 | 4,389 |
"""Transformer Config builder library."""
from third_party.flax_examples import transformer_modules
def make_transformer_config(config, vocab_size, deterministic):
  """Returns a transformer_modules.TransformerConfig to spec."""
  num_layers = config.transformer_num_layers
  return make_transformer_config_num_layers(num_layers, config, vocab_size,
                                            deterministic)
def make_transformer_config_num_layers(num_layers, config, vocab_size, deterministic):
  """Returns a transformer_modules.TransformerConfig to spec.

  The layer count is taken from the explicit argument; every other
  hyperparameter comes from the corresponding `transformer_*` config field.
  """
  settings = dict(
      vocab_size=vocab_size,
      output_vocab_size=vocab_size,
      emb_dim=config.transformer_emb_dim,
      num_heads=config.transformer_num_heads,
      num_layers=num_layers,
      qkv_dim=config.transformer_qkv_dim,
      mlp_dim=config.transformer_mlp_dim,
      dropout_rate=config.transformer_dropout_rate,
      attention_dropout_rate=config.transformer_attention_dropout_rate,
      max_len=config.max_tokens,
      deterministic=deterministic,
  )
  return transformer_modules.TransformerConfig(**settings)
| google-research/runtime-error-prediction | core/modules/transformer_config_lib.py | Python | apache-2.0 | 1,013 |
#!/usr/bin/env python
# Summation of digits of 2^1000
# First, make sure python can handle it:
n = 2**1000
print n
print
leftover = n
accum = 0
while leftover >0:
accum += leftover % 10
leftover /= 10
print accum
| nayrbnayrb/projecteuler | 0016/0016.py | Python | gpl-3.0 | 223 |
# Volatility
# Copyright (C) 2010 Brendan Dolan-Gavitt
# Copyright (c) 2011 Michael Cohen <scudette@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: brendandg@gatech.edu
@organization: Georgia Institute of Technology
"""
import os, struct, socket
import copy
import zipfile
import volatility.plugins
import volatility.plugins.overlays.basic as basic
import volatility.plugins.overlays.native_types as native_types
import volatility.exceptions as exceptions
import volatility.obj as obj
import volatility.debug as debug
import volatility.dwarf as dwarf
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.flags as linux_flags
import volatility.addrspace as addrspace
import volatility.utils as utils
import volatility.protos as protos
# Linux x86_64 follows the LP64 model, so widen 'long'/'unsigned long' to
# 8 bytes on top of the generic x64 native types.
x64_native_types = copy.deepcopy(native_types.x64_native_types)
x64_native_types['long'] = [8, '<q']
x64_native_types['unsigned long'] = [8, '<Q']
class LinuxPermissionFlags(basic.Flags):
    """A Flags object for printing vm_area_struct permissions
    in a format like rwx or r-x"""

    def __str__(self):
        # Render one character per flag, sorted by flag name, with '-' for
        # bits that are not set (e.g. "rwx", "r-x").
        value = self.v()
        rendered = []
        for flag in sorted(self.bitmap):
            if value & (1 << self.bitmap[flag]):
                rendered.append(flag)
            else:
                rendered.append('-')
        return ''.join(rendered)

    def is_flag(self, flag):
        """Return a nonzero value when the named permission bit is set."""
        return self.v() & (1 << self.bitmap[flag])

    def is_executable(self):
        return self.is_flag('x')

    def is_readable(self):
        return self.is_flag('r')

    def is_writable(self):
        return self.is_flag('w')
# Overlays applied on top of the dwarf-generated vtypes: mostly retyping raw
# char arrays as String objects and wiring in the Volatility magic checks.
linux_overlay = {
    'task_struct' : [None, {
    'comm' : [ None , ['String', dict(length = 16)]],
    }],
    'in_ifaddr' : [None, {
    'ifa_label' : [ None , ['String', dict(length = 16)]],
    }],
    'module' : [None, {
    'name' : [ None , ['String', dict(length = 60)]],
    }],
    'super_block' : [None, {
    's_id' : [ None , ['String', dict(length = 32)]],
    }],
    'net_device' : [None, {
    'name' : [ None , ['String', dict(length = 16)]],
    }],
    'sockaddr_un' : [None, {
    'sun_path' : [ None , ['String', dict(length = 108)]],
    }],
    'hlist_head' : [None, {
    'first' : [ None , ['pointer', ['hlist_node']]],
    }],
    'tty_struct' : [None, {
    'name' : [ None , ['String', dict(length = 64)]],
    }],
    'dentry' : [None, {
    'd_u' : [ None , ['list_head', {}]],
    }],
    'VOLATILITY_MAGIC': [None, {
    'DTB' : [ 0x0, ['VolatilityDTB', dict(configname = "DTB")]],
    'ArmValidAS' : [ 0x0, ['VolatilityLinuxARMValidAS']],
    'IA32ValidAS' : [ 0x0, ['VolatilityLinuxIntelValidAS']],
    'AMD64ValidAS' : [ 0x0, ['VolatilityLinuxIntelValidAS']],
    }],
    'vm_area_struct' : [ None, {
    'vm_flags' : [ None, ['LinuxPermissionFlags', {'bitmap': {'r': 0, 'w': 1, 'x': 2}}]],
    'vm_end' : [ None , ['unsigned long']],
    'vm_start' : [ None , ['unsigned long']],
    }],
    }

# Intel-specific string retyping (cpuinfo_x86 identification strings).
intel_overlay = {
    'cpuinfo_x86' : [None, {
    'x86_model_id' : [ None , ['String', dict(length = 64)]],
    'x86_vendor_id' : [ None, ['String', dict(length = 16)]],
    }],
    }
def parse_system_map(data, module):
    """Parse a System.map-style symbol listing.

    Each valid line is "<hex address> <nm type letter> <symbol name>".
    Lines that do not split into three fields or whose address is not hex
    are skipped.

    Returns a tuple (arch, mem_model, sys_map) where:
      arch      -- "x86", "x64", or "ARM" (ARM detected via arm_syscall)
      mem_model -- "32bit"/"64bit", inferred from the address field width
      sys_map   -- {module: {symbol: [[address, type], ...]}}
    """
    sys_map = {}
    sys_map[module] = {}

    mem_model = None
    arch = "x86"

    # get the system map
    for line in data.splitlines():
        try:
            (str_addr, symbol_type, symbol) = line.strip().split()
        except ValueError:
            continue

        try:
            # int() auto-promotes past sys.maxint on Python 2, so the
            # Python-2-only long() call is unnecessary (and broke Python 3).
            sym_addr = int(str_addr, 16)
        except ValueError:
            continue

        if symbol == "arm_syscall":
            arch = "ARM"

        if not symbol in sys_map[module]:
            sys_map[module][symbol] = []

        sys_map[module][symbol].append([sym_addr, symbol_type])

        # 8 hex digits -> 32bit, 16 -> 64bit.
        mem_model = str(len(str_addr) * 4) + "bit"

    if mem_model == "64bit" and arch == "x86":
        arch = "x64"

    return arch, mem_model, sys_map
def LinuxProfileFactory(profpkg):
""" Takes in a zip file, spits out a LinuxProfile class
The zipfile should include at least one .dwarf file
and the appropriate system.map file.
To generate a suitable dwarf file:
dwarfdump -di vmlinux > output.dwarf
"""
dwarfdata = None
sysmapdata = None
# XXX Do we want to initialize this
memmodel, arch = "32bit", "x86"
profilename = os.path.splitext(os.path.basename(profpkg.filename))[0]
for f in profpkg.filelist:
if f.filename.lower().endswith('.dwarf'):
dwarfdata = profpkg.read(f.filename)
elif 'system.map' in f.filename.lower():
sysmapdata = profpkg.read(f.filename)
arch, memmodel, sysmap = parse_system_map(profpkg.read(f.filename), "kernel")
if memmodel == "64bit":
arch = "x64"
if not sysmapdata or not dwarfdata:
# Might be worth throwing an exception here?
return None
class AbstractLinuxProfile(obj.Profile):
__doc__ = "A Profile for Linux " + profilename + " " + arch
_md_os = "linux"
_md_memory_model = memmodel
_md_arch = arch
# Override 64-bit native_types
native_mapping = {'32bit': native_types.x86_native_types,
'64bit': x64_native_types}
def __init__(self, *args, **kwargs):
# change the name to catch any code referencing the old hash table
self.sys_map = {}
obj.Profile.__init__(self, *args, **kwargs)
def clear(self):
"""Clear out the system map, and everything else"""
self.sys_map = {}
obj.Profile.clear(self)
def reset(self):
"""Reset the vtypes, sysmap and apply modifications, then compile"""
self.clear()
self.load_vtypes()
self.load_sysmap()
self.load_modifications()
self.compile()
def _merge_anonymous_members(self, vtypesvar):
members_index = 1
types_index = 1
offset_index = 0
try:
for candidate in vtypesvar:
done = False
while not done:
if any(member.startswith('__unnamed_') for member in vtypesvar[candidate][members_index]):
for member in vtypesvar[candidate][members_index].keys():
if member.startswith('__unnamed_'):
member_type = vtypesvar[candidate][members_index][member][types_index][0]
location = vtypesvar[candidate][members_index][member][offset_index]
vtypesvar[candidate][members_index].update(vtypesvar[member_type][members_index])
for name in vtypesvar[member_type][members_index].keys():
vtypesvar[candidate][members_index][name][offset_index] += location
del vtypesvar[candidate][members_index][member]
# Don't update done because we'll need to check if any
# of the newly imported types need merging
else:
done = True
except KeyError, e:
import pdb
pdb.set_trace()
raise exceptions.VolatilityException("Inconsistent linux profile - unable to look up " + str(e))
def load_vtypes(self):
"""Loads up the vtypes data"""
ntvar = self.metadata.get('memory_model', '32bit')
self.native_types = copy.deepcopy(self.native_mapping.get(ntvar))
vtypesvar = dwarf.DWARFParser(dwarfdata).finalize()
self._merge_anonymous_members(vtypesvar)
self.vtypes.update(vtypesvar)
debug.debug("{2}: Found dwarf file {0} with {1} symbols".format(f.filename, len(vtypesvar.keys()), profilename))
def load_sysmap(self):
"""Loads up the system map data"""
arch, _memmodel, sysmapvar = parse_system_map(sysmapdata, "kernel")
debug.debug("{2}: Found system file {0} with {1} symbols".format(f.filename, len(sysmapvar.keys()), profilename))
self.sys_map.update(sysmapvar)
def get_all_symbols(self, module = "kernel"):
""" Gets all the symbol tuples for the given module """
ret = []
symtable = self.sys_map
if module in symtable:
mod = symtable[module]
for (name, addrs) in mod.items():
ret.append(addrs)
else:
debug.info("All symbols requested for non-existent module %s" % module)
return ret
def get_all_addresses(self, module = "kernel"):
""" Gets all the symbol addresses for the given module """
# returns a hash table for quick looks
# the main use of this function is to see if an address is known
ret = {}
symbols = self.get_all_symbols(module)
for sym in symbols:
for (addr, addrtype) in sym:
ret[addr] = 1
return ret
def get_symbol_by_address(self, module, sym_address):
ret = ""
symtable = self.sys_map
mod = symtable[module]
for (name, addrs) in mod.items():
for (addr, addr_type) in addrs:
if sym_address == addr:
ret = name
break
return ret
def get_all_symbol_names(self, module = "kernel"):
symtable = self.sys_map
if module in symtable:
ret = symtable[module].keys()
else:
debug.error("get_all_symbol_names called on non-existent module")
return ret
def get_next_symbol_address(self, sym_name, module = "kernel"):
"""
This is used to find the address of the next symbol in the profile
For some data structures, we cannot determine their size automaticlaly so this
can be used to figure it out on the fly
"""
high_addr = 0xffffffffffffffff
table_addr = self.get_symbol(sym_name, module = module)
addrs = self.get_all_addresses(module = module)
for addr in addrs.keys():
if table_addr < addr < high_addr:
high_addr = addr
return high_addr
def get_symbol(self, sym_name, nm_type = "", module = "kernel"):
"""Gets a symbol out of the profile
sym_name -> name of the symbol
nm_tyes -> types as defined by 'nm' (man nm for examples)
module -> which module to get the symbol from, default is kernel, otherwise can be any name seen in 'lsmod'
This fixes a few issues from the old static hash table method:
1) Conflicting symbols can be handled, if a symbol is found to conflict on any profile,
then the plugin will need to provide the nm_type to differentiate, otherwise the plugin will be errored out
2) Can handle symbols gathered from modules on disk as well from the static kernel
symtable is stored as a hash table of:
symtable[module][sym_name] = [(symbol address, symbol type), (symbol addres, symbol type), ...]
The function has overly verbose error checking on purpose...
"""
symtable = self.sys_map
ret = None
# check if the module is there...
if module in symtable:
mod = symtable[module]
# check if the requested symbol is in the module
if sym_name in mod:
sym_list = mod[sym_name]
# if a symbol has multiple definitions, then the plugin needs to specify the type
if len(sym_list) > 1:
if nm_type == "":
debug.error("Requested symbol {0:s} in module {1:s} has multiple definitions and no type given\n".format(sym_name, module))
else:
for (addr, stype) in sym_list:
if stype == nm_type:
ret = addr
break
if ret == None:
debug.error("Requested symbol {0:s} in module {1:s} could not be found\n".format(sym_name, module))
else:
# get the address of the symbol
ret = sym_list[0][0]
else:
debug.debug("Requested symbol {0:s} not found in module {1:s}\n".format(sym_name, module))
else:
debug.info("Requested module {0:s} not found in symbol table\n".format(module))
return ret
cls = AbstractLinuxProfile
cls.__name__ = 'Linux' + profilename.replace('.', '_') + arch
return cls
################################
# Track down the zip files
# Push them through the factory
# Check whether ProfileModifications will work

new_classes = []

for path in set(volatility.plugins.__path__):
    for path, _, files in os.walk(path):
        for fn in files:
            if zipfile.is_zipfile(os.path.join(path, fn)):
                profile_cls = LinuxProfileFactory(zipfile.ZipFile(os.path.join(path, fn)))
                # The factory returns None for packages missing a .dwarf or
                # System.map file; previously those Nones were appended too.
                if profile_cls is not None:
                    new_classes.append(profile_cls)
################################
# really 'file' but don't want to mess with python's version
class linux_file(obj.CType):
    """struct file wrapper: the dentry/vfsmount members moved into an
    embedded f_path struct on newer kernels, so pick whichever exists."""

    @property
    def dentry(self):
        return self.f_dentry if hasattr(self, "f_dentry") else self.f_path.dentry

    @property
    def vfsmnt(self):
        return self.f_vfsmnt if hasattr(self, "f_vfsmnt") else self.f_path.mnt
# FIXME - walking backwards has not been thorougly tested
class hlist_node(obj.CType):
    """A hlist_node makes a doubly linked list."""
    def list_of_type(self, obj_type, member, offset = -1, forward = True, head_sentinel = True):
        """Yield each object of *obj_type* linked through its *member*
        hlist_node, walking via .next (forward) or .pprev (backward).

        The *offset* parameter is ignored: it is recomputed from the
        profile below. With head_sentinel True this node is treated as the
        list head and is not itself yielded. Visited offsets are tracked
        to break cycles.
        """
        if not self.is_valid():
            return
        ## Get the first element
        if forward:
            nxt = self.next.dereference()
        else:
            nxt = self.pprev.dereference().dereference()
        offset = self.obj_vm.profile.get_obj_offset(obj_type, member)
        seen = set()
        if head_sentinel:
            # We're a header element and not to be included in the list
            seen.add(self.obj_offset)
        while nxt.is_valid() and nxt.obj_offset not in seen:
            ## Instantiate the object
            item = obj.Object(obj_type, offset = nxt.obj_offset - offset,
                              vm = self.obj_vm,
                              parent = self.obj_parent,
                              name = obj_type)
            seen.add(nxt.obj_offset)
            yield item
            if forward:
                nxt = item.m(member).next.dereference()
            else:
                nxt = item.m(member).pprev.dereference().dereference()
    def __nonzero__(self):
        ## List entries are valid when both Flinks and Blink are valid
        return bool(self.next) or bool(self.pprev)
    def __iter__(self):
        # Default iteration: walk entries of the parent's type via this member.
        return self.list_of_type(self.obj_parent.obj_name, self.obj_name)
class list_head(obj.CType):
    """A list_head makes a doubly linked list."""
    def list_of_type(self, obj_type, member, offset = -1, forward = True, head_sentinel = True):
        """Yield each object of *obj_type* linked through its *member*
        list_head, walking via .next (forward) or .prev (backward).

        The *offset* parameter is ignored: it is recomputed from the
        profile below. With head_sentinel True this node is treated as the
        list head and is not itself yielded. Visited offsets are tracked
        to break cycles.
        """
        if not self.is_valid():
            return
        ## Get the first element
        if forward:
            nxt = self.next.dereference()
        else:
            nxt = self.prev.dereference()
        offset = self.obj_vm.profile.get_obj_offset(obj_type, member)
        seen = set()
        if head_sentinel:
            # We're a header element and not to be included in the list
            seen.add(self.obj_offset)
        while nxt.is_valid() and nxt.obj_offset not in seen:
            ## Instantiate the object
            item = obj.Object(obj_type, offset = nxt.obj_offset - offset,
                              vm = self.obj_vm,
                              parent = self.obj_parent,
                              name = obj_type)
            seen.add(nxt.obj_offset)
            yield item
            if forward:
                nxt = item.m(member).next.dereference()
            else:
                nxt = item.m(member).prev.dereference()
    def __nonzero__(self):
        ## List entries are valid when both Flinks and Blink are valid
        return bool(self.next) or bool(self.prev)
    def __iter__(self):
        # Default iteration: walk entries of the parent's type via this member.
        return self.list_of_type(self.obj_parent.obj_name, self.obj_name)
class files_struct(obj.CType):
    """struct files_struct wrapper; newer kernels moved the fd array and
    its capacity into a separate fdtable ('fdt') member."""

    def get_fds(self):
        """Return the file-descriptor array (dereferenced fd pointer)."""
        fd_source = self.fdt.fd if hasattr(self, "fdt") else self.fd
        return fd_source.dereference()

    def get_max_fds(self):
        """Return the capacity of the file-descriptor array."""
        return self.fdt.max_fds if hasattr(self, "fdt") else self.max_fds
class kernel_param(obj.CType):
    """Kernel module parameter; the getter moved into an 'ops' structure
    on newer kernels."""

    @property
    def get(self):
        # Older kernels carry the getter directly on the struct.
        return self.m("get") if self.members.get("get") else self.ops.get
class kparam_array(obj.CType):
    """Array-valued kernel parameter; mirrors kernel_param's old/new
    getter layout."""

    @property
    def get(self):
        # Older kernels carry the getter directly on the struct.
        return self.m("get") if self.members.get("get") else self.ops.get
class gate_struct64(obj.CType):
    """64-bit gate descriptor (e.g. an IDT entry)."""

    @property
    def Address(self):
        """Reassemble the handler address from its three split fields."""
        return (self.offset_high << 32) | (self.offset_middle << 16) | self.offset_low
class desc_struct(obj.CType):
    """32-bit descriptor-table entry."""

    @property
    def Address(self):
        # High half of the address lives in 'b', low half in 'a'.
        high_bits = self.b & 0xffff0000
        low_bits = self.a & 0x0000ffff
        return high_bits | low_bits
class module_sect_attr(obj.CType):
    """Sysfs section attribute of a loaded kernel module."""

    def get_name(self):
        """Return the section name: stored inline as a char array on some
        kernels, as a string pointer on others."""
        raw = self.m("name")
        if type(raw) == obj.Array:
            return obj.Object("String", offset = raw.obj_offset, vm = self.obj_vm, length = 32)
        return self.name.dereference_as("String", length = 255)
class inet_sock(obj.CType):
    """Class for an internet socket object"""
    @property
    def protocol(self):
        """Return the protocol string (i.e. IPv4, IPv6)"""
        return protos.protos.get(self.sk.sk_protocol.v(), "UNKNOWN")
    @property
    def state(self):
        # Map the numeric socket state to its TCP state name.
        state = self.sk.__sk_common.skc_state #pylint: disable-msg=W0212
        return linux_flags.tcp_states[state]
    @property
    def src_port(self):
        """Source port in host byte order; member name varies by kernel
        version, hence the hasattr chain."""
        if hasattr(self, "sport"):
            return socket.htons(self.sport)
        elif hasattr(self, "inet_sport"):
            return socket.htons(self.inet_sport)
        else:
            return None
    @property
    def dst_port(self):
        """Destination port; member name/location varies by kernel version."""
        if hasattr(self, "dport"):
            return socket.htons(self.dport)
        elif hasattr(self, "inet_dport"):
            return socket.htons(self.inet_dport)
        elif hasattr(self, "sk") and hasattr(self.sk, "__sk_common") and hasattr(self.sk.__sk_common, "skc_dport"):
            return self.sk.__sk_common.skc_dport
        else:
            return None
    @property
    def src_addr(self):
        """Source address, cast to IpAddress (IPv4) or Ipv6Address."""
        if self.sk.__sk_common.skc_family == socket.AF_INET:
            # FIXME: Consider using kernel version metadata rather than checking hasattr
            if hasattr(self, "rcv_saddr"):
                saddr = self.rcv_saddr
            elif hasattr(self, "inet_rcv_saddr"):
                saddr = self.inet_rcv_saddr
            else:
                saddr = self.sk.__sk_common.skc_rcv_saddr
            return saddr.cast("IpAddress")
        else:
            return self.pinet6.saddr.cast("Ipv6Address")
    @property
    def dst_addr(self):
        """Destination address, cast to IpAddress (IPv4) or Ipv6Address."""
        if self.sk.__sk_common.skc_family == socket.AF_INET:
            # FIXME: Consider using kernel version metadata rather than checking hasattr
            if hasattr(self, "daddr") and self.daddr:
                daddr = self.daddr
            elif hasattr(self, "inet_daddr") and self.inet_daddr:
                daddr = self.inet_daddr
            else:
                daddr = self.sk.__sk_common.skc_daddr
            return daddr.cast("IpAddress")
        else:
            return self.pinet6.daddr.cast("Ipv6Address")
class tty_ldisc(obj.CType):
    """tty line discipline; newer kernels factored the operations out
    into a separate 'ops' member."""

    @property
    def ops(self):
        # When there is no embedded 'ops' member, the struct itself holds
        # the operations.
        if self.members.get("ops"):
            return self.m('ops')
        return self
class in_device(obj.CType):
    """IPv4 configuration (struct in_device) attached to a net_device."""
    def devices(self):
        """Walk the singly linked ifa_list and yield each in_ifaddr entry."""
        cur = self.ifa_list
        while cur != None and cur.is_valid():
            yield cur
            cur = cur.ifa_next
class net_device(obj.CType):
    """struct net_device helpers for the MAC address and promiscuous mode."""

    @property
    def mac_addr(self):
        """Return the hardware address as a colon-separated hex string.

        Prefers perm_addr when the profile has it, falling back to dev_addr.
        """
        # dict.has_key() is deprecated (removed in Python 3); 'in' is
        # equivalent and works everywhere.
        if "perm_addr" in self.members:
            hwaddr = self.perm_addr
        else:
            hwaddr = self.dev_addr

        macaddr = ":".join(["{0:02x}".format(x) for x in hwaddr][:6])

        return macaddr

    @property
    def promisc(self):
        """True when the interface has IFF_PROMISC set in its flags."""
        return self.flags & 0x100 == 0x100 # IFF_PROMISC
class task_struct(obj.CType):
    """struct task_struct: one process/thread, with helpers for credential
    lookup, per-process address spaces, memory search and timestamps."""
    def is_valid_task(self):
        """Sanity-check pointers every live task should carry."""
        ret = self.fs.v() != 0 and self.files.v() != 0
        if ret and self.members.get("cred"):
            ret = self.cred.is_valid()
        return ret
    @property
    def uid(self):
        # Older kernels store uid on the task; newer ones moved it to 'cred'.
        ret = self.members.get("uid")
        if ret is None:
            ret = self.cred.uid
        else:
            ret = self.m("uid")
        return ret
    @property
    def gid(self):
        ret = self.members.get("gid")
        if ret is None:
            gid = self.cred.gid
            if hasattr(gid, 'counter'):
                # wrapped (kgid_t-style) value: read the underlying int
                ret = obj.Object("int", offset = gid.v(), vm = self.obj_vm)
            else:
                ret = gid
        else:
            ret = self.m("gid")
        return ret
    @property
    def euid(self):
        ret = self.members.get("euid")
        if ret is None:
            ret = self.cred.euid
        else:
            ret = self.m("euid")
        return ret
    def get_process_address_space(self):
        """Build an address space for this task using mm.pgd as the DTB."""
        ## If we've got a NoneObject, return it maintain the reason
        if self.mm.pgd.v() == None:
            return self.mm.pgd.v()
        directory_table_base = self.obj_vm.vtop(self.mm.pgd.v())
        try:
            process_as = self.obj_vm.__class__(
                self.obj_vm.base, self.obj_vm.get_config(), dtb = directory_table_base)
        except AssertionError, _e:
            return obj.NoneObject("Unable to get process AS")
        process_as.name = "Process {0}".format(self.pid)
        return process_as
    def get_proc_maps(self):
        """Yield every vm_area_struct mapped by this task."""
        for vma in linux_common.walk_internal_list("vm_area_struct", "vm_next", self.mm.mmap):
            yield vma
    def search_process_memory(self, s, heap_only = False):
        """Scan this task's mapped memory for each string in *s*, yielding
        the virtual address of every hit. With heap_only, only VMAs that
        span the heap (start_brk..brk) are scanned."""
        # Allow for some overlap in case objects are
        # right on page boundaries
        overlap = 1024
        # Make sure s in a list. This allows you to search for
        # multiple strings at once, without changing the API.
        if type(s) != list:
            debug.warning("Single strings to search_process_memory is deprecated, use a list instead")
            s = [s]
        scan_blk_sz = 1024 * 1024 * 10
        addr_space = self.get_process_address_space()
        for vma in self.get_proc_maps():
            if heap_only:
                if not (vma.vm_start <= self.mm.start_brk and vma.vm_end >= self.mm.brk):
                    continue
            offset = vma.vm_start
            out_of_range = vma.vm_start + (vma.vm_end - vma.vm_start)
            while offset < out_of_range:
                # Read some data and match it.
                to_read = min(scan_blk_sz + overlap, out_of_range - offset)
                data = addr_space.zread(offset, to_read)
                if not data:
                    break
                for x in s:
                    for hit in utils.iterfind(data, x):
                        yield offset + hit
                offset += min(to_read, scan_blk_sz)
    def ACTHZ(self, CLOCK_TICK_RATE, HZ):
        # Mirrors the kernel's ACTHZ/LATCH jiffies arithmetic.
        LATCH = ((CLOCK_TICK_RATE + HZ/2) / HZ)
        return self.SH_DIV(CLOCK_TICK_RATE, LATCH, 8)
    def SH_DIV(self, NOM, DEN, LSH):
        # Kernel SH_DIV macro: fixed-point divide with rounding.
        return ((NOM / DEN) << LSH) + (((NOM % DEN) << LSH) + DEN / 2) / DEN
    def TICK_NSEC(self):
        # Assumes HZ=1000 and the x86 PIT clock rate -- TODO confirm these
        # hold for other architectures/configs.
        HZ = 1000
        CLOCK_TICK_RATE = 1193182
        return self.SH_DIV(1000000 * 1000, self.ACTHZ(CLOCK_TICK_RATE, HZ), 8)
    def get_time_vars(self):
        '''
        Sometime in 3.[3-5], Linux switched to a global timekeeper structure
        This just figures out which is in use and returns the correct variables
        '''
        wall_addr = self.obj_vm.profile.get_symbol("wall_to_monotonic")
        sleep_addr = self.obj_vm.profile.get_symbol("total_sleep_time")
        # old way
        if wall_addr and sleep_addr:
            wall = obj.Object("timespec", offset = wall_addr, vm = self.obj_vm)
            timeo = obj.Object("timespec", offset = sleep_addr, vm = self.obj_vm)
        elif wall_addr:
            # only wall_to_monotonic exists: estimate sleep time from the
            # init task's accumulated cpu time
            wall = obj.Object("timespec", offset = wall_addr, vm = self.obj_vm)
            init_task_addr = self.obj_vm.profile.get_symbol("init_task")
            init_task = obj.Object("task_struct", offset = init_task_addr, vm = self.obj_vm)
            time_val = init_task.utime + init_task.stime
            nsec = time_val * self.TICK_NSEC()
            tv_sec = nsec / linux_common.nsecs_per
            tv_nsec = nsec % linux_common.nsecs_per
            timeo = linux_common.vol_timespec(tv_sec, tv_nsec)
        # timekeeper way
        else:
            timekeeper_addr = self.obj_vm.profile.get_symbol("timekeeper")
            timekeeper = obj.Object("timekeeper", offset = timekeeper_addr, vm = self.obj_vm)
            wall = timekeeper.wall_to_monotonic
            timeo = timekeeper.total_sleep_time
        return (wall, timeo)
    # based on 2.6.35 getboottime
    def get_boot_time(self):
        """Reconstruct the boot time (seconds) by negating and normalizing
        the wall_to_monotonic + total_sleep_time offsets."""
        (wall, timeo) = self.get_time_vars()
        secs = wall.tv_sec + timeo.tv_sec
        nsecs = wall.tv_nsec + timeo.tv_nsec
        secs = secs * -1
        nsecs = nsecs * -1
        while nsecs >= linux_common.nsecs_per:
            nsecs = nsecs - linux_common.nsecs_per
            secs = secs + 1
        while nsecs < 0:
            nsecs = nsecs + linux_common.nsecs_per
            secs = secs - 1
        boot_time = secs + (nsecs / linux_common.nsecs_per / 100)
        return boot_time
    def get_task_start_time(self):
        """Return this task's start time as a UnixTimeStamp, computed as
        boot time plus the task's monotonic start_time."""
        start_time = self.start_time
        start_secs = start_time.tv_sec + (start_time.tv_nsec / linux_common.nsecs_per / 100)
        sec = self.get_boot_time() + start_secs
        # convert the integer as little endian
        try:
            data = struct.pack("<I", sec)
        except struct.error:
            # in case we exceed 0 <= number <= 4294967295
            return ""
        bufferas = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = data)
        dt = obj.Object("UnixTimeStamp", offset = 0, vm = bufferas, is_utc = True)
        return dt
    def get_commandline(self):
        """Return the process command line read from userland arg_start..
        arg_end, or "[comm]" for kernel threads (no mm)."""
        if self.mm:
            # set the as with our new dtb so we can read from userland
            proc_as = self.get_process_address_space()
            # read argv from userland
            start = self.mm.arg_start.v()
            argv = proc_as.read(start, self.mm.arg_end - self.mm.arg_start)
            if argv:
                # split the \x00 buffer into args
                name = " ".join(argv.split("\x00"))
            else:
                name = ""
        else:
            # kernel thread
            name = "[" + self.comm + "]"
        return name
class linux_fs_struct(obj.CType):
    """struct fs_struct wrapper; the root moved into an embedded
    'struct path' on newer kernels (old layout has a 'rootmnt' member)."""

    def get_root_dentry(self):
        # < 2.6.26 layout exposes the dentry directly as 'root'.
        return self.root if hasattr(self, "rootmnt") else self.root.dentry

    def get_root_mnt(self):
        # < 2.6.26 layout exposes the vfsmount as 'rootmnt'.
        return self.rootmnt if hasattr(self, "rootmnt") else self.root.mnt
class super_block(obj.CType):
    """struct super_block helpers decoding the packed s_dev device number."""

    @property
    def major(self):
        # Bits above the low 20 hold the major number.
        return self.s_dev >> 20

    @property
    def minor(self):
        # The low 20 bits hold the minor number.
        return self.s_dev & 0xfffff
class inode(obj.CType):
    """struct inode with S_ISDIR/S_ISREG-style mode tests."""

    def is_dir(self):
        """True when this inode is a directory (mimics S_ISDIR)."""
        return (self.i_mode & linux_flags.S_IFMT) == linux_flags.S_IFDIR

    def is_reg(self):
        """True when this inode is a regular file (mimics S_ISREG)."""
        return (self.i_mode & linux_flags.S_IFMT) == linux_flags.S_IFREG
class timespec(obj.CType):
    """Kernel timespec with conversion to a Volatility UnixTimeStamp."""

    def as_timestamp(self):
        """Interpret tv_sec as a little-endian 32-bit UTC unix time."""
        packed = struct.pack("<I", self.tv_sec)
        buf_space = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = packed)
        return obj.Object("UnixTimeStamp", offset = 0, vm = buf_space, is_utc = True)
class dentry(obj.CType):
    def get_partial_path(self):
        """Walk d_parent links to the top of this dentry tree and join the
        component names. Only a partial path: without a vfsmount reference
        we cannot cross mount points."""
        components = []
        cur = self
        while cur and cur != cur.d_parent:
            component = cur.d_name.name.dereference_as("String", length = 255)
            if component.is_valid():
                components.append(str(component))
            cur = cur.d_parent
        return "/".join(reversed(components))
class VolatilityDTB(obj.VolatilityMagic):
    """A scanner for DTB values."""

    def generate_suggestions(self):
        """Derive the DTB from the static swapper_pg_dir kernel symbol."""
        kernel_shift = 0xc0000000
        # this is the only code allowed to reference the internal sys_map!
        yield self.obj_vm.profile.get_symbol("swapper_pg_dir") - kernel_shift
# the intel check, simply checks for the static paging of init_task
class VolatilityLinuxIntelValidAS(obj.VolatilityMagic):
    """An object to check that an address space is a valid Intel paged space"""

    def generate_suggestions(self):
        init_task_addr = self.obj_vm.profile.get_symbol("init_task")
        mem_model = self.obj_vm.profile.metadata.get('memory_model', '32bit')
        shift = 0xc0000000 if mem_model == "32bit" else 0xffffffff80000000
        # The static kernel mapping is linear, so with the right DTB
        # init_task must translate to (virtual - shift).
        yield self.obj_vm.vtop(init_task_addr) == init_task_addr - shift
# the ARM check, has to check multiple values b/c phones do not map RAM at 0
class VolatilityLinuxARMValidAS(obj.VolatilityMagic):
    """An object to check that an address space is a valid Arm Paged space"""

    def generate_suggestions(self):
        init_task_addr = self.obj_vm.profile.get_symbol("init_task")
        do_fork_addr = self.obj_vm.profile.get_symbol("do_fork")
        # Distance between the two symbols in the kernel image; invariant
        # under relocation of the physical RAM base.
        sym_addr_diff = (do_fork_addr - init_task_addr)
        if self.obj_vm.profile.metadata.get('memory_model', '32bit') == "32bit":
            shift = 0xc0000000
        else:
            shift = 0xffffffff80000000
        task_paddr = self.obj_vm.vtop(init_task_addr)
        fork_paddr = self.obj_vm.vtop(do_fork_addr)
        if task_paddr and fork_paddr:
            # these won't be zero due to RAM not at physical address 0
            # but if the offset from 0 is the same across two paging operations
            # then we have the right DTB
            task_off = task_paddr - shift
            fork_off = fork_paddr - shift
            yield fork_off - task_off == sym_addr_diff
class LinuxObjectClasses(obj.ProfileModification):
    """Registers the Linux-specific object class wrappers on the profile."""
    conditions = {'os': lambda x: x == 'linux'}
    before = ['BasicObjectClasses']

    def modification(self, profile):
        # Map vtype/struct names to the wrapper classes defined in this
        # module so instantiated objects gain their helper methods.
        profile.object_classes.update({
            'fs_struct': linux_fs_struct,
            'file': linux_file,
            'list_head': list_head,
            'hlist_node': hlist_node,
            'files_struct': files_struct,
            'task_struct': task_struct,
            'net_device' : net_device,
            'in_device' : in_device,
            'tty_ldisc' : tty_ldisc,
            'module_sect_attr' : module_sect_attr,
            'VolatilityDTB': VolatilityDTB,
            'IpAddress': basic.IpAddress,
            'Ipv6Address': basic.Ipv6Address,
            'VolatilityLinuxIntelValidAS' : VolatilityLinuxIntelValidAS,
            'VolatilityLinuxARMValidAS' : VolatilityLinuxARMValidAS,
            'kernel_param' : kernel_param,
            'kparam_array' : kparam_array,
            'gate_struct64' : gate_struct64,
            'desc_struct' : desc_struct,
            'page': page,
            'LinuxPermissionFlags': LinuxPermissionFlags,
            'super_block' : super_block,
            'inode' : inode,
            'dentry' : dentry,
            'timespec' : timespec,
            'inet_sock' : inet_sock,
        })
class LinuxOverlay(obj.ProfileModification):
    """Merges the generic Linux vtype overlay into every Linux profile."""
    conditions = {'os': lambda x: x == 'linux'}
    before = ['BasicObjectClasses'] # , 'LinuxVTypes']

    def modification(self, profile):
        profile.merge_overlay(linux_overlay)
class LinuxIntelOverlay(obj.ProfileModification):
    """Merges the x86/x64-only overlay into Intel Linux profiles."""
    conditions = {'os': lambda x: x == 'linux',
                  'arch' : lambda x: x == 'x86' or x == 'x64'}
    before = ['BasicObjectClasses'] # , 'LinuxVTypes']

    def modification(self, profile):
        profile.merge_overlay(intel_overlay)
class page(obj.CType):
    def to_vaddr(self):
        #FIXME Do it!
        pass

    def to_paddr(self):
        """Return the physical address described by this struct page.

        Handles FLATMEM (mem_map array, usually 32-bit kernels) and the
        fixed-vmemmap SPARSEMEM layout (usually 64-bit kernels); NUMA /
        discontiguous layouts are reported as unsupported.
        """
        mem_map_addr = self.obj_vm.profile.get_symbol("mem_map")
        mem_section_addr = self.obj_vm.profile.get_symbol("mem_section")
        if mem_map_addr:
            # FLATMEM kernels, usually 32 bit
            mem_map_ptr = obj.Object("Pointer", offset = mem_map_addr, vm = self.obj_vm, parent = self.obj_parent)
        elif mem_section_addr:
            # this is hardcoded in the kernel - VMEMMAPSTART, usually 64 bit kernels
            mem_map_ptr = 0xffffea0000000000
        else:
            debug.error("phys_addr_of_page: Unable to determine physical address of page. NUMA is not supported at this time.\n")
        # PFN = index of this page struct within the page array, then
        # shifted by PAGE_SHIFT (12) to get the physical address.
        phys_offset = (self.obj_offset - mem_map_ptr) / self.obj_vm.profile.get_obj_size("page")
        phys_offset = phys_offset << 12
        return phys_offset
class mount(obj.CType):
    """Accessors for 'struct mount' (kernels >= 3.3) that also tolerate the
    older layout where these fields lived directly on vfsmount.

    Each property reads through the embedded 'mnt' (a struct vfsmount) when
    present.  The fallback reads the raw struct member via self.m(): plain
    attribute access (e.g. ``self.mnt_sb``) would resolve to this very
    property again and recurse infinitely, because the property shadows the
    member name.  This mirrors how the vfsmount class below uses self.m().
    """

    @property
    def mnt_sb(self):
        """The super_block of this mount."""
        if hasattr(self, "mnt"):
            ret = self.mnt.mnt_sb
        else:
            ret = self.m("mnt_sb")
        return ret

    @property
    def mnt_root(self):
        """The root dentry of the mounted filesystem."""
        if hasattr(self, "mnt"):
            ret = self.mnt.mnt_root
        else:
            ret = self.m("mnt_root")
        return ret

    @property
    def mnt_flags(self):
        """The per-mount flags."""
        if hasattr(self, "mnt"):
            ret = self.mnt.mnt_flags
        else:
            ret = self.m("mnt_flags")
        return ret
class vfsmount(obj.CType):
    """Accessors for 'struct vfsmount' bridging the 3.3+ layout where
    mnt_parent/mnt_mountpoint moved into the containing 'struct mount'."""

    def _get_real_mnt(self):
        # Recover the enclosing 'struct mount' from our embedded offset,
        # i.e. container_of(self, struct mount, mnt).
        offset = self.obj_vm.profile.get_obj_offset("mount", "mnt")
        mnt = obj.Object("mount", offset = self.obj_offset - offset, vm = self.obj_vm)
        return mnt

    @property
    def mnt_parent(self):
        # Old kernels keep mnt_parent on vfsmount itself; newer kernels
        # keep it on the surrounding struct mount.
        ret = self.members.get("mnt_parent")
        if ret is None:
            ret = self._get_real_mnt().mnt_parent
        else:
            ret = self.m("mnt_parent")
        return ret

    @property
    def mnt_mountpoint(self):
        # Same old/new layout bridging as mnt_parent.
        ret = self.members.get("mnt_mountpoint")
        if ret is None:
            ret = self._get_real_mnt().mnt_mountpoint
        else:
            ret = self.m("mnt_mountpoint")
        return ret
class LinuxMountOverlay(obj.ProfileModification):
    """Installs the mount/vfsmount wrappers, but only on profiles that
    actually define 'struct mount' (kernels >= 3.3)."""
    conditions = {'os': lambda x: x == 'linux'}
    before = ['BasicObjectClasses'] # , 'LinuxVTypes']

    def modification(self, profile):
        if profile.vtypes.get("mount"):
            profile.object_classes.update({'mount' : mount, 'vfsmount' : vfsmount})
| piniGitHUB/volatility | volatility/plugins/overlays/linux/linux.py | Python | gpl-2.0 | 37,298 |
"""
High-level libvirt test utility functions.
This module is meant to reduce code size by performing common test procedures.
Generally, code here should look like test code.
More specifically:
- Functions in this module should raise exceptions if things go wrong
- Functions in this module typically use functions and classes from
lower-level modules (e.g. utils_misc, qemu_vm, aexpect).
- Functions in this module should not be used by lower-level modules.
- Functions in this module should be used in the right context.
For example, a function should not be used where it may display
misleading or inaccurate info or debug messages.
:copyright: 2014 Red Hat Inc.
"""
import re
import os
import ast
import logging
import shutil
import threading
import time
import sys
from avocado.core import exceptions
from avocado.utils import process
from avocado.utils import stacktrace
from avocado.utils import linux_modules
import aexpect
from .. import virsh
from .. import xml_utils
from .. import iscsi
from .. import nfs
from .. import data_dir
from .. import utils_misc
from .. import utils_selinux
from .. import libvirt_storage
from .. import utils_net
from .. import gluster
from .. import remote
from ..staging import lv_utils
from ..test_setup import LibvirtPolkitConfig
from ..utils_libvirtd import service_libvirtd_control
from ..libvirt_xml import vm_xml
from ..libvirt_xml import network_xml
from ..libvirt_xml import xcepts
from ..libvirt_xml import NetworkXML
from ..libvirt_xml import IPXML
from ..libvirt_xml import pool_xml
from ..libvirt_xml import nwfilter_xml
from ..libvirt_xml.devices import disk
from ..libvirt_xml.devices import hostdev
from ..libvirt_xml.devices import controller
from ..libvirt_xml.devices import seclabel
from ..libvirt_xml.devices import channel
from __init__ import ping
class LibvirtNetwork(object):
    """
    Class to create a temporary network for testing.
    """

    def create_vnet_xml(self):
        """
        Create XML for a virtual network.

        :return: tuple of (network address, NetworkXML instance)
        """
        net_xml = NetworkXML()
        net_xml.name = self.name
        ip = IPXML(address=self.address)
        net_xml.ip = ip
        return self.address, net_xml

    def create_macvtap_xml(self):
        """
        Create XML for a macvtap network.

        :return: tuple of (host interface IP, NetworkXML instance)
        """
        net_xml = NetworkXML()
        net_xml.name = self.name
        net_xml.forward = {'mode': 'bridge', 'dev': self.iface}
        ip = utils_net.get_ip_address_by_interface(self.iface)
        return ip, net_xml

    def create_bridge_xml(self):
        """
        Create XML for a bridged network.

        :return: tuple of (bridge interface IP, NetworkXML instance)
        """
        net_xml = NetworkXML()
        net_xml.name = self.name
        net_xml.forward = {'mode': 'bridge'}
        net_xml.bridge = {'name': self.iface}
        ip = utils_net.get_ip_address_by_interface(self.iface)
        return ip, net_xml

    def __init__(self, net_type, address=None, iface=None, net_name=None,
                 persistent=False):
        """
        :param net_type: one of 'vnet', 'macvtap' or 'bridge'
        :param address: network address, required for 'vnet'
        :param iface: host interface name, required for 'macvtap'/'bridge'
        :param net_name: network name; auto-generated when None
        :param persistent: define+start (persistent) vs create (transient)
        """
        if net_name is None:
            self.name = 'virt-test-%s' % net_type
        else:
            self.name = net_name
        self.address = address
        self.iface = iface
        self.persistent = persistent
        if net_type == 'vnet':
            if not self.address:
                raise exceptions.TestError('Create vnet need address be set')
            self.ip, net_xml = self.create_vnet_xml()
        elif net_type == 'macvtap':
            if not self.iface:
                raise exceptions.TestError('Create macvtap need iface be set')
            self.ip, net_xml = self.create_macvtap_xml()
        elif net_type == 'bridge':
            if not self.iface:
                raise exceptions.TestError('Create bridge need iface be set')
            self.ip, net_xml = self.create_bridge_xml()
        else:
            raise exceptions.TestError(
                'Unknown libvirt network type %s' % net_type)
        if self.persistent:
            # Persistent network: define it, then start it.
            net_xml.define()
            net_xml.start()
        else:
            # Transient network: created and started in one step.
            net_xml.create()

    def cleanup(self):
        """
        Clear up network.
        """
        virsh.net_destroy(self.name)
        if self.persistent:
            virsh.net_undefine(self.name)
def cpus_parser(cpulist):
    """
    Parse a cpu list string into a sorted list of cpu numbers.

    The syntax is a comma separated list, with '-' for ranges and '^'
    denoting exclusion, e.g. "0-3,^2" -> [0, 1, 3].

    :param cpulist: a cpu list string, or None
    :return: sorted list of included cpu numbers, or None when cpulist is None
    """
    hyphens = []
    carets = []
    commas = []
    others = []

    if cpulist is None:
        return None

    if "," in cpulist:
        for item in re.split(",", cpulist):
            if "-" in item:
                bounds = re.split("-", item)
                # extend() instead of list+range so this also works on Py3.
                hyphens.extend(range(int(bounds[0]), int(bounds[-1]) + 1))
            elif "^" in item:
                carets.append(int(re.split(r"\^", item)[-1]))
            else:
                try:
                    commas.append(int(item))
                except ValueError:
                    logging.error("The cpulist has to be an "
                                  "integer. (%s)", item)
    elif "-" in cpulist:
        bounds = re.split("-", cpulist)
        hyphens = list(range(int(bounds[0]), int(bounds[-1]) + 1))
    elif "^" in cpulist:
        # Bug fix: the '^' must be escaped -- an unescaped "^" is the regex
        # start-of-string anchor, so re.split never split and int("^N")
        # raised an uncaught ValueError.
        carets.append(int(re.split(r"\^", cpulist)[-1]))
    else:
        try:
            others.append(int(cpulist))
            return others
        except ValueError:
            logging.error("The cpulist has to be an "
                          "integer. (%s)", cpulist)

    cpus_set = set(hyphens).union(set(commas)).difference(set(carets))
    return sorted(list(cpus_set))
def cpus_string_to_affinity_list(cpus_string, num_cpus):
    """
    Parse a cpus_string into an affinity list of 'y'/'-' flags.

    e.g. with host_cpu_count = 4:
        "0"      -> [y,-,-,-]
        "0,1"    -> [y,y,-,-]
        "0-2"    -> [y,y,y,-]
        "0-2,^2" -> [y,y,-,-]
        "r"      -> [y,y,y,y]
    """
    # Sanity-check the input format; a mismatch is only logged and parsing
    # proceeds regardless.
    single_pattern = r"\d+"
    between_pattern = r"\d+-\d+"
    exclude_pattern = r"\^\d+"
    sub_pattern = r"(%s)|(%s)|(%s)" % (exclude_pattern,
                                       single_pattern, between_pattern)
    pattern = r"^((%s),)*(%s)$" % (sub_pattern, sub_pattern)
    if not re.match(pattern, cpus_string):
        logging.debug("Cpus_string=%s is not a supported format for cpu_list."
                      % cpus_string)
    # Start with every cpu deselected.
    affinity = ["-"] * int(num_cpus)
    # Letter 'r' means all cpus.
    if cpus_string == "r":
        return ["y"] * len(affinity)
    # Apply each comma-separated term in order; later exclusions win.
    for term in cpus_string.split(","):
        if "-" in term:
            low = term.split("-")[0]
            high = term.split("-")[-1]
            for idx in range(int(low), int(high) + 1):
                affinity[idx] = "y"
        elif "^" in term:
            affinity[int(term.strip("^"))] = "-"
        else:
            affinity[int(term)] = "y"
    return affinity
def cpu_allowed_list_by_task(pid, tid):
    """
    Get the Cpus_allowed_list from /proc/<pid>/task/<tid>/status.

    :param pid: process id
    :param tid: task (thread) id
    :return: the list string (e.g. "0-3"), or None when it cannot be read
    """
    cmd = ("cat /proc/%s/task/%s/status|grep Cpus_allowed_list:"
           "| awk '{print $2}'" % (pid, tid))
    result = process.run(cmd, ignore_status=True)
    if result.exit_status:
        return None
    return result.stdout.strip()
def clean_up_snapshots(vm_name, snapshot_list=[]):
    """
    Do recovery after snapshot

    :param vm_name: Name of domain
    :param snapshot_list: The list of snapshot name you want to remove
    """
    if not snapshot_list:
        # No names given: query libvirt for every snapshot of the domain
        # and remove each one together with its external disk files.
        for snap_name in virsh.snapshot_list(vm_name):
            snap_xml = virsh.snapshot_dumpxml(vm_name,
                                              snap_name).stdout.strip()
            xtf_xml = xml_utils.XMLTreeFile(snap_xml)
            for source_elem in xtf_xml.findall('disks/disk/source'):
                # Delete useless disk snapshot file if exists
                os.system('rm -f %s' % source_elem.get('file'))
            # Delete snapshots of vm
            virsh.snapshot_delete(vm_name, snap_name)
    else:
        # Snapshot metadata is unavailable for the given names, so derive
        # each snapshot file path from the domain's current disk source.
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile
        disk_path = dom_xml.find('devices/disk/source').get('file')
        for name in snapshot_list:
            snap_disk_path = disk_path.split(".")[0] + "." + name
            os.system('rm -f %s' % snap_disk_path)
def get_all_cells():
    """
    Use virsh freecell --all to get all cells on host
    ::
        # virsh freecell --all
            0:     124200 KiB
            1:    1059868 KiB
        --------------------
        Total:    1184068 KiB
    That would return a dict like:
    ::
        cell_dict = {"0":"124200 KiB", "1":"1059868 KiB", "Total":"1184068 KiB"}
    :return: cell_dict
    """
    fc_result = virsh.freecell(options="--all", ignore_status=True)
    if fc_result.exit_status:
        # No NUMA support is a config limitation, not a failure.
        if fc_result.stderr.count("NUMA not supported"):
            raise exceptions.TestNAError(fc_result.stderr.strip())
        else:
            raise exceptions.TestFail(fc_result.stderr.strip())
    lines = fc_result.stdout.strip().splitlines()
    # Drop the "--------" separator (always second to last).
    del lines[-2]
    cell_dict = {}
    for line in lines:
        fields = line.split(":")
        cell_dict[fields[0].strip()] = fields[-1].strip()
    return cell_dict
def check_blockjob(vm_name, target, check_point="none", value="0"):
    """
    Run blockjob command to check block job progress, bandwidth, etc.

    :param vm_name: Domain name
    :param target: Domain disk target dev
    :param check_point: Job progress, bandwidth or none(no job)
    :param value: Value of progress, bandwidth or 0(no job)
    :return: Boolean value, true for pass, false for fail
    """
    if check_point not in ["progress", "bandwidth", "none"]:
        logging.error("Check point must be: progress, bandwidth or none")
        return False
    try:
        cmd_result = virsh.blockjob(
            vm_name, target, "--info", ignore_status=True)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
    # Bug fix: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        raise exceptions.TestFail("Error occur when running blockjob command.")
    if status == 0:
        # libvirt print job progress to stderr
        if not len(err):
            logging.debug("No block job find")
            if check_point == "none":
                return True
        else:
            if check_point == "none":
                logging.error("Expect no job but find block job:\n%s", err)
            elif check_point == "progress":
                progress = value + " %"
                if re.search(progress, err):
                    return True
            elif check_point == "bandwidth":
                # Bandwidth is reported on stdout as "...: <N> MiB/s".
                bandwidth = value + " MiB/s"
                if bandwidth == output.split(':')[1].strip():
                    logging.debug("Bandwidth is equal to %s", bandwidth)
                    return True
                else:
                    logging.error("Bandwidth is not equal to %s", bandwidth)
    else:
        logging.error("Run blockjob command fail")
    return False
def setup_or_cleanup_nfs(is_setup, mount_dir="nfs-mount", is_mount=False,
                         export_options="rw,no_root_squash",
                         mount_options="rw",
                         export_dir="nfs-export",
                         restore_selinux=""):
    """
    Set SElinux to "permissive" and Set up nfs service on localhost.
    Or clean up nfs service on localhost and restore SElinux.

    Note: SElinux status must be backed up and restored after use.
    Example:

    # Setup NFS.
    res = setup_or_cleanup_nfs(is_setup=True)
    # Backup SELinux status.
    selinux_bak = res["selinux_status_bak"]

    # Do something.
    ...

    # Cleanup NFS and restore NFS.
    res = setup_or_cleanup_nfs(is_setup=False, restore_selinux=selinux_bak)

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param mount_dir: NFS mount dir. This can be an absolute path on the
                      host or a relative path origin from libvirt tmp dir.
                      Default to "nfs-mount".
    :param is_mount: Boolean value, Whether the target NFS should be mounted.
    :param export_options: Options for the NFS export. Default to
                           "rw,no_root_squash".
    :param mount_options: Options for mounting nfs dir. Default to "rw".
    :param export_dir: NFS export dir. This can be an absolute path on the
                       host or a relative path origin from libvirt tmp dir.
                       Default to "nfs-export".
    :return: A dict contains export and mount result parameters:
             export_dir: Absolute directory of exported local NFS file system.
             mount_dir: Absolute directory NFS file system mounted on.
             selinux_status_bak: SELinux status before set
    """
    result = {}
    tmpdir = data_dir.get_tmp_dir()
    # Relative paths are anchored under the libvirt tmp dir.
    if not os.path.isabs(export_dir):
        export_dir = os.path.join(tmpdir, export_dir)
    if not os.path.isabs(mount_dir):
        mount_dir = os.path.join(tmpdir, mount_dir)
    result["export_dir"] = export_dir
    result["mount_dir"] = mount_dir
    result["selinux_status_bak"] = utils_selinux.get_status()
    nfs_params = {"nfs_mount_dir": mount_dir, "nfs_mount_options": mount_options,
                  "nfs_mount_src": export_dir, "setup_local_nfs": "yes",
                  "export_options": export_options}
    _nfs = nfs.Nfs(nfs_params)
    if is_setup:
        # Set selinux to permissive that the file in nfs
        # can be used freely
        if utils_selinux.is_enforcing():
            utils_selinux.set_status("permissive")
        _nfs.setup()
        if not is_mount:
            # Export only: undo the mount Nfs.setup() performed and drop
            # the mount_dir key so callers don't use a stale path.
            _nfs.umount()
            del result["mount_dir"]
    else:
        if restore_selinux:
            utils_selinux.set_status(restore_selinux)
        _nfs.unexportfs_in_clean = True
        _nfs.cleanup()
    return result
def setup_or_cleanup_iscsi(is_setup, is_login=True,
                           emulated_image="emulated-iscsi", image_size="1G",
                           chap_user="", chap_passwd="", restart_tgtd="no"):
    """
    Set up(and login iscsi target) or clean up iscsi service on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param is_login: Boolean value, true for login, false for not login
    :param emulated_image: name of iscsi device
    :param image_size: emulated image's size
    :param chap_user: CHAP authentication username
    :param chap_passwd: CHAP authentication password
    :param restart_tgtd: whether to restart the tgtd service ("yes"/"no")
    :return: iscsi device name (login), iscsi target (export only) or ""
    """
    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    emulated_path = os.path.join(tmpdir, emulated_image)
    # Target IQN embeds the current year-month per iSCSI naming convention.
    emulated_target = ("iqn.%s.com.virttest:%s.target" %
                       (time.strftime("%Y-%m"), emulated_image))
    iscsi_params = {"emulated_image": emulated_path, "target": emulated_target,
                    "image_size": image_size, "iscsi_thread_id": "virt",
                    "chap_user": chap_user, "chap_passwd": chap_passwd,
                    "restart_tgtd": restart_tgtd}
    _iscsi = iscsi.Iscsi.create_iSCSI(iscsi_params)
    if is_setup:
        if is_login:
            _iscsi.login()
            # The device doesn't necessarily appear instantaneously, so give
            # about 5 seconds for it to appear before giving up
            iscsi_device = utils_misc.wait_for(_iscsi.get_device_name, 5, 0, 1,
                                               "Searching iscsi device name.")
            if iscsi_device:
                logging.debug("iscsi device: %s", iscsi_device)
                return iscsi_device
            if not iscsi_device:
                logging.error("Not find iscsi device.")
            # Cleanup and return "" - caller needs to handle that
            # _iscsi.export_target() will have set the emulated_id and
            # export_flag already on success...
            _iscsi.cleanup()
            process.run("rm -f %s" % emulated_path)
        else:
            _iscsi.export_target()
            return emulated_target
    else:
        # Force-mark the target as exported so cleanup() tears it down fully.
        _iscsi.export_flag = True
        _iscsi.emulated_id = _iscsi.get_target_id()
        _iscsi.cleanup()
        process.run("rm -f %s" % emulated_path)
        # Refresh the LVM metadata cache in case the device backed a VG.
        process.run("vgscan --cache", ignore_status=True)
    return ""
def get_host_ipv4_addr():
    """
    Get host ipv4 addr: the first IPv4 address found on any interface
    that is UP.

    :return: the IPv4 address string
    :raise exceptions.TestFail: if no UP interface has an IPv4 address
    """
    # Bug fix: ip_addr used to be unbound (NameError) when no UP interface
    # carried an IPv4 address; initialize it so the failure path raises
    # TestFail as intended.
    ip_addr = None
    if_up = utils_net.get_net_if(state="UP")
    for i in if_up:
        ipv4_value = utils_net.get_net_if_addrs(i)["ipv4"]
        logging.debug("ipv4_value is %s", ipv4_value)
        if ipv4_value != []:
            ip_addr = ipv4_value[0]
            break
    if ip_addr is not None:
        logging.info("ipv4 address is %s", ip_addr)
    else:
        raise exceptions.TestFail("Fail to get ip address")
    return ip_addr
def setup_or_cleanup_gluster(is_setup, vol_name, brick_path="", pool_name="",
                             file_path="/etc/glusterfs/glusterd.vol"):
    """
    Set up or clean up glusterfs environment on localhost

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param vol_name: gluster created volume name
    :param brick_path: Dir for create glusterfs
    :param pool_name: name used to build the default brick dir when
                      brick_path is not given
    :param file_path: path of glusterd.vol used to enable insecure RPC
    :return: ip_addr or nothing
    """
    # glusterfs is optional tooling; skip the test when it is absent.
    try:
        utils_misc.find_command("gluster")
    except ValueError:
        raise exceptions.TestNAError("Missing command 'gluster'")
    if not brick_path:
        tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
        brick_path = os.path.join(tmpdir, pool_name)
    if is_setup:
        ip_addr = get_host_ipv4_addr()
        gluster.add_rpc_insecure(file_path)
        gluster.glusterd_start()
        logging.debug("finish start gluster")
        gluster.gluster_vol_create(vol_name, ip_addr, brick_path, force=True)
        gluster.gluster_allow_insecure(vol_name)
        logging.debug("finish vol create in gluster")
        return ip_addr
    else:
        # Teardown order: stop volume, delete volume, remove brick dir.
        gluster.gluster_vol_stop(vol_name, True)
        gluster.gluster_vol_delete(vol_name)
        gluster.gluster_brick_delete(brick_path)
        return ""
def define_pool(pool_name, pool_type, pool_target, cleanup_flag, **kwargs):
    """
    To define a given type pool(Support types: 'dir', 'netfs', logical',
    iscsi', 'gluster', 'disk' and 'fs').

    :param pool_name: Name of the pool
    :param pool_type: Type of the pool
    :param pool_target: Target for underlying storage
    :param cleanup_flag: A list contains 3 booleans and 1 string stands for
                         need_cleanup_nfs, need_cleanup_iscsi,
                         need_cleanup_logical, selinux_bak and
                         need_cleanup_gluster; it is mutated in place so the
                         caller knows what to tear down afterwards.
    :param kwargs: key words for special pool define. eg, glusterfs pool
                   source path and source name, etc
    :return: CmdResult of 'virsh pool-define-as', or None on CmdError
    """
    extra = ""
    vg_name = pool_name
    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False
    selinux_bak = ""
    cleanup_gluster = False
    if not os.path.exists(pool_target) and pool_type != "gluster":
        os.mkdir(pool_target)
    if pool_type == "dir":
        pass
    elif pool_type == "netfs":
        # Set up NFS server without mount
        res = setup_or_cleanup_nfs(True, pool_target, False)
        nfs_path = res["export_dir"]
        selinux_bak = res["selinux_status_bak"]
        cleanup_nfs = True
        extra = "--source-host %s --source-path %s" % ('localhost',
                                                       nfs_path)
    elif pool_type == "logical":
        # Create vg by using iscsi device
        lv_utils.vg_create(vg_name, setup_or_cleanup_iscsi(True))
        cleanup_iscsi = True
        cleanup_logical = True
        extra = "--source-name %s" % vg_name
    elif pool_type == "iscsi":
        # Set up iscsi target without login
        iscsi_target = setup_or_cleanup_iscsi(True, False)
        cleanup_iscsi = True
        extra = "--source-host %s --source-dev %s" % ('localhost',
                                                      iscsi_target)
    elif pool_type == "disk":
        # Set up iscsi target and login
        device_name = setup_or_cleanup_iscsi(True)
        cleanup_iscsi = True
        # Create a partition to make sure disk pool can start
        cmd = "parted -s %s mklabel msdos" % device_name
        process.run(cmd)
        cmd = "parted -s %s mkpart primary ext4 0 100" % device_name
        process.run(cmd)
        extra = "--source-dev %s" % device_name
    elif pool_type == "fs":
        # Set up iscsi target and login
        device_name = setup_or_cleanup_iscsi(True)
        cleanup_iscsi = True
        # Format disk to make sure fs pool can start
        cmd = "mkfs.ext4 -F %s" % device_name
        process.run(cmd)
        extra = "--source-dev %s" % device_name
    elif pool_type == "gluster":
        gluster_source_path = kwargs.get('gluster_source_path')
        gluster_source_name = kwargs.get('gluster_source_name')
        gluster_file_name = kwargs.get('gluster_file_name')
        gluster_file_type = kwargs.get('gluster_file_type')
        gluster_file_size = kwargs.get('gluster_file_size')
        gluster_vol_number = kwargs.get('gluster_vol_number')
        # Prepare gluster service and create volume
        hostip = setup_or_cleanup_gluster(True, gluster_source_name,
                                          pool_name=pool_name)
        logging.debug("hostip is %s", hostip)
        # create image in gluster volume
        file_path = "gluster://%s/%s" % (hostip, gluster_source_name)
        for i in range(gluster_vol_number):
            file_name = "%s_%d" % (gluster_file_name, i)
            process.run("qemu-img create -f %s %s/%s %s" %
                        (gluster_file_type, file_path, file_name,
                         gluster_file_size))
        cleanup_gluster = True
        extra = "--source-host %s --source-path %s --source-name %s" % \
                (hostip, gluster_source_path, gluster_source_name)
    elif pool_type in ["scsi", "mpath", "rbd", "sheepdog"]:
        raise exceptions.TestNAError(
            "Pool type '%s' has not yet been supported in the test." %
            pool_type)
    else:
        raise exceptions.TestFail("Invalid pool type: '%s'." % pool_type)
    # Mark the clean up flags
    cleanup_flag[0] = cleanup_nfs
    cleanup_flag[1] = cleanup_iscsi
    cleanup_flag[2] = cleanup_logical
    cleanup_flag[3] = selinux_bak
    cleanup_flag[4] = cleanup_gluster
    # Bug fix: 'result' used to be unbound when pool_define_as raised
    # CmdError, turning the failure path into a NameError; initialize it so
    # that path returns None instead.
    result = None
    try:
        result = virsh.pool_define_as(pool_name, pool_type, pool_target, extra,
                                      ignore_status=True)
    except process.CmdError:
        logging.error("Define '%s' type pool fail.", pool_type)
    return result
def verify_virsh_console(session, user, passwd, timeout=10, debug=False):
    """
    Run commands in console session.

    Logs in through the given console session (handling the escape-character
    banner, login and password prompts), runs 'cat /proc/cpuinfo' and checks
    the output looks like cpuinfo.

    :param session: aexpect console session
    :param user: login user name
    :param passwd: login password
    :param timeout: seconds to wait for each expected prompt
    :param debug: emit debug logging for each matched prompt
    :return: True when login and the command check succeed, False otherwise
    """
    log = ""
    console_cmd = "cat /proc/cpuinfo"
    try:
        while True:
            # Drive the login state machine until a shell prompt appears.
            match, text = session.read_until_last_line_matches(
                [r"[E|e]scape character is", r"login:",
                 r"[P|p]assword:", session.prompt],
                timeout, internal_timeout=1)
            if match == 0:
                if debug:
                    logging.debug("Got '^]', sending '\\n'")
                session.sendline()
            elif match == 1:
                if debug:
                    logging.debug("Got 'login:', sending '%s'", user)
                session.sendline(user)
            elif match == 2:
                if debug:
                    logging.debug("Got 'Password:', sending '%s'", passwd)
                session.sendline(passwd)
            elif match == 3:
                if debug:
                    logging.debug("Got Shell prompt -- logged in")
                break
        status, output = session.cmd_status_output(console_cmd)
        logging.info("output of command:\n%s", output)
        session.close()
    except (aexpect.ShellError,
            aexpect.ExpectError), detail:
        log = session.get_output()
        logging.error("Verify virsh console failed:\n%s\n%s", detail, log)
        session.close()
        return False

    if not re.search("processor", output):
        logging.error("Verify virsh console failed: Result does not match.")
        return False

    return True
def pci_label_from_address(address_dict, radix=10):
    """
    Generate a pci label from a dict of address.

    :param address_dict: A dict contains domain, bus, slot and function.
    :param radix: The radix of your data in address_dict.

    Example:

    ::

        address_dict = {'domain': '0x0000', 'bus': '0x08', 'slot': '0x10', 'function': '0x0'}
        radix = 16
        return = pci_0000_08_10_0
    """
    try:
        domain = int(address_dict['domain'], radix)
        bus = int(address_dict['bus'], radix)
        slot = int(address_dict['slot'], radix)
        function = int(address_dict['function'], radix)
    except (TypeError, KeyError), detail:
        raise exceptions.TestError(detail)
    # Zero-pad to the libvirt nodedev name widths: 4/2/2/1 hex digits.
    pci_label = ("pci_%04x_%02x_%02x_%01x" % (domain, bus, slot, function))
    return pci_label
def mk_label(disk, label="msdos", session=None):
    """
    Write a new partition table (disk label) on the given disk.

    :param disk: block device path
    :param label: label type understood by parted (default "msdos")
    :param session: optional remote/guest session; run locally when None
    """
    cmd = "parted -s %s mklabel %s" % (disk, label)
    if session:
        session.cmd(cmd)
    else:
        process.run(cmd)
def mk_part(disk, size="100M", session=None):
    """
    Create a primary ext4 partition on the disk (after writing a new
    msdos label).

    :param disk: block device path
    :param size: end position of the partition (parted syntax)
    :param session: optional remote/guest session; run locally when None
    """
    label_cmd = "parted -s %s mklabel msdos" % disk
    part_cmd = "parted -s %s mkpart primary ext4 0 %s" % (disk, size)
    if session:
        session.cmd(label_cmd)
        session.cmd(part_cmd)
    else:
        process.run(label_cmd)
        process.run(part_cmd)
def mkfs(partition, fs_type, options="", session=None):
    """
    Make a file system on the partition.

    :param partition: device path to format
    :param fs_type: filesystem type (suffix of the mkfs.* helper)
    :param options: extra mkfs options, placed before the device
    :param session: optional remote/guest session; run locally when None
    """
    # Bug fix: options used to be appended AFTER the device, where the
    # mkfs tools parse trailing arguments as the filesystem size operand;
    # options must precede the device.
    mkfs_cmd = "mkfs.%s -F %s %s" % (fs_type, options, partition)
    if session:
        session.cmd(mkfs_cmd)
    else:
        process.run(mkfs_cmd)
def get_parts_list(session=None):
    """
    Return the partition/device names listed in /proc/partitions.

    :param session: optional session to read from; local host when None
    :return: list of names (e.g. ['sda', 'sda1'])
    """
    parts_cmd = "cat /proc/partitions"
    if session:
        _, parts_out = session.cmd_status_output(parts_cmd)
    else:
        parts_out = process.run(parts_cmd).stdout
    parts = []
    if parts_out:
        for line in parts_out.rsplit("\n"):
            # Skip the "major minor ..." header and blank separator lines.
            if line.startswith("major") or line == "":
                continue
            fields = line.rsplit()
            # Data rows have exactly 4 columns; the name is the last one.
            if len(fields) == 4:
                parts.append(fields[3])
    logging.debug("Find parts: %s" % parts)
    return parts
def yum_install(pkg_list, session=None):
    """
    Ensure every package in pkg_list is installed (rpm query, else yum).

    :param pkg_list: list of package names
    :param session: optional session to run in; local host when None
    :raise exceptions.TestError: if pkg_list is not a list
    :raise exceptions.TestFail: if any package fails to install
    """
    if not isinstance(pkg_list, list):
        raise exceptions.TestError("Parameter error.")
    yum_cmd = "rpm -q {0} || yum -y install {0}"
    for pkg in pkg_list:
        cmd = yum_cmd.format(pkg)
        if session:
            status = session.cmd_status(cmd)
        else:
            status = process.run(cmd).exit_status
        if status:
            raise exceptions.TestFail("Failed to install package: %s"
                                      % pkg)
def check_actived_pool(pool_name):
    """
    Check if pool_name exist in active pool list.

    :param pool_name: name of the storage pool to check
    :return: True when the pool exists and is active
    :raise exceptions.TestFail: when the pool is missing or inactive
    """
    sp = libvirt_storage.StoragePool()
    if not sp.pool_exists(pool_name):
        raise exceptions.TestFail("Can't find pool %s" % pool_name)
    if not sp.is_pool_active(pool_name):
        raise exceptions.TestFail("Pool %s is not active." % pool_name)
    logging.debug("Find active pool %s", pool_name)
    return True
class PoolVolumeTest(object):
"""Test class for storage pool or volume"""
    def __init__(self, test, params):
        """
        :param test: the running test object (its tmpdir is used as scratch
                     space for pool targets and NFS exports)
        :param params: test parameters dict
        """
        self.tmpdir = test.tmpdir
        self.params = params
        # SELinux mode captured during setup so cleanup can restore it.
        self.selinux_bak = ""
    def cleanup_pool(self, pool_name, pool_type, pool_target, emulated_image,
                     **kwargs):
        """
        Delete vols, destroy the created pool and restore the env.

        The finally-block always runs so the host environment (NFS export,
        logical volumes, iscsi target, gluster volume, target dirs) is torn
        down even when deleting the libvirt pool itself fails.

        :param pool_name: name of the pool to remove
        :param pool_type: pool type used at creation time
        :param pool_target: target path (relative paths are under tmpdir)
        :param emulated_image: name of the emulated iscsi backing image
        :param kwargs: source_format, source_name, device_name as used
                       by pre_pool
        """
        sp = libvirt_storage.StoragePool()
        source_format = kwargs.get('source_format')
        source_name = kwargs.get('source_name')
        # "/DEV/EXAMPLE" is a sentinel meaning "no real device was given".
        device_name = kwargs.get('device_name', "/DEV/EXAMPLE")
        try:
            if sp.pool_exists(pool_name):
                pv = libvirt_storage.PoolVolume(pool_name)
                if pool_type in ["dir", "netfs", "logical", "disk"]:
                    if sp.is_pool_active(pool_name):
                        vols = pv.list_volumes()
                        for vol in vols:
                            # Ignore failed deletion here for deleting pool
                            pv.delete_volume(vol)
                if not sp.delete_pool(pool_name):
                    raise exceptions.TestFail(
                        "Delete pool %s failed" % pool_name)
        finally:
            if pool_type == "netfs" and source_format != 'glusterfs':
                nfs_server_dir = self.params.get("nfs_server_dir", "nfs-server")
                nfs_path = os.path.join(self.tmpdir, nfs_server_dir)
                setup_or_cleanup_nfs(is_setup=False, export_dir=nfs_path,
                                     restore_selinux=self.selinux_bak)
                if os.path.exists(nfs_path):
                    shutil.rmtree(nfs_path)
            if pool_type == "logical":
                cmd = "pvs |grep vg_logical|awk '{print $1}'"
                pv = process.system_output(cmd)
                # Cleanup logical volume anyway
                process.run("vgremove -f vg_logical", ignore_status=True)
                process.run("pvremove %s" % pv, ignore_status=True)
            # These types used iscsi device
            # If we did not provide block device
            if (pool_type in ["logical", "fs", "disk"] and
                    device_name.count("EXAMPLE")):
                setup_or_cleanup_iscsi(is_setup=False,
                                       emulated_image=emulated_image)
            # Used iscsi device anyway
            if pool_type in ["iscsi", "scsi"]:
                setup_or_cleanup_iscsi(is_setup=False,
                                       emulated_image=emulated_image)
                if pool_type == "scsi":
                    scsi_xml_file = self.params.get("scsi_xml_file", "")
                    if os.path.exists(scsi_xml_file):
                        os.remove(scsi_xml_file)
            if pool_type in ["dir", "fs", "netfs"]:
                pool_target = os.path.join(self.tmpdir, pool_target)
                if os.path.exists(pool_target):
                    shutil.rmtree(pool_target)
            if pool_type == "gluster" or source_format == 'glusterfs':
                setup_or_cleanup_gluster(False, source_name)
    def pre_pool(self, pool_name, pool_type, pool_target, emulated_image,
                 **kwargs):
        """
        Prepare(define or create) the specific type pool

        :param pool_name: created pool name
        :param pool_type: dir, disk, logical, fs, netfs or else
        :param pool_target: target of storage pool
        :param emulated_image: use an image file to simulate a scsi disk
                               it could be used for disk, logical pool, etc
        :param kwargs: key words for specific pool; recognized keys include
                       image_size, source_format, source_name, persistent,
                       device_name, pre_disk_vol, export_options, ip_protocal,
                       source_path
        :raise exceptions.TestFail: when pool creation/definition fails
                                    (cleanup_pool is invoked first)
        """
        extra = ""
        image_size = kwargs.get('image_size', "100M")
        source_format = kwargs.get('source_format')
        source_name = kwargs.get('source_name', None)
        persistent = kwargs.get('persistent', False)
        # "/DEV/EXAMPLE" placeholder => no real block device supplied
        device_name = kwargs.get('device_name', "/DEV/EXAMPLE")
        # If tester does not provide block device, creating one
        if (device_name.count("EXAMPLE") and
                pool_type in ["disk", "fs", "logical"]):
            device_name = setup_or_cleanup_iscsi(is_setup=True,
                                                 emulated_image=emulated_image,
                                                 image_size=image_size)
        # NOTE(review): for persistent dir pools the target is used verbatim
        # and is not created under tmpdir — confirm callers pre-create it.
        if pool_type == "dir" and not persistent:
            pool_target = os.path.join(self.tmpdir, pool_target)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
        elif pool_type == "disk":
            # Disk pool does not allow to create volume by virsh command,
            # so introduce parameter 'pre_disk_vol' to create partition(s)
            # by 'parted' command, the parameter is a list of partition size,
            # and the max number of partitions is 4. If pre_disk_vol is None,
            # disk pool will have no volume
            pre_disk_vol = kwargs.get('pre_disk_vol', None)
            if type(pre_disk_vol) == list and len(pre_disk_vol):
                for vol in pre_disk_vol:
                    mk_part(device_name, vol)
            else:
                mk_label(device_name, "gpt")
            extra = " --source-dev %s" % device_name
            if source_format:
                extra += " --source-format %s" % source_format
        elif pool_type == "fs":
            # Put an ext4 filesystem on the (emulated) device
            cmd = "mkfs.ext4 -F %s" % device_name
            pool_target = os.path.join(self.tmpdir, pool_target)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            extra = " --source-dev %s" % device_name
            process.run(cmd)
        elif pool_type == "logical":
            # Build PV + VG on the device; cleanup_pool removes "vg_logical"
            logical_device = device_name
            cmd_pv = "pvcreate %s" % logical_device
            vg_name = "vg_%s" % pool_type
            cmd_vg = "vgcreate %s %s" % (vg_name, logical_device)
            extra = "--source-name %s" % vg_name
            process.run(cmd_pv)
            process.run(cmd_vg)
            # Create a small volume for verification
            # And VG path will not exist if no any volume in.(bug?)
            cmd_lv = "lvcreate --name default_lv --size 1M %s" % vg_name
            process.run(cmd_lv)
        elif pool_type == "netfs":
            export_options = kwargs.get('export_options',
                                        "rw,async,no_root_squash")
            pool_target = os.path.join(self.tmpdir, pool_target)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            if source_format == 'glusterfs':
                # netfs pool backed by a gluster volume instead of NFS
                hostip = setup_or_cleanup_gluster(True, source_name,
                                                  pool_name=pool_name)
                logging.debug("hostip is %s", hostip)
                extra = "--source-host %s --source-path %s" % (hostip,
                                                               source_name)
                extra += " --source-format %s" % source_format
                process.system("setsebool virt_use_fusefs on")
            else:
                nfs_server_dir = self.params.get(
                    "nfs_server_dir", "nfs-server")
                nfs_path = os.path.join(self.tmpdir, nfs_server_dir)
                if not os.path.exists(nfs_path):
                    os.mkdir(nfs_path)
                res = setup_or_cleanup_nfs(is_setup=True,
                                           export_options=export_options,
                                           export_dir=nfs_path)
                # Remember selinux state so cleanup_pool can restore it
                self.selinux_bak = res["selinux_status_bak"]
                source_host = self.params.get("source_host", "localhost")
                extra = "--source-host %s --source-path %s" % (source_host,
                                                               nfs_path)
        elif pool_type == "iscsi":
            ip_protocal = kwargs.get('ip_protocal', "ipv4")
            setup_or_cleanup_iscsi(is_setup=True,
                                   emulated_image=emulated_image,
                                   image_size=image_size)
            # Find our emulated target among the active sessions, then log
            # out so libvirt itself owns the iscsi login for the pool.
            iscsi_sessions = iscsi.iscsi_get_sessions()
            iscsi_target = None
            for iscsi_node in iscsi_sessions:
                if iscsi_node[1].count(emulated_image):
                    iscsi_target = iscsi_node[1]
                    break
            iscsi.iscsi_logout(iscsi_target)
            if ip_protocal == "ipv6":
                ip_addr = "::1"
            else:
                ip_addr = "127.0.0.1"
            extra = " --source-host %s --source-dev %s" % (ip_addr,
                                                           iscsi_target)
        elif pool_type == "scsi":
            scsi_xml_file = self.params.get("scsi_xml_file", "")
            if not os.path.exists(scsi_xml_file):
                logical_device = setup_or_cleanup_iscsi(
                    is_setup=True,
                    emulated_image=emulated_image,
                    image_size=image_size)
                # Map the iscsi block device back to its SCSI host number
                cmd = ("iscsiadm -m session -P 3 |grep -B3 %s| grep Host|awk "
                       "'{print $3}'" % logical_device.split('/')[2])
                scsi_host = process.system_output(cmd)
                scsi_pool_xml = pool_xml.PoolXML()
                scsi_pool_xml.name = pool_name
                scsi_pool_xml.pool_type = "scsi"
                scsi_pool_xml.target_path = pool_target
                scsi_pool_source_xml = pool_xml.SourceXML()
                scsi_pool_source_xml.adp_type = 'scsi_host'
                scsi_pool_source_xml.adp_name = "host" + scsi_host
                scsi_pool_xml.set_source(scsi_pool_source_xml)
                logging.debug("SCSI pool XML %s:\n%s", scsi_pool_xml.xml,
                              str(scsi_pool_xml))
                scsi_xml_file = scsi_pool_xml.xml
                # Stash the XML path so cleanup_pool can remove it later
                self.params['scsi_xml_file'] = scsi_xml_file
        elif pool_type == "gluster":
            source_path = kwargs.get('source_path')
            hostip = setup_or_cleanup_gluster(True, source_name,
                                              pool_name=pool_name)
            logging.debug("Gluster host ip address: %s", hostip)
            extra = "--source-host %s --source-path %s --source-name %s" % \
                (hostip, source_path, source_name)
        # Pick the virsh entry point: create (transient) vs define
        # (persistent); scsi pools are built from an XML file instead of
        # pool-create-as/pool-define-as arguments.
        func = virsh.pool_create_as
        if pool_type == "scsi":
            func = virsh.pool_create
        if persistent:
            func = virsh.pool_define_as
            if pool_type == "scsi":
                func = virsh.pool_define
        # Create/define pool
        if pool_type == "scsi":
            result = func(scsi_xml_file, debug=True)
        else:
            result = func(pool_name, pool_type, pool_target, extra, debug=True)
        # Here, virsh.pool_create_as return a boolean value and all other 3
        # functions return CmdResult object
        if isinstance(result, bool):
            re_v = result
        else:
            re_v = result.exit_status == 0
        if not re_v:
            self.cleanup_pool(pool_name, pool_type, pool_target,
                              emulated_image, **kwargs)
            raise exceptions.TestFail("Prepare pool failed")
        xml_str = virsh.pool_dumpxml(pool_name)
        logging.debug("New prepared pool XML: %s", xml_str)
def pre_vol(self, vol_name, vol_format, capacity, allocation, pool_name):
"""
Preapare the specific type volume in pool
"""
pv = libvirt_storage.PoolVolume(pool_name)
if not pv.create_volume(vol_name, capacity, allocation, vol_format):
raise exceptions.TestFail("Prepare volume failed.")
if not pv.volume_exists(vol_name):
raise exceptions.TestFail("Can't find volume: %s", vol_name)
# Migration related functions ##############
class MigrationTest(object):

    """Class for migration tests"""

    def __init__(self):
        # To get result in thread, using member parameters
        # Result of virsh migrate command
        # True means command executed successfully
        self.RET_MIGRATION = True
        # A lock for threads
        self.RET_LOCK = threading.RLock()
        # The time spent when migrating vms
        # format: vm_name -> time(seconds)
        self.mig_time = {}

    def thread_func_migration(self, vm, desturi, options=None):
        """
        Thread for virsh migrate command.

        On failure, RET_MIGRATION is set to False under RET_LOCK so the
        parent thread can detect it after joining.

        :param vm: A libvirt vm instance(local or remote).
        :param desturi: remote host uri.
        :param options: virsh migrate options; defaults to
                        "--live --timeout=60".
        """
        # Migrate the domain.
        try:
            if options is None:
                options = "--live --timeout=60"
            stime = int(time.time())
            vm.migrate(desturi, option=options, ignore_status=False,
                       debug=True)
            etime = int(time.time())
            # Record wall-clock migration time per vm
            self.mig_time[vm.name] = etime - stime
        except process.CmdError as detail:
            logging.error("Migration to %s failed:\n%s", desturi, detail)
            self.RET_LOCK.acquire()
            self.RET_MIGRATION = False
            self.RET_LOCK.release()

    def do_migration(self, vms, srcuri, desturi, migration_type, options=None,
                     thread_timeout=60):
        """
        Migrate vms.

        :param vms: migrated vms.
        :param srcuri: local uri, used when migrate vm from remote to local
        :param desturi: remote uri, used when migrate vm from local to remote
        :param migration_type: "orderly" (one vm at a time), "cross"
                               (ping-pong two vms between hosts) or
                               "simultaneous" (all vms in parallel)
        :param options: virsh migrate options forwarded to the worker thread
        :param thread_timeout: seconds to wait for each migration thread
        :raise exceptions.TestFail: if any migration failed or timed out
        """
        if migration_type == "orderly":
            for vm in vms:
                migration_thread = threading.Thread(target=self.thread_func_migration,
                                                    args=(vm, desturi, options))
                migration_thread.start()
                migration_thread.join(thread_timeout)
                if migration_thread.is_alive():
                    logging.error("Migrate %s timeout.", migration_thread)
                    self.RET_LOCK.acquire()
                    self.RET_MIGRATION = False
                    self.RET_LOCK.release()
        elif migration_type == "cross":
            # Migrate a vm to remote first,
            # then migrate another to remote with the first vm back
            vm_remote = vms.pop()
            self.thread_func_migration(vm_remote, desturi)
            for vm in vms:
                thread1 = threading.Thread(target=self.thread_func_migration,
                                           args=(vm_remote, srcuri, options))
                thread2 = threading.Thread(target=self.thread_func_migration,
                                           args=(vm, desturi, options))
                thread1.start()
                thread2.start()
                thread1.join(thread_timeout)
                thread2.join(thread_timeout)
                vm_remote = vm
                # BUGFIX: the second check used thread1 twice, so a hung
                # thread2 was never detected.
                if thread1.is_alive() or thread2.is_alive():
                    logging.error("Cross migrate timeout.")
                    self.RET_LOCK.acquire()
                    self.RET_MIGRATION = False
                    self.RET_LOCK.release()
            # Add popped vm back to list
            vms.append(vm_remote)
        elif migration_type == "simultaneous":
            migration_threads = []
            for vm in vms:
                migration_threads.append(threading.Thread(
                    target=self.thread_func_migration,
                    args=(vm, desturi, options)))
            # let all migration going first
            for thread in migration_threads:
                thread.start()
            # listen threads until they end
            for thread in migration_threads:
                thread.join(thread_timeout)
                if thread.is_alive():
                    logging.error("Migrate %s timeout.", thread)
                    self.RET_LOCK.acquire()
                    self.RET_MIGRATION = False
                    self.RET_LOCK.release()
        if not self.RET_MIGRATION:
            raise exceptions.TestFail()

    def cleanup_dest_vm(self, vm, srcuri, desturi):
        """
        Cleanup migrated vm on remote host.

        :param vm: the migrated vm object; its connect_uri is temporarily
                   switched to desturi and restored to srcuri afterwards
        :param srcuri: local uri to restore on the vm object
        :param desturi: remote uri where the vm should be removed
        """
        vm.connect_uri = desturi
        if vm.exists():
            if vm.is_persistent():
                vm.undefine()
            if vm.is_alive():
                # If vm on remote host is unaccessible
                # graceful shutdown may cause confused
                vm.destroy(gracefully=False)
        # Set connect uri back to local uri
        vm.connect_uri = srcuri
def check_result(result, expected_fails=None, skip_if=None, any_error=False):
    """
    Check the result of a command and check command error message against
    expectation.

    :param result: Command result instance.
    :param expected_fails: list of regex of expected stderr patterns. The check
                           will pass if any of these patterns matches.
    :param skip_if: list of regex of expected patterns. The check will raise a
                    TestNAError if any of these patterns matches.
    :param any_error: Whether expect on any error message. Setting to True will
                      will override expected_fails
    :raise exceptions.TestNAError: when a skip_if pattern matches stderr
    :raise exceptions.TestFail: when the outcome contradicts the expectation
    """
    # Avoid mutable default arguments; normalize to fresh lists per call.
    if expected_fails is None:
        expected_fails = []
    if skip_if is None:
        skip_if = []
    logging.debug("Command result:\n%s" % result)
    if skip_if:
        for patt in skip_if:
            if re.search(patt, result.stderr):
                raise exceptions.TestNAError("Test skipped: found '%s' in test "
                                             "result:\n%s" %
                                             (patt, result.stderr))
    if any_error:
        # Any non-zero exit is acceptable in this mode
        if result.exit_status:
            return
        else:
            raise exceptions.TestFail(
                "Expect should fail but got:\n%s" % result)
    if result.exit_status:
        if expected_fails:
            if not any(re.search(patt, result.stderr)
                       for patt in expected_fails):
                raise exceptions.TestFail("Expect should fail with one of %s, "
                                          "but failed with:\n%s" %
                                          (expected_fails, result))
        else:
            raise exceptions.TestFail(
                "Expect should succeed, but got:\n%s" % result)
    else:
        if expected_fails:
            raise exceptions.TestFail("Expect should fail with one of %s, "
                                      "but succeeded:\n%s" %
                                      (expected_fails, result))
def check_exit_status(result, expect_error=False):
    """
    Check the exit status of virsh commands.

    :param result: Virsh command result object
    :param expect_error: Boolean value, expect command success or fail
    :raise exceptions.TestFail: when the outcome contradicts expect_error
    """
    if expect_error:
        # The command was expected to fail; success is the error here.
        if result.exit_status == 0:
            raise exceptions.TestFail("Expect fail, but run successfully.")
    else:
        if result.exit_status != 0:
            raise exceptions.TestFail(result.stderr)
        logging.debug("Command output:\n%s", result.stdout.strip())
def get_interface_details(vm_name):
    """
    Get the interface details from virsh domiflist command output

    :param vm_name: name of the domain to query
    :return: list of all interfaces details, one dict per interface with
             keys: interface, type, source, model, mac
    """
    # Parse the domif-list command output
    domiflist_out = virsh.domiflist(vm_name).stdout
    # Regular expression for the below output
    #  vnet0    bridge   virbr0   virtio   52:54:00:b2:b3:b4
    pattern = re.compile(r"^(\w+|-)\s+(\w+)\s+(\w+)\s+(\S+)\s+"
                         "(([a-fA-F0-9]{2}:?){6})")
    details = []
    for line in domiflist_out.split('\n'):
        found = pattern.search(line)
        # Due to the extra space in the list
        if found is None:
            continue
        details.append({'interface': found.group(1),
                        'type': found.group(2),
                        'source': found.group(3),
                        'model': found.group(4),
                        'mac': found.group(5)})
    return details
def get_ifname_host(vm_name, mac):
    """
    Get the vm interface name on host

    :param vm_name: name of the domain to query
    :param mac: MAC address to look up
    :return: interface name, None if not exist
    """
    matching = [iface["interface"]
                for iface in get_interface_details(vm_name)
                if iface["mac"] == mac]
    return matching[0] if matching else None
def check_iface(iface_name, checkpoint, extra="", **dargs):
    """
    Check interface with specified checkpoint.

    :param iface_name: Interface name
    :param checkpoint: Check if interface exists,
                       and It's MAC address, IP address and State,
                       also connectivity by ping.
                       valid checkpoint: [exists, mac, ip, ping, state]
    :param extra: Extra string for checking; its meaning depends on the
                  checkpoint (iface-list option, expected MAC, expected IP
                  or ping destination)
    :param dargs: extra keywords for the "ping" checkpoint: count, timeout
    :return: Boolean value, true for pass, false for fail
    :raise exceptions.TestFail: when any unexpected exception occurs
    """
    support_check = ["exists", "mac", "ip", "ping"]
    iface = utils_net.Interface(name=iface_name)
    check_pass = False
    try:
        if checkpoint == "exists":
            # extra is iface-list option
            list_find, ifcfg_find = (False, False)
            # Check virsh list output
            result = virsh.iface_list(extra, ignore_status=True)
            check_exit_status(result, False)
            output = re.findall(r"(\S+)\ +(\S+)\ +(\S+|\s+)[\ +\n]",
                                str(result.stdout))
            # NOTE(review): py2-style use of filter() as a list; under
            # py3 this would need list(filter(...)) — confirm before porting.
            if filter(lambda x: x[0] == iface_name, output[1:]):
                list_find = True
            logging.debug("Find '%s' in virsh iface-list output: %s",
                          iface_name, list_find)
            # Check network script
            iface_script = "/etc/sysconfig/network-scripts/ifcfg-" + iface_name
            ifcfg_find = os.path.exists(iface_script)
            logging.debug("Find '%s': %s", iface_script, ifcfg_find)
            check_pass = list_find and ifcfg_find
        elif checkpoint == "mac":
            # extra is the MAC address to compare
            iface_mac = iface.get_mac().lower()
            check_pass = iface_mac == extra
            logging.debug("MAC address of %s: %s", iface_name, iface_mac)
        elif checkpoint == "ip":
            # extra is the IP address to compare
            iface_ip = iface.get_ip()
            check_pass = iface_ip == extra
            logging.debug("IP address of %s: %s", iface_name, iface_ip)
        elif checkpoint == "state":
            # check iface State
            result = virsh.iface_list(extra, ignore_status=True)
            check_exit_status(result, False)
            output = re.findall(r"(\S+)\ +(\S+)\ +(\S+|\s+)[\ +\n]",
                                str(result.stdout))
            # NOTE(review): filter(...)[0][1] relies on py2 list semantics
            # and raises IndexError if the iface is missing from the list.
            iface_state = filter(
                lambda x: x[0] == iface_name, output[1:])[0][1]
            # active corresponds True, otherwise return False
            check_pass = iface_state == "active"
        elif checkpoint == "ping":
            # extra is the ping destination
            count = dargs.get("count", 3)
            timeout = dargs.get("timeout", 5)
            ping_s, _ = ping(dest=extra, count=count, interface=iface_name,
                             timeout=timeout,)
            check_pass = ping_s == 0
        else:
            # "state" is accepted above but absent from support_check;
            # keeping the message as-is to preserve behavior.
            logging.debug("Support check points are: %s", support_check)
            logging.error("Unsupport check point: %s", checkpoint)
    except Exception, detail:
        raise exceptions.TestFail("Interface check failed: %s" % detail)
    return check_pass
def create_hostdev_xml(pci_id, boot_order=0):
    """
    Create a hostdev configuration file.

    :param pci_id: such as "0000:03:04.0"
    :param boot_order: optional boot order priority; omitted when falsy
    :return: path of the generated hostdev XML file
    """
    # Split "dddd:bb:ss.f" into the four hex address components
    domain_attr = "0x%s" % pci_id.split(':')[0]
    bus_attr = "0x%s" % pci_id.split(':')[1]
    slot_attr = "0x%s" % pci_id.split(':')[-1].split('.')[0]
    function_attr = "0x%s" % pci_id.split('.')[-1]

    xml_obj = hostdev.Hostdev()
    xml_obj.mode = "subsystem"
    xml_obj.managed = "yes"
    xml_obj.hostdev_type = "pci"
    if boot_order:
        xml_obj.boot_order = boot_order
    address_attrs = {'domain': domain_attr, 'slot': slot_attr,
                     'bus': bus_attr, 'function': function_attr}
    xml_obj.source_address = xml_obj.new_source_address(**address_attrs)
    logging.debug("Hostdev XML:\n%s", str(xml_obj))
    return xml_obj.xml
def alter_boot_order(vm_name, pci_id, boot_order=0):
    """
    Alter the startup sequence of VM to PCI-device firstly

    OS boot element and per-device boot elements are mutually exclusive,
    It's necessary that remove all OS boots before setting PCI-device order

    :param vm_name: VM name
    :param pci_id: such as "0000:06:00.1"
    :param boot_order: order priority, such as 1, 2, ...
    :raise exceptions.TestError: when pci_id cannot be parsed
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    # remove all of OS boots
    vmxml.remove_all_boots()
    # prepare PCI-device XML with boot order
    try:
        colon_fields = pci_id.split(':')
        domain_attr = "0x%s" % colon_fields[0]
        bus_attr = "0x%s" % colon_fields[1]
        slot_attr = "0x%s" % colon_fields[-1].split('.')[0]
        function_attr = "0x%s" % pci_id.split('.')[-1]
    except IndexError:
        raise exceptions.TestError("Invalid PCI Info: %s" % pci_id)
    address_attrs = {'domain': domain_attr, 'slot': slot_attr,
                     'bus': bus_attr, 'function': function_attr}
    vmxml.add_hostdev(address_attrs, boot_order=boot_order)
    # synchronize XML
    vmxml.sync()
def create_disk_xml(params):
    """
    Create a disk configuration file.

    :param params: dict of disk properties; recognized keys include
                   type_name, target_dev, target_bus, device_type,
                   disk_snapshot_attr, source_* keys per disk type,
                   seclabel keys, auth/secret keys and driver_* keys
    :return: path of the generated disk XML file

    NOTE(review): any exception raised while building the XML is only
    logged (see the except block below) and the partially-built XML is
    still returned — presumably intentional for negative tests; confirm.
    """
    # Create attributes dict for disk's address element
    type_name = params.get("type_name", "file")
    target_dev = params.get("target_dev", "vdb")
    target_bus = params.get("target_bus", "virtio")
    diskxml = disk.Disk(type_name)
    diskxml.device = params.get("device_type", "disk")
    snapshot_attr = params.get('disk_snapshot_attr')
    if snapshot_attr:
        diskxml.snapshot = snapshot_attr
    source_attrs = {}
    source_host = []
    source_seclabel = []
    auth_attrs = {}
    driver_attrs = {}
    try:
        # Per-type <source> attributes
        if type_name == "file":
            source_file = params.get("source_file", "")
            source_attrs = {'file': source_file}
        elif type_name == "block":
            source_file = params.get("source_file", "")
            source_attrs = {'dev': source_file}
        elif type_name == "dir":
            source_dir = params.get("source_dir", "")
            source_attrs = {'dir': source_dir}
        elif type_name == "volume":
            source_pool = params.get("source_pool")
            source_volume = params.get("source_volume")
            source_mode = params.get("source_mode", "")
            source_attrs = {'pool': source_pool, 'volume': source_volume}
            if source_mode:
                source_attrs.update({"mode": source_mode})
        elif type_name == "network":
            source_protocol = params.get("source_protocol")
            source_name = params.get("source_name")
            source_host_name = params.get("source_host_name").split()
            source_host_port = params.get("source_host_port").split()
            transport = params.get("transport")
            source_attrs = {'protocol': source_protocol, 'name': source_name}
            source_host = []
            for host_name, host_port in zip(source_host_name, source_host_port):
                source_host.append({'name': host_name,
                                    'port': host_port})
            if transport:
                source_host[0].update({'transport': transport})
        else:
            # NOTE(review): missing `raise` — this constructs the error and
            # discards it, so unsupported types fall through silently.
            exceptions.TestNAError("Unsupport disk type %s" % type_name)
        source_startupPolicy = params.get("source_startupPolicy")
        if source_startupPolicy:
            source_attrs['startupPolicy'] = source_startupPolicy
        # Optional <seclabel> on the source
        sec_model = params.get("sec_model")
        relabel = params.get("relabel")
        label = params.get("sec_label")
        if sec_model or relabel:
            sec_dict = {}
            sec_xml = seclabel.Seclabel()
            if sec_model:
                sec_dict.update({'model': sec_model})
            if relabel:
                sec_dict.update({'relabel': relabel})
            if label:
                sec_dict.update({'label': label})
            sec_xml.update(sec_dict)
            logging.debug("The sec xml is %s", sec_xml.xmltreefile)
            source_seclabel.append(sec_xml)
        source_params = {"attrs": source_attrs, "seclabels": source_seclabel,
                         "hosts": source_host}
        src_config_file = params.get("source_config_file")
        if src_config_file:
            source_params.update({"config_file": src_config_file})
            # If we use config file, "hosts" isn't needed
            if "hosts" in source_params:
                source_params.pop("hosts")
        snapshot_name = params.get('source_snap_name')
        if snapshot_name:
            source_params.update({"snapshot_name": snapshot_name})
        diskxml.source = diskxml.new_disk_source(**source_params)
        # Optional <auth> element (secret either by uuid or by usage)
        auth_user = params.get("auth_user")
        secret_type = params.get("secret_type")
        secret_uuid = params.get("secret_uuid")
        secret_usage = params.get("secret_usage")
        if auth_user:
            auth_attrs['auth_user'] = auth_user
        if secret_type:
            auth_attrs['secret_type'] = secret_type
        if secret_uuid:
            auth_attrs['secret_uuid'] = secret_uuid
        elif secret_usage:
            auth_attrs['secret_usage'] = secret_usage
        if auth_attrs:
            diskxml.auth = diskxml.new_auth(**auth_attrs)
        # <driver> attributes; only truthy values are emitted
        driver_name = params.get("driver_name", "qemu")
        driver_type = params.get("driver_type", "")
        driver_cache = params.get("driver_cache", "")
        driver_discard = params.get("driver_discard", "")
        if driver_name:
            driver_attrs['name'] = driver_name
        if driver_type:
            driver_attrs['type'] = driver_type
        if driver_cache:
            driver_attrs['cache'] = driver_cache
        if driver_discard:
            driver_attrs['discard'] = driver_discard
        if driver_attrs:
            diskxml.driver = driver_attrs
        diskxml.readonly = "yes" == params.get("readonly", "no")
        diskxml.share = "yes" == params.get("shareable", "no")
        diskxml.target = {'dev': target_dev, 'bus': target_bus}
    except Exception, detail:
        # Errors are swallowed by design: only logged, XML still returned
        logging.error("Fail to create disk XML:\n%s", detail)
    logging.debug("Disk XML %s:\n%s", diskxml.xml, str(diskxml))

    # Wait for file completed
    def file_exists():
        if not process.run("ls %s" % diskxml.xml,
                           ignore_status=True).exit_status:
            return True
    utils_misc.wait_for(file_exists, 5)
    return diskxml.xml
def create_net_xml(net_name, params):
    """
    Create a new network or update an existed network xml

    :param net_name: network name; used when params has no "net_name" key
    :param params: dict of network properties (bridge, forward, dns,
                   bandwidth, ip/dhcp, routes, portgroups, ...)
    :return: the written NetworkXML instance
    :raise exceptions.TestFail: when building the network XML fails
    """
    dns_dict = {}
    host_dict = {}
    # BUGFIX: previously this was params.get("net_name", "default"), which
    # silently discarded the net_name argument; fall back to it instead.
    net_name = params.get("net_name", net_name)
    net_bridge = params.get("net_bridge", '{}')
    net_forward = params.get("net_forward", '{}')
    forward_iface = params.get("forward_iface")
    net_dns_forward = params.get("net_dns_forward")
    net_dns_txt = params.get("net_dns_txt")
    net_dns_srv = params.get("net_dns_srv")
    net_dns_forwarders = params.get("net_dns_forwarders", "").split()
    net_dns_hostip = params.get("net_dns_hostip")
    net_dns_hostnames = params.get("net_dns_hostnames", "").split()
    net_domain = params.get("net_domain")
    net_virtualport = params.get("net_virtualport")
    net_bandwidth_inbound = params.get("net_bandwidth_inbound", "{}")
    net_bandwidth_outbound = params.get("net_bandwidth_outbound", "{}")
    net_ip_family = params.get("net_ip_family")
    net_ip_address = params.get("net_ip_address")
    net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
    net_ipv6_address = params.get("net_ipv6_address")
    net_ipv6_prefix = params.get("net_ipv6_prefix", "64")
    nat_port = params.get("nat_port")
    guest_name = params.get("guest_name")
    guest_ipv4 = params.get("guest_ipv4")
    guest_ipv6 = params.get("guest_ipv6")
    guest_mac = params.get("guest_mac")
    dhcp_start_ipv4 = params.get("dhcp_start_ipv4", "192.168.122.2")
    dhcp_end_ipv4 = params.get("dhcp_end_ipv4", "192.168.122.254")
    dhcp_start_ipv6 = params.get("dhcp_start_ipv6")
    dhcp_end_ipv6 = params.get("dhcp_end_ipv6")
    tftp_root = params.get("tftp_root")
    bootp_file = params.get("bootp_file")
    routes = params.get("routes", "").split()
    pg_name = params.get("portgroup_name", "").split()
    try:
        if not virsh.net_info(net_name, ignore_status=True).exit_status:
            # Edit an existed network: reuse its XML, drop old <ip> entries
            netxml = network_xml.NetworkXML.new_from_net_dumpxml(net_name)
            netxml.del_ip()
        else:
            netxml = network_xml.NetworkXML(net_name)
        # <dns> element (forward mode, txt/srv records, forwarders, host)
        if net_dns_forward:
            dns_dict["dns_forward"] = net_dns_forward
        if net_dns_txt:
            dns_dict["txt"] = ast.literal_eval(net_dns_txt)
        if net_dns_srv:
            dns_dict["srv"] = ast.literal_eval(net_dns_srv)
        if net_dns_forwarders:
            dns_dict["forwarders"] = [ast.literal_eval(x) for x in
                                      net_dns_forwarders]
        if net_dns_hostip:
            host_dict["host_ip"] = net_dns_hostip
        if net_dns_hostnames:
            host_dict["hostnames"] = net_dns_hostnames
        dns_obj = netxml.new_dns(**dns_dict)
        if host_dict:
            host = dns_obj.new_host(**host_dict)
            dns_obj.host = host
        netxml.dns = dns_obj
        # Top-level network elements; string params hold python literals
        bridge = ast.literal_eval(net_bridge)
        if bridge:
            netxml.bridge = bridge
        forward = ast.literal_eval(net_forward)
        if forward:
            netxml.forward = forward
        if forward_iface:
            interface = [
                {'dev': x} for x in forward_iface.split()]
            netxml.forward_interface = interface
        if nat_port:
            netxml.nat_port = ast.literal_eval(nat_port)
        if net_domain:
            netxml.domain_name = net_domain
        net_inbound = ast.literal_eval(net_bandwidth_inbound)
        net_outbound = ast.literal_eval(net_bandwidth_outbound)
        if net_inbound:
            netxml.bandwidth_inbound = net_inbound
        if net_outbound:
            netxml.bandwidth_outbound = net_outbound
        if net_virtualport:
            netxml.virtualport_type = net_virtualport
        if net_ip_family == "ipv6":
            ipxml = network_xml.IPXML()
            ipxml.family = net_ip_family
            ipxml.prefix = net_ipv6_prefix
            # IPv6 addressing uses prefix, not netmask
            del ipxml.netmask
            if net_ipv6_address:
                ipxml.address = net_ipv6_address
            if dhcp_start_ipv6 and dhcp_end_ipv6:
                ipxml.dhcp_ranges = {"start": dhcp_start_ipv6,
                                     "end": dhcp_end_ipv6}
            if guest_name and guest_ipv6 and guest_mac:
                # guest_mac gates the entry but is not emitted —
                # presumably because IPv6 host entries match by name only
                ipxml.hosts = [{"name": guest_name,
                                "ip": guest_ipv6}]
            netxml.set_ip(ipxml)
        if net_ip_address:
            ipxml = network_xml.IPXML(net_ip_address,
                                      net_ip_netmask)
            if dhcp_start_ipv4 and dhcp_end_ipv4:
                ipxml.dhcp_ranges = {"start": dhcp_start_ipv4,
                                     "end": dhcp_end_ipv4}
            if tftp_root:
                ipxml.tftp_root = tftp_root
            if bootp_file:
                ipxml.dhcp_bootp = bootp_file
            if guest_name and guest_ipv4 and guest_mac:
                ipxml.hosts = [{"mac": guest_mac,
                                "name": guest_name,
                                "ip": guest_ipv4}]
            netxml.set_ip(ipxml)
        if routes:
            netxml.routes = [ast.literal_eval(x) for x in routes]
        # <portgroup> entries: parallel space-separated lists indexed by i
        if pg_name:
            pg_default = params.get("portgroup_default",
                                    "").split()
            pg_virtualport = params.get(
                "portgroup_virtualport", "").split()
            pg_bandwidth_inbound = params.get(
                "portgroup_bandwidth_inbound", "").split()
            pg_bandwidth_outbound = params.get(
                "portgroup_bandwidth_outbound", "").split()
            pg_vlan = params.get("portgroup_vlan", "").split()
            for i in range(len(pg_name)):
                pgxml = network_xml.PortgroupXML()
                pgxml.name = pg_name[i]
                if len(pg_default) > i:
                    pgxml.default = pg_default[i]
                if len(pg_virtualport) > i:
                    pgxml.virtualport_type = pg_virtualport[i]
                if len(pg_bandwidth_inbound) > i:
                    pgxml.bandwidth_inbound = ast.literal_eval(
                        pg_bandwidth_inbound[i])
                if len(pg_bandwidth_outbound) > i:
                    pgxml.bandwidth_outbound = ast.literal_eval(
                        pg_bandwidth_outbound[i])
                if len(pg_vlan) > i:
                    pgxml.vlan_tag = ast.literal_eval(pg_vlan[i])
                netxml.set_portgroup(pgxml)
        logging.debug("New network xml file: %s", netxml)
        netxml.xmltreefile.write()
        return netxml
    except Exception as detail:
        stacktrace.log_exc_info(sys.exc_info())
        raise exceptions.TestFail("Fail to create network XML: %s" % detail)
def create_nwfilter_xml(params):
    """
    Create a new network filter or update an existed network filter xml

    :param params: dict of filter properties; recognized keys include
                   filter_name, exist_filter (template to copy rules from),
                   filter_chain, filter_priority, filter_uuid,
                   filterref_name_* keys and a "rule" string where multiple
                   rules are separated by the literal token "EOL" and each
                   attribute is given as key=value
    :return: the written NwfilterXML instance
    :raise exceptions.TestFail: on unsupported protocol or XML build failure
    """
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    filter_chain = params.get("filter_chain")
    filter_priority = params.get("filter_priority", "")
    filter_uuid = params.get("filter_uuid")

    # process filterref_name
    filterrefs_list = []
    filterrefs_key = []
    for i in params.keys():
        if 'filterref_name_' in i:
            filterrefs_key.append(i)
    # Sort keys so filterrefs keep a deterministic order
    filterrefs_key.sort()
    for i in filterrefs_key:
        filterrefs_dict = {}
        filterrefs_dict['filter'] = params[i]
        filterrefs_list.append(filterrefs_dict)

    # prepare rule and protocol attributes
    protocol = {}
    rule_dict = {}
    rule_dict_tmp = {}
    RULE_ATTR = ('rule_action', 'rule_direction', 'rule_priority',
                 'rule_statematch')
    PROTOCOL_TYPES = ['mac', 'vlan', 'stp', 'arp', 'rarp', 'ip', 'ipv6',
                      'tcp', 'udp', 'sctp', 'icmp', 'igmp', 'esp', 'ah',
                      'udplite', 'all', 'tcp-ipv6', 'udp-ipv6', 'sctp-ipv6',
                      'icmpv6', 'esp-ipv6', 'ah-ipv6', 'udplite-ipv6',
                      'all-ipv6']
    # rule should end with 'EOL' as separator, multiple rules are supported
    rule = params.get("rule")
    if rule:
        # Parse each "key=value key=value ..." rule segment into a dict,
        # keyed by its ordinal index
        rule_list = rule.split('EOL')
        for i in range(len(rule_list)):
            if rule_list[i]:
                attr = rule_list[i].split()
                for j in range(len(attr)):
                    attr_list = attr[j].split('=')
                    rule_dict_tmp[attr_list[0]] = attr_list[1]
                rule_dict[i] = rule_dict_tmp
                rule_dict_tmp = {}

        # process protocol parameter
        for i in rule_dict.keys():
            if 'protocol' not in rule_dict[i]:
                # Set protocol as string 'None' as parse from cfg is
                # string 'None'
                protocol[i] = 'None'
            else:
                protocol[i] = rule_dict[i]['protocol']
                rule_dict[i].pop('protocol')

                if protocol[i] in PROTOCOL_TYPES:
                    # replace '-' with '_' in ipv6 types as '-' is not
                    # supposed to be in class name
                    if '-' in protocol[i]:
                        protocol[i] = protocol[i].replace('-', '_')
                else:
                    raise exceptions.TestFail("Given protocol type %s"
                                              " is not in supported list %s"
                                              % (protocol[i], PROTOCOL_TYPES))

    try:
        new_filter = nwfilter_xml.NwfilterXML()
        filterxml = new_filter.new_from_filter_dumpxml(exist_filter)

        # Set filter attribute
        filterxml.filter_name = filter_name
        filterxml.filter_priority = filter_priority
        if filter_chain:
            filterxml.filter_chain = filter_chain
        if filter_uuid:
            filterxml.uuid = filter_uuid
        filterxml.filterrefs = filterrefs_list

        # Set rule attribute
        index_total = filterxml.get_rule_index()
        # Keep a pristine template of the first existing rule, then remove
        # all rules from the copied filter before adding the new ones
        rule = filterxml.get_rule(0)
        rulexml = rule.backup_rule()
        for i in index_total:
            filterxml.del_rule()
        for i in range(len(rule_dict.keys())):
            rulexml.rule_action = rule_dict[i].get('rule_action')
            rulexml.rule_direction = rule_dict[i].get('rule_direction')
            rulexml.rule_priority = rule_dict[i].get('rule_priority')
            rulexml.rule_statematch = rule_dict[i].get('rule_statematch')
            for j in RULE_ATTR:
                if j in rule_dict[i].keys():
                    rule_dict[i].pop(j)

            # set protocol attribute
            if protocol[i] != 'None':
                protocolxml = rulexml.get_protocol(protocol[i])
                new_one = protocolxml.new_attr(**rule_dict[i])
                protocolxml.attrs = new_one
                rulexml.xmltreefile = protocolxml.xmltreefile
            else:
                rulexml.del_protocol()

            filterxml.add_rule(rulexml)

            # Reset rulexml
            rulexml = rule.backup_rule()

        filterxml.xmltreefile.write()
        logging.info("The network filter xml is:\n%s" % filterxml)
        return filterxml

    except Exception, detail:
        stacktrace.log_exc_info(sys.exc_info())
        raise exceptions.TestFail("Fail to create nwfilter XML: %s" % detail)
def create_channel_xml(params, alias=False, address=False):
    """
    Create a XML contains channel information.

    :param params: the params for Channel slot
    :param alias: allow to add 'alias' slot
    :param address: allow to add 'address' slot
    :return: a Channel object built from the assembled attributes
    :raise exceptions.TestFail: when channel_type_name is missing
    """
    channel_type_name = params.get("channel_type_name")
    source_mode = params.get("source_mode")
    source_path = params.get("source_path")
    target_type = params.get("target_type")
    target_name = params.get("target_name")
    if channel_type_name is None:
        raise exceptions.TestFail("channel_type_name not specified.")

    # Only carry attributes the caller actually supplied.
    source_attrs = {}
    if source_mode:
        source_attrs['mode'] = source_mode
    if source_path:
        source_attrs['path'] = source_path
    target_attrs = {}
    if target_type:
        target_attrs['type'] = target_type
    if target_name:
        target_attrs['name'] = target_name

    channel_params = {'type_name': channel_type_name,
                      'source': source_attrs,
                      'target': target_attrs}
    if alias:
        # The alias slot mirrors the channel target name.
        channel_params['alias'] = {'name': target_name}
    if address:
        channel_params['address'] = {'type': 'virtio-serial',
                                     'controller': '0',
                                     'bus': '0'}
    channelxml = channel.Channel.new_from_dict(channel_params)
    logging.debug("Channel XML:\n%s", channelxml)
    return channelxml
def set_domain_state(vm, vm_state):
    """
    Set domain state.

    :param vm: the vm object
    :param vm_state: the given vm state string "shut off", "running"
                     "paused", "halt" or "pm_suspend"
    :raise exceptions.TestNAError: when pm-suspend-hybrid is unavailable
                                   in the guest
    """
    # Always start from a cleanly destroyed domain.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    if vm_state == "shut off":
        return
    vm.start()
    session = vm.wait_for_login()
    if vm_state == "paused":
        vm.pause()
    elif vm_state == "halt":
        try:
            session.cmd("halt")
        except (aexpect.ShellProcessTerminatedError, aexpect.ShellStatusError):
            # The halt command always gets these errors, but execution is OK,
            # skip these errors
            pass
    elif vm_state == "pm_suspend":
        # Execute "pm-suspend-hybrid" command directly will get Timeout error,
        # so here execute it in background, and wait for 3s manually
        if session.cmd_status("which pm-suspend-hybrid"):
            raise exceptions.TestNAError("Cannot execute this test for domain"
                                         " doesn't have pm-suspend-hybrid command!")
        session.cmd("pm-suspend-hybrid &")
        time.sleep(3)
def set_guest_agent(vm):
    """
    Set domain xml with guest agent channel and install guest agent rpm
    in domain.

    :param vm: the vm object
    :raise: exceptions.TestFail if the agent cannot be installed or started
    """
    logging.warning("This function is going to be deprecated. "
                    "Please use vm.prepare_guest_agent() instead.")
    # reset domain state
    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Add the guest-agent virtio channel to the inactive XML and persist it
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    logging.debug("Attempting to set guest agent channel")
    vmxml.set_agent_channel()
    vmxml.sync()
    vm.start()
    session = vm.wait_for_login()
    # Install qemu-guest-agent in the guest if it is not present already
    cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
    stat_install = session.cmd_status(cmd, 300)
    if stat_install != 0:
        raise exceptions.TestFail("Fail to install qemu-guest-agent, make "
                                  "sure that you have usable repo in guest")
    # Check if qemu-ga already started
    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
    if stat_ps != 0:
        # Not running yet: start the daemon and verify it came up
        session.cmd("qemu-ga -d")
        # Check if the qemu-ga really started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            raise exceptions.TestFail("Fail to run qemu-ga in guest")
def set_vm_disk(vm, params, tmp_dir=None, test=None):
    """
    Replace vm first disk with given type in domain xml, including file type
    (local, nfs), network type(gluster, iscsi), block type(use connected iscsi
    block disk).

    For all types, all following params are common and need be specified:

        disk_device: default to 'disk'
        disk_type: 'block' or 'network'
        disk_target: default to 'vda'
        disk_target_bus: default to 'virtio'
        disk_format: default to 'qcow2'
        disk_src_protocol: 'iscsi', 'gluster' or 'netfs'

    For 'gluster' network type, following params are gluster only and need be
    specified:

        vol_name: string
        pool_name: default to 'gluster-pool'
        transport: 'tcp', 'rdma' or '', default to ''

    For 'iscsi' network type, following params need be specified:

        image_size: default to "10G", 10G is raw size of jeos disk
        disk_src_host: default to "127.0.0.1"
        disk_src_port: default to "3260"

    For 'netfs' network type, following params need be specified:

        mnt_path_name: the mount dir name, default to "nfs-mount"
        export_options: nfs mount options, default to "rw,no_root_squash,fsid=0"

    For 'block' type, using connected iscsi block disk, following params need
    be specified:

        image_size: default to "10G", 10G is raw size of jeos disk

    :param vm: the vm object
    :param params: dict, dict include setup vm disk xml configurations
    :param tmp_dir: string, dir path
    :param test: avocado test instance; required when disk_type is 'volume'
                 (passed through to PoolVolumeTest)
    :raise: exceptions.TestFail/TestError on unexpected disk type or
            failure to prepare the backing storage
    """
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    logging.debug("original xml is: %s", vmxml.xmltreefile)
    # Gather all configuration knobs from params up front
    disk_device = params.get("disk_device", "disk")
    disk_snapshot_attr = params.get("disk_snapshot_attr")
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_src_protocol = params.get("disk_source_protocol")
    disk_src_name = params.get("disk_source_name")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_config = params.get("disk_source_config")
    disk_snap_name = params.get("disk_snap_name")
    emu_image = params.get("emulated_image", "emulated-iscsi")
    image_size = params.get("image_size", "10G")
    disk_format = params.get("disk_format", "qcow2")
    mnt_path_name = params.get("mnt_path_name", "nfs-mount")
    exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
    first_disk = vm.get_first_disk_devices()
    blk_source = first_disk['source']
    disk_xml = vmxml.devices.by_device_tag('disk')[0]
    src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
    sec_model = params.get('sec_model')
    relabel = params.get('relabel')
    sec_label = params.get('sec_label')
    pool_name = params.get("pool_name", "set-vm-disk-pool")
    disk_src_mode = params.get('disk_src_mode', 'host')
    auth_user = params.get("auth_user")
    secret_type = params.get("secret_type")
    secret_usage = params.get("secret_usage")
    secret_uuid = params.get("secret_uuid")
    # Common attributes of the new disk xml; source-specific attributes are
    # merged in later via disk_params_src.
    disk_params = {'device_type': disk_device,
                   'disk_snapshot_attr': disk_snapshot_attr,
                   'type_name': disk_type,
                   'target_dev': disk_target,
                   'target_bus': disk_target_bus,
                   'driver_type': disk_format,
                   'driver_cache': 'none',
                   'sec_model': sec_model,
                   'relabel': relabel,
                   'sec_label': sec_label,
                   'auth_user': auth_user,
                   'secret_type': secret_type,
                   'secret_uuid': secret_uuid,
                   'secret_usage': secret_usage}

    if not tmp_dir:
        tmp_dir = data_dir.get_tmp_dir()

    # gluster only params
    vol_name = params.get("vol_name")
    transport = params.get("transport", "")
    brick_path = os.path.join(tmp_dir, pool_name)
    image_convert = "yes" == params.get("image_convert", 'yes')

    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Replace domain disk with iscsi, gluster, block or netfs disk
    if disk_src_protocol == 'iscsi':
        if disk_type == 'block':
            is_login = True
        elif disk_type == 'network' or disk_type == 'volume':
            is_login = False
        else:
            raise exceptions.TestFail("Disk type '%s' not expected, only disk "
                                      "type 'block', 'network' or 'volume' work "
                                      "with 'iscsi'" % disk_type)

        if disk_type == 'volume':
            # Build an iscsi-backed storage pool and pick its first volume
            pvt = PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, 'iscsi', "/dev/disk/by-path",
                         emulated_image=emu_image,
                         image_size=image_size)
            # Get volume name
            cmd_result = virsh.vol_list(pool_name)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise exceptions.TestError("Fail to get volume name in "
                                           "pool %s" % pool_name)
            emulated_path = virsh.vol_path(vol_name, pool_name,
                                           debug=True).stdout.strip()
        else:
            # Setup iscsi target
            iscsi_target = setup_or_cleanup_iscsi(is_setup=True,
                                                  is_login=is_login,
                                                  image_size=image_size,
                                                  emulated_image=emu_image)
            emulated_path = os.path.join(tmp_dir, emu_image)

        # Copy first disk to emulated backing store path
        cmd = "qemu-img convert -f %s -O %s %s %s" % (src_disk_format,
                                                      disk_format,
                                                      blk_source,
                                                      emulated_path)
        process.run(cmd, ignore_status=False)

        if disk_type == 'block':
            disk_params_src = {'source_file': iscsi_target}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': pool_name,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/1",
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
    elif disk_src_protocol == 'gluster':
        # Setup gluster.
        host_ip = setup_or_cleanup_gluster(True, vol_name,
                                           brick_path, pool_name)
        logging.debug("host ip: %s " % host_ip)
        dist_img = "gluster.%s" % disk_format

        if image_convert:
            # Convert first disk to gluster disk path
            disk_cmd = ("qemu-img convert -f %s -O %s %s /mnt/%s" %
                        (src_disk_format, disk_format, blk_source, dist_img))
        else:
            # create another disk without convert
            disk_cmd = "qemu-img create -f %s /mnt/%s 10M" % (src_disk_format,
                                                              dist_img)

        # Mount the gluster disk and create the image.
        process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt"
                    % (host_ip, vol_name, disk_cmd))

        disk_params_src = {'source_protocol': disk_src_protocol,
                           'source_name': "%s/%s" % (vol_name, dist_img),
                           'source_host_name': host_ip,
                           'source_host_port': "24007"}
        if transport:
            disk_params_src.update({"transport": transport})
    elif disk_src_protocol == 'netfs':
        # Setup nfs
        res = setup_or_cleanup_nfs(True, mnt_path_name,
                                   is_mount=True,
                                   export_options=exp_opt)
        exp_path = res["export_dir"]
        mnt_path = res["mount_dir"]
        # Save SELinux state so the caller can restore it on cleanup
        params["selinux_status_bak"] = res["selinux_status_bak"]

        dist_img = "nfs-img"

        # Convert first disk to gluster disk path
        disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                    (src_disk_format, disk_format,
                     blk_source, exp_path, dist_img))
        process.run(disk_cmd, ignore_status=False)

        src_file_path = "%s/%s" % (mnt_path, dist_img)
        disk_params_src = {'source_file': src_file_path}
    elif disk_src_protocol == 'rbd':
        mon_host = params.get("mon_host")
        if image_convert:
            disk_cmd = ("qemu-img convert -f %s -O %s %s rbd:%s:mon_host=%s"
                        % (src_disk_format, disk_format, blk_source,
                           disk_src_name, mon_host))
            process.run(disk_cmd, ignore_status=False)
        disk_params_src = {'source_protocol': disk_src_protocol,
                           'source_name': disk_src_name,
                           'source_host_name': disk_src_host,
                           'source_host_port': disk_src_port,
                           'source_config_file': disk_src_config}
        if disk_snap_name:
            disk_params_src.update({'source_snap_name': disk_snap_name})
    else:
        # use current source file with update params
        disk_params_src = {'source_file': blk_source}

    # Delete disk elements
    disks = vmxml.get_devices(device_type="disk")
    for disk_ in disks:
        if disk_.target['dev'] == disk_target:
            vmxml.del_device(disk_)

    # New disk xml
    new_disk = disk.Disk(type_name=disk_type)
    new_disk.new_disk_source(attrs={'file': blk_source})
    disk_params.update(disk_params_src)
    disk_xml = create_disk_xml(disk_params)
    new_disk.xml = disk_xml

    # Add new disk xml and redefine vm
    vmxml.add_device(new_disk)
    logging.debug("The vm xml now is: %s" % vmxml.xmltreefile)
    vmxml.sync()
    vm.start()
def attach_additional_device(vm_name, targetdev, disk_path, params, config=True):
    """
    Create a disk with disksize, then attach it to given vm.

    :param vm_name: Libvirt VM name.
    :param targetdev: target of disk device
    :param disk_path: path of attached disk
    :param params: dict include necessary configurations of device
    :param config: True to make the change persistent (virsh --config)
    :return: CmdResult of the virsh attach-device command
    """
    logging.info("Attaching disk...")

    # Update params for source file
    params['source_file'] = disk_path
    params['target_dev'] = targetdev

    # Create a file of device
    xmlfile = create_disk_xml(params)

    # To confirm attached device do not exist.
    if config:
        extra = "--config"
    else:
        extra = ""
    virsh.detach_disk(vm_name, targetdev, extra=extra)

    return virsh.attach_device(domain_opt=vm_name, file_opt=xmlfile,
                               flagstr=extra, debug=True)
def device_exists(vm, target_dev):
    """
    Check if given target device exists on vm.

    :param vm: VM object providing get_blk_devices()
    :param target_dev: block device target name, e.g. "vda"
    :return: True if the device exists, False otherwise
    """
    # Membership test works directly on the mapping returned by
    # get_blk_devices(); no need for .keys() or an explicit if/else.
    return target_dev in vm.get_blk_devices()
def create_local_disk(disk_type, path=None,
                      size="10", disk_format="raw",
                      vgname=None, lvname=None):
    """
    Create a local disk image or logical volume for test use.

    :param disk_type: "file", "floppy", "iso" or "lvm"
    :param path: target path; required for every type except "lvm"
    :param size: disk size; a bare number is treated as GiB
    :param disk_format: qemu-img format for "file" type, default "raw"
    :param vgname: volume group name, required for "lvm"
    :param lvname: logical volume name, required for "lvm"
    :return: path of the created disk
    :raise: exceptions.TestError on missing arguments or unknown type
    """
    if disk_type != "lvm" and path is None:
        raise exceptions.TestError("Path is needed for creating local disk")
    if path:
        process.run("mkdir -p %s" % os.path.dirname(path))
    try:
        # Normalise a numeric size to a GiB suffix for the tools below
        size = str(float(size)) + "G"
    except ValueError:
        pass
    cmd = ""
    if disk_type == "file":
        cmd = "qemu-img create -f %s %s %s" % (disk_format, path, size)
    elif disk_type == "floppy":
        cmd = "dd if=/dev/zero of=%s count=1024 bs=1024" % path
    elif disk_type == "iso":
        cmd = "mkisofs -o %s /root/*.*" % path
    elif disk_type == "lvm":
        if vgname is None or lvname is None:
            raise exceptions.TestError("Both VG name and LV name are needed")
        lv_utils.lv_create(vgname, lvname, size)
        path = "/dev/%s/%s" % (vgname, lvname)
    else:
        raise exceptions.TestError("Unknown disk type %s" % disk_type)
    if cmd:
        process.run(cmd, ignore_status=True)
    return path
def delete_local_disk(disk_type, path=None,
                      vgname=None, lvname=None):
    """
    Remove a disk previously created by create_local_disk.

    :param disk_type: "file", "floppy", "iso" or "lvm"
    :param path: file path, required for the file-backed types
    :param vgname: volume group name, required for "lvm"
    :param lvname: logical volume name, required for "lvm"
    :raise: exceptions.TestError on missing arguments or unknown type
    """
    if disk_type in ["file", "floppy", "iso"]:
        if path is None:
            raise exceptions.TestError(
                "Path is needed for deleting local disk")
        else:
            cmd = "rm -f %s" % path
            process.run(cmd, ignore_status=True)
    elif disk_type == "lvm":
        if vgname is None or lvname is None:
            raise exceptions.TestError("Both VG name and LV name needed")
        lv_utils.lv_remove(vgname, lvname)
    else:
        raise exceptions.TestError("Unknown disk type %s" % disk_type)
def create_scsi_disk(scsi_option, scsi_size="2048"):
    """
    Get the scsi device created by scsi_debug kernel module

    :param scsi_option: The scsi_debug kernel module options.
    :param scsi_size: emulated device size in MB (dev_size_mb), default 2048
    :return: scsi device path if it is created successfully, else None
    :raise: exceptions.TestNAError when lsscsi is not installed
    """
    try:
        utils_misc.find_command("lsscsi")
    except ValueError:
        raise exceptions.TestNAError("Missing command 'lsscsi'.")

    try:
        # Load scsi_debug kernel module.
        # Unload it first if it's already loaded.
        if linux_modules.module_is_loaded("scsi_debug"):
            linux_modules.unload_module("scsi_debug")
        linux_modules.load_module("scsi_debug dev_size_mb=%s %s" %
                                  (scsi_size, scsi_option))
        # Get the scsi device name
        # NOTE(review): this command uses a shell pipeline; confirm
        # process.run executes it through a shell in this environment.
        scsi_disk = process.run("lsscsi|grep scsi_debug|"
                                "awk '{print $6}'").stdout.strip()
        logging.info("scsi disk: %s" % scsi_disk)
        return scsi_disk
    except Exception, e:
        logging.error(str(e))
        return None
def delete_scsi_disk():
    """
    Delete scsi device by removing scsi_debug kernel module.
    """
    # Unloading the module tears down the emulated device created by
    # create_scsi_disk(); a no-op if the module is not loaded.
    if linux_modules.module_is_loaded("scsi_debug"):
        linux_modules.unload_module("scsi_debug")
def set_controller_multifunction(vm_name, controller_type='scsi'):
    """
    Set multifunction on for controller device and expand to all function.

    :param vm_name: name of the domain to modify
    :param controller_type: controller type to expand, default 'scsi'
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    exist_controllers = vmxml.get_devices("controller")
    # Used to contain controllers in format:
    # domain:bus:slot:func -> controller object
    expanded_controllers = {}
    # The index of controller
    index = 0
    for e_controller in exist_controllers:
        if e_controller.type != controller_type:
            continue
        # Set multifunction on
        address_attrs = e_controller.address.attrs
        address_attrs['multifunction'] = "on"
        domain = address_attrs['domain']
        bus = address_attrs['bus']
        slot = address_attrs['slot']
        # NOTE(review): only functions 0x0-0x6 are generated here — confirm
        # whether function 0x7 is intentionally left out.
        all_funcs = ["0x0", "0x1", "0x2", "0x3", "0x4", "0x5", "0x6"]
        for func in all_funcs:
            key = "%s:%s:%s:%s" % (domain, bus, slot, func)
            address_attrs['function'] = func
            # Create a new controller instance
            new_controller = controller.Controller(controller_type)
            new_controller.xml = str(xml_utils.XMLTreeFile(e_controller.xml))
            new_controller.index = index
            new_controller.address = new_controller.new_controller_address(
                attrs=address_attrs)
            # Expand controller to all functions with multifunction
            if key not in expanded_controllers.keys():
                expanded_controllers[key] = new_controller
                index += 1
    logging.debug("Expanded controllers: %s", expanded_controllers.values())
    # Replace the original controllers of this type with the expanded set
    vmxml.del_controller(controller_type)
    vmxml.set_controller(expanded_controllers.values())
    vmxml.sync()
def attach_disks(vm, path, vgname, params):
    """
    Attach multiple disks.According parameter disk_type in params,
    it will create lvm or file type disks.

    :param vm: the vm object to attach disks to
    :param path: file type disk's path
    :param vgname: lvm type disk's volume group name
    :param params: dict with added_disks_count/size/type/target/format keys
    :return: dict mapping created disk path -> disk size
    :raise: exceptions.TestFail when attaching a device fails
    """
    # Additional disk on vm
    disks_count = int(params.get("added_disks_count", 1)) - 1
    multifunction_on = "yes" == params.get("multifunction_on", "no")
    disk_size = params.get("added_disk_size", "0.1")
    disk_type = params.get("added_disk_type", "file")
    disk_target = params.get("added_disk_target", "virtio")
    disk_format = params.get("added_disk_format", "raw")
    # Whether attaching device with --config
    attach_config = "yes" == params.get("attach_disk_config", "yes")

    def generate_disks_index(count, target="virtio"):
        # Generate `count` device names (vdb, vdc, ... vdaa, ...) in
        # spreadsheet-column order for the given driver type.
        # Created disks' index
        target_list = []
        # Used to flag progression
        index = 0
        # A list to maintain prefix for generating device
        # ['a','b','c'] means prefix abc
        prefix_list = []
        while count > 0:
            # Out of range for current prefix_list
            if (index / 26) > 0:
                # Update prefix_list to expand disks, such as [] -> ['a'],
                # ['z'] -> ['a', 'a'], ['z', 'z'] -> ['a', 'a', 'a']
                prefix_index = len(prefix_list)
                if prefix_index == 0:
                    prefix_list.append('a')
                # Append a new prefix to list, then update pre-'z' in list
                # to 'a' to keep the progression 1
                while prefix_index > 0:
                    prefix_index -= 1
                    prefix_cur = prefix_list[prefix_index]
                    if prefix_cur == 'z':
                        prefix_list[prefix_index] = 'a'
                        # All prefix in prefix_list are 'z',
                        # it's time to expand it.
                        if prefix_index == 0:
                            prefix_list.append('a')
                    else:
                        # For whole prefix_list, progression is 1
                        prefix_list[prefix_index] = chr(ord(prefix_cur) + 1)
                        break
                # Reset for another iteration
                index = 0
            prefix = "".join(prefix_list)
            suffix_index = index % 26
            suffix = chr(ord('a') + suffix_index)
            index += 1
            count -= 1
            # Generate device target according to driver type
            if target == "virtio":
                target_dev = "vd%s" % (prefix + suffix)
            elif target == "scsi":
                target_dev = "sd%s" % (prefix + suffix)
            target_list.append(target_dev)
        return target_list

    target_list = generate_disks_index(disks_count, disk_target)

    # A dict include disks information: source file and size
    added_disks = {}
    for target_dev in target_list:
        # Do not attach if it does already exist
        if device_exists(vm, target_dev):
            continue

        # Prepare controller for special disks like virtio-scsi
        # Open multifunction to add more controller for disks(150 or more)
        if multifunction_on:
            set_controller_multifunction(vm.name, disk_target)

        disk_params = {}
        disk_params['type_name'] = disk_type
        disk_params['target_dev'] = target_dev
        disk_params['target_bus'] = disk_target
        disk_params['device_type'] = params.get("device_type", "disk")
        device_name = "%s_%s" % (target_dev, vm.name)
        disk_path = os.path.join(os.path.dirname(path), device_name)
        disk_path = create_local_disk(disk_type, disk_path,
                                      disk_size, disk_format,
                                      vgname, device_name)
        added_disks[disk_path] = disk_size
        result = attach_additional_device(vm.name, target_dev, disk_path,
                                          disk_params, attach_config)
        if result.exit_status:
            raise exceptions.TestFail("Attach device %s failed."
                                      % target_dev)
    logging.debug("New VM XML:\n%s", vm.get_xml())
    return added_disks
def define_new_vm(vm_name, new_name):
    """
    Just define a new vm from given name

    :param vm_name: name of the source domain to copy the XML from
    :param new_name: name for the newly defined domain
    :return: True on success, False if defining failed
    """
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vm_name = new_name
        # Drop the UUID so libvirt generates a fresh one for the new domain
        del vmxml.uuid
        vmxml.define()
        return True
    except xcepts.LibvirtXMLError, detail:
        logging.error(detail)
        return False
def remotely_control_libvirtd(server_ip, server_user, server_pwd,
action='restart', status_error='no'):
"""
Remotely restart libvirt service
"""
session = None
try:
session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
logging.info("%s libvirt daemon\n", action)
service_libvirtd_control(action, session)
session.close()
except (remote.LoginError, aexpect.ShellError, process.CmdError), detail:
if session:
session.close()
if status_error == "no":
raise exceptions.TestFail("Failed to %s libvirtd service on "
"server: %s\n", action, detail)
else:
logging.info("It is an expect %s", detail)
def connect_libvirtd(uri, read_only="", virsh_cmd="list", auth_user=None,
                     auth_pwd=None, vm_name="", status_error="no",
                     extra="", log_level='LIBVIRT_DEBUG=3', su_user="",
                     patterns_virsh_cmd=".*Id\s*Name\s*State\s*.*"):
    """
    Connect libvirt daemon

    :param uri: connection URI passed to virsh -c
    :param read_only: e.g. "-r" for a read-only connection
    :param virsh_cmd: virsh subcommand to run, default "list"
    :param auth_user: user name answered to an authentication prompt
    :param auth_pwd: password answered to a password prompt
    :param vm_name: optional domain argument appended to the command
    :param status_error: kept for caller symmetry; not used in this body
    :param extra: extra text prepended to the command line
    :param log_level: environment prefix controlling libvirt debug output
    :param su_user: if non-empty, run the command via "su <user> -c"
    :param patterns_virsh_cmd: regex marking successful command output
    :return: True when the expected output pattern was seen, else False
    """
    patterns_yes_no = r".*[Yy]es.*[Nn]o.*"
    patterns_auth_name_comm = r".*name:.*"
    patterns_auth_name_xen = r".*name.*root.*:.*"
    patterns_auth_pwd = r".*[Pp]assword.*"

    command = "%s %s virsh %s -c %s %s %s" % (extra, log_level, read_only,
                                              uri, virsh_cmd, vm_name)
    # allow specific user to run virsh command
    if su_user != "":
        command = "su %s -c '%s'" % (su_user, command)

    logging.info("Execute %s", command)
    # setup shell session
    session = aexpect.ShellSession(command, echo=True)

    try:
        # requires access authentication
        match_list = [patterns_yes_no, patterns_auth_name_comm,
                      patterns_auth_name_xen, patterns_auth_pwd,
                      patterns_virsh_cmd]
        while True:
            # The returned match index counts back from the end of
            # match_list: -5 yes/no, -4/-3 name prompts, -2 password,
            # -1 the expected virsh output.
            match, text = session.read_until_any_line_matches(match_list,
                                                              timeout=30,
                                                              internal_timeout=1)
            if match == -5:
                logging.info("Matched 'yes/no', details: <%s>", text)
                session.sendline("yes")
            elif match == -3 or match == -4:
                logging.info("Matched 'username', details: <%s>", text)
                session.sendline(auth_user)
            elif match == -2:
                logging.info("Matched 'password', details: <%s>", text)
                session.sendline(auth_pwd)
            elif match == -1:
                logging.info("Expected output of virsh command: <%s>", text)
                break
            else:
                logging.error("The real prompt text: <%s>", text)
                break
        session.close()
        return True
    except (aexpect.ShellError, aexpect.ExpectError), details:
        log = session.get_output()
        session.close()
        logging.error("Failed to connect libvirtd: %s\n%s", details, log)
        return False
def get_all_vol_paths():
    """
    Get all volumes' path in host

    :return: set of volume paths from every active storage pool
    """
    vol_path = []
    sp = libvirt_storage.StoragePool()
    # Query the pool list once; the original called sp.list_pools() twice
    # per iteration, re-running the underlying virsh command each time.
    pools = sp.list_pools()
    for pool_name in pools.keys():
        if pools[pool_name]['State'] != "active":
            logging.warning(
                "Inactive pool '%s' cannot be processed" % pool_name)
            continue
        pv = libvirt_storage.PoolVolume(pool_name)
        for path in pv.list_volumes().values():
            vol_path.append(path)
    return set(vol_path)
def do_migration(vm_name, uri, extra, auth_pwd, auth_user="root",
                 options="--verbose", virsh_patterns=".*100\s%.*",
                 su_user="", timeout=30):
    """
    Migrate VM to target host.

    :param vm_name: domain to migrate
    :param uri: destination connection URI
    :param extra: extra text prepended to the virsh command line
    :param auth_pwd: password answered to a password prompt
    :param auth_user: user name answered to a name prompt
    :param options: virsh migrate options, default "--verbose"
    :param virsh_patterns: regex marking successful migration output
    :param su_user: if non-empty, run the command via "su <user> -c"
    :param timeout: seconds to wait for each expected pattern
    :return: True when the success pattern was seen, False otherwise
    """
    patterns_yes_no = r".*[Yy]es.*[Nn]o.*"
    patterns_auth_name = r".*name:.*"
    patterns_auth_pwd = r".*[Pp]assword.*"

    command = "%s virsh migrate %s %s %s" % (extra, vm_name, options, uri)
    # allow specific user to run virsh command
    if su_user != "":
        command = "su %s -c '%s'" % (su_user, command)

    logging.info("Execute %s", command)
    # setup shell session
    session = aexpect.ShellSession(command, echo=True)

    try:
        # requires access authentication
        match_list = [patterns_yes_no, patterns_auth_name,
                      patterns_auth_pwd, virsh_patterns]
        while True:
            # Match index counts back from the end of match_list:
            # -4 yes/no, -3 name prompt, -2 password, -1 success pattern.
            match, text = session.read_until_any_line_matches(match_list,
                                                              timeout=timeout,
                                                              internal_timeout=1)
            if match == -4:
                logging.info("Matched 'yes/no', details: <%s>", text)
                session.sendline("yes")
            elif match == -3:
                logging.info("Matched 'username', details: <%s>", text)
                session.sendline(auth_user)
            elif match == -2:
                logging.info("Matched 'password', details: <%s>", text)
                session.sendline(auth_pwd)
            elif match == -1:
                logging.info("Expected output of virsh migrate: <%s>", text)
                break
            else:
                logging.error("The real prompt text: <%s>", text)
                break
        session.close()
        return True
    except (aexpect.ShellError, aexpect.ExpectError), details:
        log = session.get_output()
        session.close()
        logging.error("Failed to migrate %s: %s\n%s", vm_name, details, log)
        return False
def update_vm_disk_source(vm_name, disk_source_path, source_type="file"):
    """
    Update disk source path of the VM

    :param vm_name: domain name
    :param disk_source_path: existing directory to relocate the disk source to
    :param source_type: it may be 'dev' or 'file' type, which is default
    :return: True on success, False otherwise
    """
    if not os.path.isdir(disk_source_path):
        logging.error("Require disk source path!!")
        return False

    # Prepare to update VM first disk source file
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    devices = vmxml.devices
    disk_index = devices.index(devices.by_device_tag('disk')[0])
    disks = devices[disk_index]
    # Current source path ('file' or 'dev' attribute of the <source> element)
    disk_source = disks.source.get_attrs().get(source_type)
    logging.debug("The disk source file of the VM: %s", disk_source)

    if not os.path.exists(disk_source):
        logging.error("The disk source doesn't exist!!")
        return False

    # Keep the original image file name, only change its directory
    vm_name_with_format = os.path.basename(disk_source)
    new_disk_source = os.path.join(disk_source_path, vm_name_with_format)
    logging.debug("The new disk source file of the VM: %s", new_disk_source)

    # Update VM disk source file
    disks.source = disks.new_disk_source(**{'attrs': {'%s' % source_type:
                                            "%s" % new_disk_source}})
    # SYNC VM XML change
    vmxml.devices = devices
    logging.debug("The new VM XML:\n%s", vmxml.xmltreefile)
    vmxml.sync()
    return True
def hotplug_domain_vcpu(domain, count, by_virsh=True, hotplug=True):
    """
    Hot-plug/Hot-unplug vcpu for domain

    :param domain: Domain name, id, uuid
    :param count: to setvcpus it's the current vcpus number,
                  but to qemu-monitor-command,
                  we need to designate a specific CPU ID.
                  The default will be got by (count - 1)
    :param by_virsh: True means hotplug/unplug by command setvcpus,
                     otherwise, using qemu_monitor
    :param hotplug: True means hot-plug, False means hot-unplug
    :return: CmdResult of the virsh command used
    """
    if by_virsh:
        result = virsh.setvcpus(domain, count, "--live", debug=True)
    else:
        if hotplug:
            cpu_opt = "cpu-add"
        else:
            cpu_opt = "cpu-del"
        # Note: cpu-del is not supported currently, it will return error.
        # as follow,
        # {
        #    "id": "libvirt-23",
        #    "error": {
        #        "class": "CommandNotFound",
        #        "desc": "The command cpu-del has not been found"
        #    }
        # }
        # so, the caller should check the result.
        # hot-plug/hot-plug the CPU has maximal ID
        params = (cpu_opt, (count - 1))
        cmd = '{\"execute\":\"%s\",\"arguments\":{\"id\":%d}}' % params
        result = virsh.qemu_monitor_command(domain,
                                            cmd,
                                            "--pretty",
                                            debug=True)
    return result
def exec_virsh_edit(source, edit_cmd, connect_uri="qemu:///system"):
    """
    Execute edit command.

    :param source : virsh edit's option.
    :param edit_cmd: Edit command list to execute.
    :param connect_uri: libvirt connection URI, default "qemu:///system"
    :return: True if edit is successful, False if edit is failure.
    """
    logging.info("Trying to edit xml with cmd %s", edit_cmd)
    session = aexpect.ShellSession("sudo -s")
    try:
        session.sendline("virsh -c %s edit %s" % (connect_uri, source))
        for cmd in edit_cmd:
            session.sendline(cmd)
        # Leave insert mode (ESC), then save and quit the editor (vi 'ZZ')
        session.send('\x1b')
        session.send('ZZ')
        remote.handle_prompts(session, None, None, r"[\#\$]\s*$", debug=True)
        session.close()
        return True
    except Exception, e:
        session.close()
        logging.error("Error occurred: %s", e)
        return False
def new_disk_vol_name(pool_name):
    """
    According to BZ#1138523, the new volume name must be the next
    created partition(sdb1, etc.), so we need to inspect the original
    partitions of the disk then count the new partition number.

    :param pool_name: Disk pool name
    :return: New volume name or none
    """
    poolxml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
    if poolxml.get_type(pool_name) != "disk":
        logging.error("This is not a disk pool")
        return None
    # Strip the "/dev/" prefix from the pool's source device path
    disk = poolxml.get_source().device_path[5:]
    # Count existing partitions of this disk. Using a generator with sum()
    # works on both Python 2 and 3, unlike len(filter(...)) which fails on
    # Python 3 where filter() returns a lazy iterator.
    part_num = sum(1 for s in get_parts_list() if s.startswith(disk))
    return disk + str(part_num)
def update_polkit_rule(params, pattern, new_value):
"""
This function help to update the rule during testing.
:param params: Test run params
:param pattern: Regex pattern for updating
:param new_value: New value for updating
"""
polkit = LibvirtPolkitConfig(params)
polkit_rules_path = polkit.polkit_rules_path
try:
polkit_f = open(polkit_rules_path, 'r+')
rule = polkit_f.read()
new_rule = re.sub(pattern, new_value, rule)
polkit_f.seek(0)
polkit_f.truncate()
polkit_f.write(new_rule)
polkit_f.close()
logging.debug("New polkit config rule is:\n%s", new_rule)
polkit.polkitd.restart()
except IOError, e:
logging.error(e)
| rbian/avocado-vt | virttest/utils_test/libvirt.py | Python | gpl-2.0 | 106,637 |
#!/usr/bin/python3
import datetime

# HTTP header — mandatory when this CGI output is pulled in through an
# SSI "include virtual" directive.
print('Content-Type: text/plain;charset=utf-8')
print('')

# Body: the current timestamp.
now = datetime.datetime.now()
print(now.strftime('%Y-%m-%d %H:%M:%S'))

if __name__ == '__main__':
    pass
# Read one line from stdin and print it repeated twice (no separator).
line = input()
print(line * 2)
| knuu/competitive-programming | atcoder/corp/codefes2015qb_a.py | Python | mit | 17 |
import typing # noqa
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.views.generic import ListView
from .models import Question
class IndexView(ListView):
    """List view backing the polls index page with the latest questions."""

    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions."""
        # NOTE(review): "last five" is by descending id, not by a
        # publication date field — confirm that is the intended ordering.
        return Question.objects.order_by('-id')[:5]
def detail(request: HttpRequest, question_id: int) -> HttpResponse:
    """Render the detail page for an active question.

    Returns a plain-text response when the question is inactive and
    raises Http404 (via get_object_or_404) when it does not exist.
    """
    question = get_object_or_404(Question, pk=question_id)
    if not question.is_active:
        # Bug fix: a Django view must return an HttpResponse object, not a
        # bare string (the annotation agrees; returning str breaks Django).
        return HttpResponse('invalid request')
    return render(request, 'polls/detail.html', {'question': question})
| caulagi/django-mypy | polls/views.py | Python | mit | 759 |
#!/usr/bin/python
import os
import sys
import re
import glob
import operator
from collections import OrderedDict
from datetime import datetime
f = open( 'nagiosdata', 'w')
#path ="/root/nagios/log/"
#dirs = os.listdir( path )
#for file in dirs:
#nagioslog = open('nagios-06-08-2015-00.log')
nagioslog = open('/root/nagios/log/nagios-' + datetime.now().strftime("%m-%d-%Y") + '-00.log')
# nagioslog = open('nagios-' + datetime.now().strftime("%m-%d-%Y") + '-00.log')
for line in nagioslog:
line = line.rstrip()
if re.search('DOWN;SOFT;3;', line) :
f.write(line+'\n')
if re.search('DOWN;HARD;10;CRITICAL', line) :
f.write(line+'\n')
od = OrderedDict()
with open('nagiosdata') as f:
r = re.compile("(?<=HOST ALERT:\s)\S+")
for line in f:
name, st, con, _, _ = r.search(line).group().split(";")
# od.setdefault(name, {"State": st, "Total": con,"HARD": 0,"SOFT":0,"Count":0,})
od.setdefault(name, {"State": st, "HARD": 0,"SOFT":0,"Count":0})
od[name]["State"] = st
# od[name]["Total"] = con
od[name]["Count"] += 1
if con == 'HARD' :
od[name]["HARD"] +=1
else :
od[name]["SOFT"] +=1
print "Host Down Statistics for last 24 hours: \n"
for k,v in sorted(od.items(), key=operator.itemgetter(1),reverse=True):
print("{0} {1} ".format(k,v))
| rmstmg/nagios-log-parsing | nagioslog.py | Python | apache-2.0 | 1,362 |
import torch
from torch._thnn import type2backend
from torch.autograd import Function
from . import _all_functions
from .auto import _BCELoss
# TODO: move this code to THNN and remove _BCELoss from auto.py
class BCELoss(_BCELoss):
    """Binary cross-entropy loss that also accepts multi-dimensional
    targets by temporarily broadcasting a 1D `weight` vector across the
    batch dimension before delegating to `_BCELoss`.
    """

    def _resize_weight(self, target):
        # Save the pristine weight so _unresize_weight can restore it, then
        # expand a per-class weight row to the full target shape whenever
        # the target is not 1D.
        self.old_weight = self.weight
        if self.weight is not None and target.dim() != 1:
            self.weight = self.weight.view(1, target.size(1)).expand_as(target)

    def _unresize_weight(self):
        # Undo _resize_weight: restore the saved weight and drop the stash.
        self.weight = self.old_weight
        del self.old_weight

    def forward(self, input, target):
        # Element counts must agree even if shapes differ.
        assert input.nelement() == target.nelement()
        self._resize_weight(target)
        result = super(BCELoss, self).forward(input, target)
        self._unresize_weight()
        return result

    def backward(self, grad_output):
        # saved_tensors[1] is the target — presumably saved by
        # _BCELoss.forward; confirm against auto.py.
        target = self.saved_tensors[1]
        self._resize_weight(target)
        result = super(BCELoss, self).backward(grad_output)
        self._unresize_weight()
        return result
# Register the Function so the thnn module exports it.
_all_functions.append(BCELoss)
| RPGOne/Skynet | pytorch-master/torch/nn/_functions/thnn/loss.py | Python | bsd-3-clause | 1,051 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Reza(User:reza1615), 2014
#
# Distributed under the terms of the CC-BY-SA 3.0 .
# Python 3
import pywikibot,re,codecs
# Disable pywikibot's write throttling for this bot run.
pywikibot.config.put_throttle = 0
pywikibot.config.maxthrottle = 0
bot_version='۴.۰'
# Memoisation store for translation(): lookup key tuple -> interwiki title
# (or False when no translation exists).
_cache={}
def translation(firstsite,secondsite,enlink):
    """
    Look up the interwiki title of ``enlink`` on ``secondsite`` via the
    langlinks API of ``firstsite``.

    :param firstsite: language code of the wiki the title lives on
    :param secondsite: language code to translate the title into
    :param enlink: page title, possibly wrapped in [[...]] markup
    :return: the translated title string, or False when no translation
             exists, the title is empty, or it contains a '#' section link.
             Results (including failures) are memoised in _cache.
    """
    if _cache.get(tuple([enlink, firstsite,enlink, 'translation_en'])):
        return _cache[tuple([enlink, firstsite,enlink, 'translation_en'])]
    try:
        # Python 2: coerce byte strings to unicode; an already-unicode
        # title raises here and the except branch strips the markup instead.
        enlink=unicode(str(enlink),'UTF-8').replace('[[','').replace(']]','').replace('en:','').replace('fa:','')
    except:
        enlink=enlink.replace('[[','').replace(']]','').replace('en:','').replace('fa:','')
    if enlink.find('#')!=-1:
        # Section links cannot be translated
        _cache[tuple([enlink, firstsite,enlink, 'translation_en'])]=False
        return False
    if enlink=='':
        _cache[tuple([enlink, firstsite,enlink, 'translation_en'])]=False
        return False
    # API titles use underscores instead of spaces
    enlink=enlink.replace(' ','_')
    site = pywikibot.Site(firstsite)
    sitesecond= pywikibot.Site(secondsite)
    try:
        categoryname = pywikibot.data.api.Request(site=site, action="query", prop="langlinks",titles=enlink,redirects=1,lllimit=500)
        categoryname=categoryname.submit()
        # Scan the returned langlinks for the target language code
        for item in categoryname['query']['pages']:
            case=categoryname['query']['pages'][item]['langlinks']
            for item in case:
                if item['lang']==secondsite:
                    intersec=item['*']
                    break
        result=intersec
        if result.find('#')!=-1:
            _cache[tuple([enlink, firstsite,enlink, 'translation_en'])]=False
            return False
        _cache[tuple([enlink, firstsite,enlink, 'translation_en'])]=result
        return result
    except:
        # Any API failure or missing langlink is treated as "no translation"
        _cache[tuple([enlink, firstsite,enlink, 'translation_en'])]=False
        return False
def Solve_linke_translation(fa_link, num):
    """
    Build a wiki link for ``fa_link`` paired with its interwiki title.

    :param fa_link: the page title (already stripped of [[...]] markup)
    :param num: 1 when the title is Persian, 2 when it is English
    :return: "[[title|label]]" when a translation is found, otherwise
             the plain "[[title]]" link
    """
    if num == 1:
        # Persian title: label it with the English translation if one exists
        en_title = translation('fa', 'en', fa_link)
        if en_title:
            return '[[' + fa_link + '|' + en_title + ']]'
        return '[[' + fa_link + ']]'
    if num == 2:
        # English title: link the Persian page but keep the English label
        fa_title = translation('en', 'fa', fa_link)
        if fa_title:
            return '[[' + fa_title + '|' + fa_link + ']]'
        return '[[' + fa_link + ']]'
def Check_link(fa_link):
    """
    Strip wiki markup from a link and classify its script.

    :param fa_link: link text such as "[[Title|label]]"
    :return: (title, 1) when the title contains Persian/Arabic letters,
             (title, 2) when it is Latin-only
    """
    # Keep only the target part (before '|') and drop the brackets
    title = fa_link.split('|')[0].replace('[[', '').replace(']]', '')
    # Removing Arabic-script letters changes the string iff it had any
    stripped = re.sub(r'[ءاآأإئؤبپتثجچحخدذرزژسشصضطظعغفقکگلمنوهييك]', r'', title)
    return (title, 1) if stripped != title else (title, 2)
def check_ref_title_is_english(my_ref):
    """
    Heuristically decide whether a citation's title is Latin-script.

    :param my_ref: one <ref>...</ref> body or citation-template string
    :return: True when no Persian letters are found in the title text
             (so wiki links inside it should point at English titles),
             False when the title looks Persian.
    """
    # Normalise spacing so "|param = value" becomes "|param=value" etc.
    my_ref_3=my_ref.replace('= ','=').replace(' =','=').replace('{{ ','{{').replace(' }}','}}').replace('\r','').replace('\n','').replace(' |','|').replace('| ','|').replace(' ',' ').replace(' ',' ').replace(' ',' ')
    if '{{یادکرد' in my_ref_3:
        # Persian citation template: inspect its title-like parameters
        for item in ['|عنوان','|نویسنده','|کتاب','|نام=','|نام خانوادگی=','|مقاله','|ک=','|ف=','|اثر']:
            if item in my_ref_3:
                ref_title=my_ref_3.split(item)[1].split('|')[0].strip()
                ref_title_2 = re.sub(r'[ضصثقفغعهخحشسيبلاتنمظطزرذدپوکگجچژ]', r"",ref_title)
                if ref_title_2!=ref_title:
                    #pywikibot.output('!!!!!\03{lightblue}Title is persian so the links should be persian\03{default}')
                    return False
                # Stop at the first parameter that actually has a value
                if ref_title.replace('=','').strip():
                    break
        return True
    elif ('{{Cit' in my_ref_3) or ('{{cit' in my_ref_3):
        # English cite template: same check on its title-like parameters
        for item in ['|title','|first','|last','|work','|contribution','|publisher']:
            if item in my_ref_3:
                ref_title=my_ref_3.split(item)[1].split('|')[0].strip()
                ref_title_2 = re.sub(r'[ضصثقفغعهخحشسيبلاتنمظطزرذدپوکگجچژ]', r"",ref_title)
                if ref_title_2!=ref_title:
                    #pywikibot.output('!!!!!\03{lightblue}Title is persian so the links should be persian\03{default}')
                    return False
                if ref_title.replace('=','').strip():
                    break
        return True
    else:
        # Raw reference text: shield wiki links, then inspect bare URLs
        my_ref_3=my_ref_3.replace('[[','@1@').replace(']]','@2@')
        if '[' in my_ref_3:
            my_url=my_ref_3.split('[')[1].split(']')[0]
            if ' ' in my_url:
                # First word after the URL is the start of the link title
                my_url_title=my_url.split(' ')[1]
                my_url_title_2 = re.sub(r'[ضصثقفغعهخحشسيبلاتنمظطزرذدپوکگجچژ]', r"",my_url_title)
                if my_url_title_2!=my_url_title:
                    #pywikibot.output('!!!!!\03{lightblue}Title is persian so the links should be persian\03{default}')
                    return False
            else:
                # Bare URL with no title: treated as Persian (not verifiable)
                #pywikibot.output('!!!!!\03{lightblue}Title is persian so the links should be persian\03{default}')
                return False
        else:
            # No external link: check the remaining text (wiki links removed)
            my_ref_3= re.sub(r'\@1\@.*?\@2\@', r"",my_ref_3)
            my_ref_3_2 = re.sub(r'[ضصثقفغعهخحشسيبلاتنمظطزرذدپوکگجچژ]', r"",my_ref_3)
            if my_ref_3_2!=my_ref_3:
                #pywikibot.output('!!!!!\03{lightblue}Title is persian so the links should be persian\03{default}')
                return False
        return True
def run (text,sum):
    """Rewrite wiki links inside English-titled references of a page.

    Collects <ref>…</ref> bodies and citation templates, and for every
    reference whose title is English, replaces each [[…]] link with a piped
    translated link.  Returns the (possibly updated) text and edit summary.
    """
    old_text=text
    # All <ref>...</ref> bodies (newlines removed so refs match on one line).
    RE=re.compile(r'<[\s]*ref[^>]*>([^<]*)<[\s]*\/[\s]*ref[\s]*>')
    all_refs=RE.findall(text.replace('\n','').replace('\r',''))
    # All {{cite ...}} / {{یادکرد ...}} templates, including nested braces.
    RE2=re.compile(r'{{\s*(?:[Cc]ite|یادکرد)[\-_\s](?:{{.*?}}|[^}])*}}')
    all_refs2=RE2.findall(text.replace('\n','').replace('\r',''))
    our_ref=[]
    if all_refs:
        our_ref=all_refs
    if all_refs2:
        our_ref+=all_refs2
    if not our_ref:
        return text,sum
    # Deduplicate so the same reference is not rewritten twice.
    our_ref = list(set(our_ref))
    for refs in our_ref:
        if '[[رده:' in refs:
            # Skip category links.
            continue
        should_english=check_ref_title_is_english(refs)
        if should_english:
            RE=re.compile(r'\[\[.*?\]\]')
            fa_links=RE.findall(refs)
            if fa_links:
                #pywikibot.output('----links----')
                for fa_link in fa_links:
                    fa_link_r,num=Check_link(fa_link)
                    if fa_link_r:
                        # Translate and splice the piped link into the page text;
                        # refs is updated so later links patch the new version.
                        new_link=Solve_linke_translation(fa_link_r,num)
                        new_refs=refs.replace('[['+fa_link_r+']]',new_link)
                        old_text=old_text.replace(refs,new_refs)
                        refs=new_refs
            else:
                #pywikibot.output('It doesnt have any wiki link!')
                continue
    if old_text!=text:
        # Something changed: append the edit-summary fragment.
        return old_text,sum+'+'+'اصلاح ارجاع لاتین'
    else:
        return text,sum
def main(text,sum):
    """Run the reference-link fixer and return the updated (text, summary).

    Bug fix: the original only returned a value when the summary was
    non-empty, so an unchanged page made ``main`` return ``None`` and the
    caller's two-value unpacking raised TypeError.  Now the (text, summary)
    pair is always returned.
    """
    new_text, sum = run(text, sum)
    return new_text, sum
if __name__ == "__main__":
    # Interactive driver: ask for a page title, fix its reference links and
    # save the page if anything changed.  (Python 2: raw_input/.decode.)
    sum=''
    PageTitle =raw_input('Page name > ').decode('utf-8')
    faSite=pywikibot.Site('fa',fam='wikipedia')
    fapage=pywikibot.Page(faSite,PageTitle)
    text=fapage.get()
    new_text,sum2=main(text,sum)
    if text!=new_text:
        # Persist the corrected text with a Persian edit summary.
        fapage.put(new_text,'ربات:اصلاح پیوندهای ارجاع لاتین')
        pywikibot.output("\03{lightgreen}Links of the page are updated!\03{default}")
    else:
pywikibot.output("This Page doesn't need any change") | PersianWikipedia/fawikibot | Need2convert/zz_ref_link_correction_core.py | Python | mit | 7,533 |
# publications.urls
# DJANGO
from django.conf.urls import patterns, include, url
from django.views.generic import ListView
# NEWS
from .models import Publication
from .views import (
PublicationDetailView,
PublicationCreateView,
PublicationUpdateView
)
# Shared, paginated list view over all publications.
publications_list_view = ListView.as_view(
    model=Publication,
    paginate_by=10,
    template_name='publications/index.html')

# URL routes for the publications app (legacy Django patterns() style).
urlpatterns = patterns('',
    url(r'^add/$',
        view=PublicationCreateView.as_view(), name='add_publication'),
    url(r'^(?P<pk>\d+)/$',
        view=PublicationDetailView.as_view(), name='publication'),
    url(r'^update/(?P<pk>\d+)/$',
        view=PublicationUpdateView.as_view(), name='update_publication'),
    url(r'^$', view=publications_list_view, name='publications_index'),
    url(r'^page/(?P<page>\d+)/$', view=publications_list_view,
        name='publications_index_paginated'),
)
| valuesandvalue/valuesandvalue | vavs_project/publications/urls.py | Python | mit | 987 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BGPCommunity(Model):
    """Contains bgp community information offered in Service Community resources.

    Autogenerated msrest model; edit with care, regeneration will overwrite.

    :param service_supported_region: The region which the service support.
     e.g. For O365, region is Global.
    :type service_supported_region: str
    :param community_name: The name of the bgp community. e.g. Skype.
    :type community_name: str
    :param community_value: The value of the bgp community. For more
     information:
     https://docs.microsoft.com/en-us/azure/expressroute/expressroute-routing.
    :type community_value: str
    :param community_prefixes: The prefixes that the bgp community contains.
    :type community_prefixes: list of str
    :param is_authorized_to_use: Customer is authorized to use bgp community
     or not.
    :type is_authorized_to_use: bool
    :param service_group: The service group of the bgp community contains.
    :type service_group: str
    """

    # Maps each Python attribute to its wire-format key and msrest
    # serialization type.
    _attribute_map = {
        'service_supported_region': {'key': 'serviceSupportedRegion', 'type': 'str'},
        'community_name': {'key': 'communityName', 'type': 'str'},
        'community_value': {'key': 'communityValue', 'type': 'str'},
        'community_prefixes': {'key': 'communityPrefixes', 'type': '[str]'},
        'is_authorized_to_use': {'key': 'isAuthorizedToUse', 'type': 'bool'},
        'service_group': {'key': 'serviceGroup', 'type': 'str'},
    }

    def __init__(self, service_supported_region=None, community_name=None, community_value=None, community_prefixes=None, is_authorized_to_use=None, service_group=None):
        """Plain attribute assignment; all fields default to None."""
        self.service_supported_region = service_supported_region
        self.community_name = community_name
        self.community_value = community_value
        self.community_prefixes = community_prefixes
        self.is_authorized_to_use = is_authorized_to_use
        self.service_group = service_group
| SUSE/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/bgp_community.py | Python | mit | 2,386 |
from ReText import *
class ConfigDialog(QDialog):
	"""Settings dialog for ReText.

	Builds one labeled widget per option (checkbox for bools, spinbox for
	ints, line edit for strings) from a declarative option table, and writes
	the values back to QSettings on OK.
	"""

	def __init__(self, parent):
		QDialog.__init__(self, parent)
		self.initConfigOptions()
		self.layout = QGridLayout(self)
		buttonBox = QDialogButtonBox(self)
		buttonBox.setStandardButtons(QDialogButtonBox.Ok |
			QDialogButtonBox.Cancel)
		self.connect(buttonBox, SIGNAL('accepted()'), self.saveSettings)
		self.connect(buttonBox, SIGNAL('rejected()'), self.close)
		self.initWidgets()
		# Button row goes below the last option row.
		self.layout.addWidget(buttonBox, len(self.options), 0, 1, 2)

	def initConfigOptions(self):
		# options is a tuple containing (displayname, name, default) tuples;
		# a row with name=None is rendered as a section header instead.
		self.options = (
			(self.tr('Behavior'), None, None),
			(self.tr('Automatically save documents'), 'autoSave', False),
			(self.tr('Restore window geometry'), 'saveWindowGeometry', False),
			(self.tr('Restore live preview state'), 'restorePreviewState', False),
			(self.tr('Open external links in ReText window'), 'handleWebLinks', False),
			(self.tr('Open unknown files in plain text mode'), 'autoPlainText', True),
			(self.tr('Editor'), None, None),
			(self.tr('Highlight current line'), 'highlightCurrentLine', True),
			(self.tr('Show line numbers'), 'lineNumbersEnabled', False),
			(self.tr('Tab key inserts spaces'), 'tabInsertsSpaces', True),
			(self.tr('Tabulation width'), 'tabWidth', 4),
			(self.tr('Display right margin at column'), 'rightMargin', 0),
			(self.tr('Interface'), None, None),
			(self.tr('Icon theme name'), 'iconTheme', '')
			# Ideas for future: styleSheet, editorFont
		)

	def initWidgets(self):
		# Create one editor widget per option; widget type is chosen from the
		# type of the option's default value.
		self.configurators = {}
		for index in range(len(self.options)):
			displayname, name, default = self.options[index]
			if name is None:
				header = QLabel('<h3>%s</h3>' % displayname, self)
				self.layout.addWidget(header, index, 0, 1, 2, Qt.AlignHCenter)
				continue
			value = readFromSettings(name, type(default), default=default)
			label = QLabel(displayname, self)
			if isinstance(default, bool):
				self.configurators[name] = QCheckBox(self)
				self.configurators[name].setChecked(value)
			elif isinstance(default, int):
				self.configurators[name] = QSpinBox(self)
				if name == 'tabWidth':
					self.configurators[name].setRange(1, 10)
				else:
					self.configurators[name].setMaximum(100)
				self.configurators[name].setValue(value)
			elif isinstance(default, str):
				self.configurators[name] = QLineEdit(self)
				self.configurators[name].setText(value)
			self.layout.addWidget(label, index, 0)
			self.layout.addWidget(self.configurators[name], index, 1, Qt.AlignRight)

	def saveSettings(self):
		# Read each widget's current value and persist it, then close.
		for displayname, name, default in self.options:
			if name is None:
				continue
			configurator = self.configurators[name]
			if isinstance(default, bool):
				value = configurator.isChecked()
			elif isinstance(default, int):
				value = configurator.value()
			elif isinstance(default, str):
				value = configurator.text()
			writeToSettings(name, value, default)
		self.close()
| codemedic/retext | ReText/config.py | Python | gpl-3.0 | 2,955 |
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from random import randrange
import re
import wikipedia as wiki
from adapt.intent import IntentBuilder
from os.path import join, dirname
from mycroft.skills.core import MycroftSkill
from mycroft.util import read_stripped_lines
from mycroft.util.log import getLogger
__author__ = 'jdorleans'
LOGGER = getLogger(__name__)
class WikipediaSkill(MycroftSkill):
    """Mycroft skill that answers article queries with Wikipedia summaries."""

    def __init__(self):
        super(WikipediaSkill, self).__init__(name="WikipediaSkill")
        # Caps on the number of search hits considered and of summary
        # sentences spoken.
        self.max_results = self.config.get('max_results', 3)
        self.max_phrases = self.config.get('max_phrases', 3)
        self.question = 'Would you like to know more about '  # TODO - i10n
        # Randomized phrase fragments used to acknowledge a search.
        self.feedback_prefix = read_stripped_lines(
            join(dirname(__file__), 'dialog', self.lang,
                 'FeedbackPrefix.dialog'))
        self.feedback_search = read_stripped_lines(
            join(dirname(__file__), 'dialog', self.lang,
                 'FeedbackSearch.dialog'))

    def initialize(self):
        """Register the adapt intent that triggers this skill."""
        intent = IntentBuilder("WikipediaIntent").require(
            "WikipediaKeyword").require("ArticleTitle").build()
        self.register_intent(intent, self.handle_intent)

    def handle_intent(self, message):
        """Search Wikipedia for the requested title and speak its summary.

        On an ambiguous title, offers the user the candidate articles.
        """
        try:
            title = message.data.get("ArticleTitle")
            self.__feedback_search(title)
            results = wiki.search(title, self.max_results)
            # Drop parenthesised asides and /pronunciations/ from the summary.
            summary = re.sub(
                r'\([^)]*\)|/[^/]*/', '',
                wiki.summary(results[0], self.max_phrases))
            self.speak(summary)
        except wiki.exceptions.DisambiguationError as e:
            options = e.options[:self.max_results]
            LOGGER.debug("Multiple options found: " + ', '.join(options))
            self.__ask_more_about(options)
        except Exception as e:
            LOGGER.error("Error: {0}".format(e))

    def __feedback_search(self, title):
        """Speak a randomized 'searching for <title>' acknowledgement."""
        prefix = self.feedback_prefix[randrange(len(self.feedback_prefix))]
        feedback = self.feedback_search[randrange(len(self.feedback_search))]
        sentence = feedback.replace('<prefix>', prefix).replace(
            '<title>', title)
        self.speak(sentence, metadata={"more_speech": True})

    def __ask_more_about(self, opts):
        """Ask the user to pick one of several ambiguous article titles."""
        sentence = self.question
        size = len(opts)
        for idx, opt in enumerate(opts):
            sentence += opt
            # Join with commas, with 'or' before the final option.
            if idx < size - 2:
                sentence += ', '
            elif idx < size - 1:
                sentence += ' or '  # TODO - i10n
        self.speak(sentence)

    def stop(self):
        # Nothing to interrupt; speaking stops on its own.
        pass
def create_skill():
    """Entry point used by the Mycroft skill loader to instantiate the skill."""
    return WikipediaSkill()
| JarbasAI/JarbasAI | jarbas_skills/skill_wiki/__init__.py | Python | gpl-3.0 | 3,331 |
__author__ = 'PC-LiNing'
from bigchaindb import Bigchain
b=Bigchain()
### get blocks ids and status by tx_id
def getblocksBytx_id(tx_id):
    """Return the ids and validity status of blocks containing transaction tx_id.

    Thin wrapper over the module-level Bigchain connection ``b``.
    """
    blocks=b.get_blocks_status_containing_tx(tx_id)
    return blocks
### get block by block_id
def getblockbyid(block_id):
    """Fetch a block by its id.

    Not implemented yet — this is a stub that returns None.
    """
    pass
    return
| charitychain/Charitychain | Simplechaindb/test/blockquery.py | Python | apache-2.0 | 288 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.conf.urls.static import static
from django.contrib import admin
# Root URL routes for the Shuup workbench, plus media serving in development.
urlpatterns = patterns(
    '',
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Shuup admin UI.
    url(r'^sa/', include('shuup.admin.urls', namespace="shuup_admin", app_name="shuup_admin")),
    # REST API.
    url(r'^api/', include('shuup.api.urls')),
    # Storefront catches everything else.
    url(r'^', include('shuup.front.urls', namespace="shuup", app_name="shuup")),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| hrayr-artunyan/shuup | shuup_workbench/urls.py | Python | agpl-3.0 | 750 |
"""
payer: language and parser tools
"""
from setuptools import setup
import os
import re
# Matches a module-level "__version__ = '...'" assignment.
version_re = re.compile(r"^__version__ = ['\"](?P<version>[^'\"]*)['\"]", re.M)

def find_version(*file_paths):
    """Extract the ``__version__`` string from a python source file."""
    target = os.path.join(os.path.dirname(__file__), *file_paths)
    with open(target) as fh:
        source = fh.read()
    match = version_re.search(source)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group('version')
HERE = os.path.abspath(os.path.dirname(__file__))

setup(
    name='payer',
    version=find_version('payer/__init__.py'),
    author='Stephen [Bracket] McCray',
    author_email='mcbracket@gmail.com',
    packages=['payer'],
    classifiers=[
        # BUG FIX: the original was missing the comma after the first
        # classifier, so Python's implicit string concatenation merged it
        # with 'Intended Audience :: Developers' into one invalid entry.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ]
)
| bracket/payer | python/payer/setup.py | Python | bsd-2-clause | 1,049 |
# Copyright (C) 2011 Bradley N. Miller
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'bmiller'
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
def setup(app):
    """Sphinx extension hook: register the ``video`` directive and its CSS."""
    app.add_directive('video',Video)
    app.add_stylesheet('video.css')
CODE = """\
<a id="%(divid)s_thumb" style='position:relative;'>
<img src="%(thumb)s" />
<div class='video-play-overlay'></div>
</a>
<div id="%(divid)s" class="video_popup" >
<video %(controls)s %(preload)s %(loop)s >
%(sources)s
No supported video types
</video>
</div>
"""
POPUP = """\
<script>
jQuery(function ($) {
$('#%(divid)s_thumb').click(function (e) {
$('#%(divid)s').modal();
return false;
});
});
</script>
"""
INLINE = """\
<script>
jQuery(function($) {
$('#%(divid)s_thumb').click(function(e) {
$('#%(divid)s').show();
$('#%(divid)s_thumb').hide();
logBookEvent({'event':'video','act':'play','div_id': '%(divid)s'});
// Log the run event
});
});
</script>
"""
SOURCE = """<source src="%s" type="video/%s"></source>"""
class Video(Directive):
    """Implements the ``.. video::`` directive.

    Emits an HTML5 ``<video>`` element (one ``<source>`` per content line)
    behind a clickable thumbnail.  Options: ``:controls:``, ``:loop:``,
    ``:preload:`` (flags) and ``:thumb:`` (image URI).
    """
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    has_content = True
    option_spec = {'controls':directives.flag,
                   'loop': directives.flag,
                   'thumb': directives.uri,
                   'preload': directives.flag
                   }

    def run(self):
        """
        process the video directive and generate html for output.

        :return: list containing a single raw-HTML docutils node.
        """
        # BUG FIX: 'mp4' was missing, so .mp4 sources raised KeyError.
        mimeMap = {'mov':'mp4', 'mp4':'mp4', 'webm':'webm', 'm4v':'m4v'}

        sources = [SOURCE % (directives.uri(line),mimeMap[line[line.rindex(".")+1:]]) for line in self.content]
        self.options['divid'] = self.arguments[0]

        # Flags arrive with value None when present.  Every key referenced by
        # the CODE template must exist, otherwise "CODE % self.options" raises
        # KeyError — the original left 'controls' and 'thumb' unset when the
        # options were omitted (BUG FIX: give each an explicit default).
        self.options['controls'] = 'controls' if 'controls' in self.options else ''

        if 'loop' in self.options:
            self.options['loop'] = 'loop'
        else:
            self.options['loop'] = ''

        if 'preload' in self.options:
            self.options['preload'] = 'preload="auto"'
        else:
            self.options['preload'] = 'preload="none"'

        if 'thumb' not in self.options:
            self.options['thumb'] = ''

        self.options['sources'] = "\n ".join(sources)

        res = CODE % self.options
        if 'popup' in self.options:
            res += POPUP % self.options
        else:
            res += INLINE % self.options
        return [nodes.raw('',res , format='html')]
# Sample document used by the self-test below.
source = """\
This is some text.

.. video:: divid
   :controls:
   :thumb: static/turtlestill.png
   :loop:

   http://knuth.luther.edu/~bmiller/foo.mov
   http://knuth.luther.edu/~bmiller/foo.webm

This is some more text.
"""

if __name__ == '__main__':
    # Manual smoke test: render the sample document and print the HTML body.
    # (Python 2 print statement.)
    from docutils.core import publish_parts
    directives.register_directive('video',Video)
    doc_parts = publish_parts(source,
            settings_overrides={'output_encoding': 'utf8',
            'initial_header_level': 2},
            writer_name="html")
    print doc_parts['html_body']
| 42cs/book | modules/luther/sphinx/video/video.py | Python | mit | 3,703 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2011-2014 German Aerospace Center DLR
(Deutsches Zentrum fuer Luft- und Raumfahrt e.V.),
Institute of System Dynamics and Control
All rights reserved.
This file is part of PySimulator.
PySimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PySimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PySimulator. If not, see www.gnu.org/licenses.
'''
import os, numpy, scipy.io, string, collections
from .. import IntegrationResults
# Exception classes
class FileDoesNotExist (Exception): pass
class UnknownIndex (Exception): pass
class UnknownArgument (Exception): pass
class WrongDymolaResultFile(Exception): pass
fileExtension = 'mat'
description = 'Dymola Simulation Result File'
def charArrayToStrList(charArray):
    """Transform a numpy character array to a list of strings.

    Each row is joined into one string; empty (NUL padding) items and
    non-ASCII characters are dropped, and trailing whitespace is stripped.

    Fix: uses str methods instead of the deprecated ``string.join`` /
    ``string.rstrip`` module functions (removed in Python 3); the result
    is unchanged.
    """
    strList = []
    for item in charArray:
        joined = "".join([x for x in item if len(x) > 0 and ord(x) < 128])
        strList.append(str(joined.rstrip()))
    return strList
class Results(IntegrationResults.Results):
    """ Result Object to hold a Dymola result file, see also
        class IntegrationResults.Results
    """
    def __init__(self, fileName):
        """Load a Dymola .mat result file into memory.

        Raises FileDoesNotExist when the file is missing and
        WrongDymolaResultFile when a required matrix is absent or malformed.
        """
        IntegrationResults.Results.__init__(self)

        # Not possible to load data from a partially written mat-file
        self.canLoadPartialData = False

        self.fileName = fileName

        # An empty/None name yields an empty, unavailable Results object.
        # NOTE(review): "fileName is ''" relies on string interning; "== ''"
        # would be the robust comparison.
        if fileName is None:
            return
        if fileName is '':
            return

        # Check if fileName exists
        if not os.path.isfile(fileName):
            raise FileDoesNotExist("File '" + fileName + "' does not exist")

        # Determine complete file name
        fullFileName = os.path.abspath(fileName)

        # Read data from file
        fileData = scipy.io.loadmat(fullFileName, matlab_compatible=True)

        # Check Aclass array
        if not("Aclass" in fileData):
            raise WrongDymolaResultFile("Matrix 'Aclass' is missing in result file " + fullFileName)
        Aclass = charArrayToStrList(fileData["Aclass"])
        if len(Aclass) < 3:
            raise WrongDymolaResultFile("Matrix 'Aclass' has not 3 or more rows in result file " + fullFileName)
        if Aclass[1] != "1.1":
            raise WrongDymolaResultFile("Amatrix[1] is not '1.1' in result file " + fullFileName)

        # Check whether other matrices are on the result file
        if not("name" in fileData):
            raise WrongDymolaResultFile("Matrix 'name' is not in result file " + fullFileName)
        if not("description" in fileData):
            raise WrongDymolaResultFile("Matrix 'description' is not in result file " + fullFileName)
        if not("dataInfo" in fileData):
            raise WrongDymolaResultFile("Matrix 'dataInfo' is not in result file " + fullFileName)
        if not("data_1" in fileData):
            raise WrongDymolaResultFile("Matrix 'data_1' is not in result file " + fullFileName)
        if not("data_2" in fileData):
            raise WrongDymolaResultFile("Matrix 'data_2' is not in result file " + fullFileName)

        # Get the raw matrices
        name = fileData["name"]
        description = fileData["description"]
        dataInfo = fileData["dataInfo"]
        #data = [ fileData["data_1"], fileData["data_2"][:, :-1] ]
        data = [ fileData["data_1"], fileData["data_2"] ]

        # Transpose the data, if necessary ("binTrans" marks transposed storage)
        if len(Aclass) > 3 and Aclass[3] == "binTrans":
            name = name.T
            description = description.T
            dataInfo = dataInfo.T
            data[0] = data[0].T
            data[1] = data[1].T

        # Transform the charArrays in string lists
        name = charArrayToStrList(name)
        # Hack for OpenModelica: Rename variable 'time' to 'Time'
        if name.count('time') > 0:
            name[name.index('time')] = 'Time'
        description = charArrayToStrList(description)
        # Extract units and update description
        unit, description = extractUnits(description)

        # Collect data
        self._name = name
        self._description = description
        self._unit = unit
        self._dataInfo = dataInfo
        self._data = data

        # Two time series: series 0 holds the parameters/constants row of
        # data_1, series 1 holds the time-varying trajectories of data_2.
        t = self.data("Time")

        data0 = data[0][0, :]
        data0 = numpy.reshape(data0, (1, len(data0)))

        self.timeSeries.append(IntegrationResults.TimeSeries(None, data0, "constant"))
        self.timeSeries.append(IntegrationResults.TimeSeries(t, data[1], "linear"))

        self.nTimeSeries = len(self.timeSeries)

        self.isAvailable = True

    def index(self, name):
        """ Return the index of variable 'name' (= full Modelica name),
            or -1 when the variable is not present in the result.

            Examples:
               result = loadDymolaResult()
               i_v1 = result.index("a.b.c")  # get index of signal
        """
        try:
            nameIndex = self._name.index(name)
        except ValueError:
            return -1
            # print("'" + name + "' is not present in the result")
        return nameIndex

    def readData(self, variableName):
        """Return (time vector, values, interpolation method) for a variable,
        or (None, None, None) when the variable is unknown."""
        nameIndex = self.index(variableName)
        if nameIndex < 0:
            return None, None, None
        # dataInfo column 0 holds the 1-based time-series number.
        seriesIndex = self._dataInfo[nameIndex, 0] - 1
        y = self.data(variableName)
        t = self.timeSeries[seriesIndex].independentVariable
        method = self.timeSeries[seriesIndex].interpolationMethod
        return t, y, method

    def data(self, name):
        """ Return the result values of variable 'name' (= full Modelica name)
            Examples:
               result = loadDymolaResult()
               time = result.data("Time")  # numpy vector of time instants
               v1 = result.data("a.b.c")   # numpy vector of v1 values
               i_v1 = result.index("a.b.c")  # get index of signal
               v1 = result.data(i_v1)      # numpy vector of v1 values
        """
        # Get index of the desired signal and check it
        if isinstance(name, str) or isinstance(name, unicode):
            nameIndex = self.index(name)
        elif isinstance(name, int):
            if name < 0 or name >= len(self._name):
                raise UnknownIndex(u"Index = " + str(name) + u" is not correct")
            nameIndex = name
        else:
            print name
            raise UnknownArgument("Argument name must be a string or an int, got %s %s" % (name,str(type(name))))
        if nameIndex < 0:
            return None

        # Determine location of data.  dataInfo row: [matrix number,
        # signed 1-based column].  A negative column flips the signal's sign.
        signalInfo = self._dataInfo[nameIndex, :]
        signalMatrix = signalInfo[0] if nameIndex > 0 else 2
        if signalMatrix < 1 or signalMatrix > 2:
            raise WrongDymolaResultFile("dataInfo[" + str(nameIndex) +
                                        ",0] = " + str(signalMatrix) +
                                        ", but must be 1 or 2")
        signalColumn = abs(signalInfo[1]) - 1
        signalSign = +1 if signalInfo[1] >= 0 else -1
        if signalMatrix == 1:
            # Data consists of constant data, expand data to match abscissa vector
            # n = self._data[1].shape[0]
            signalData = numpy.array([signalSign * self._data[0][0, signalColumn]])  # *numpy.ones(n)
        else:  # signalMatrix = 2
            signalData = signalSign * self._data[1][:, signalColumn]

        return signalData

    def getFileInfos(self):
        # No relevant file infos stored in a Dymola result file
        return dict()

    def getVariables(self):
        """Build a name -> ResultVariable dict for every variable in the file."""
        # Generate the dict
        variables = dict()

        # Fill the values of the dict
        for i in xrange(len(self._name)):
            name = self._name[i]

            # Matrix 1 holds fixed parameters, matrix 2 continuous signals.
            if self._dataInfo[i, 0] == 1:
                variability = 'fixed'
                seriesIndex = 0
            else:
                variability = 'continuous'
                seriesIndex = 1
            column = abs(self._dataInfo[i, 1]) - 1
            sign = 1 if self._dataInfo[i, 1] > 0 else -1

            value = None
            if variability == 'fixed':
                y = self.data(self._name[i])
                value = y[0]

            infos = collections.OrderedDict()
            if self._description[i] is not None:
                if len(self._description[i]) > 0:
                    infos['Description'] = self._description[i]
            infos['Variability'] = variability

            if len(self._unit[i]):
                unit = self._unit[i]
            else:
                unit = None

            variables[name] = IntegrationResults.ResultVariable(value, unit, variability, infos, seriesIndex, column, sign)

        return variables
def extractUnits(description):
    ''' Extract units from description and update description.

    A trailing "[unit]" (optionally "[unit|displayUnit]") is stripped from
    each description string and returned separately; entries whose bracket
    content contains ":#" (enumeration markers) keep an empty unit.
    Returns (unit list, updated description list); ``description`` is
    modified in place.

    Fix: ``xrange`` replaced by ``range`` for Python 3 compatibility
    (identical behavior here).
    '''
    unit = ['' for _ in range(len(description))]
    for index, s in enumerate(description):
        # Split off the last '[...]' group, if any.
        t = s.rsplit('[', 1)
        if len(t) > 1:
            if len(t[1]) > 0:
                if t[1][-1] == ']':
                    if '|' in t[1]:
                        # "[unit|displayUnit]": keep only the unit part.
                        if ':#' not in t[1]:
                            unit[index] = t[1].split('|', 1)[0]
                    elif ':#' not in t[1]:
                        unit[index] = t[1][:-1]
                    if len(t[0]) > 0:
                        description[index] = t[0][:-1]  # Delete space
                    else:
                        description[index] = ''  #
    return unit, description
class DymolaInit():
    ''' Separate class for initialization file of Dymola's simulation executable

        Plain record type: holds the parallel lists of variable names,
        initial values, units and descriptions.
    '''
    def __init__(self, name, value, unit, description):
        # Store each constructor argument under the attribute of the same name.
        for attrName, attrValue in zip(('name', 'value', 'unit', 'description'),
                                       (name, value, unit, description)):
            setattr(self, attrName, attrValue)
def loadDymolaInit(fileName):
    """ Load Dymola initial data in an object.

    Reads the dsin-style .mat file and returns a DymolaInit record, or None
    when fileName is None.  Raises FileDoesNotExist / WrongDymolaResultFile
    on missing files or malformed matrices.
    """

    # If no fileName given, return
    if fileName == None:
        return

    # Check if fileName exists
    if not os.path.isfile(fileName):
        raise FileDoesNotExist("File '" + fileName + "' does not exist")

    # Determine complete file name
    fullFileName = os.path.abspath(fileName)

    # Read data from file
    fileData = scipy.io.loadmat(fullFileName, matlab_compatible=True)

    # Check Aclass array (initialization files carry version "1.4")
    if not("Aclass" in fileData):
        raise WrongDymolaResultFile("Matrix 'Aclass' is missing in file " + fullFileName)
    Aclass = charArrayToStrList(fileData["Aclass"])
    if len(Aclass) < 3:
        raise WrongDymolaResultFile("Matrix 'Aclass' has not 3 or more rows in file " + fullFileName)
    if Aclass[1] != "1.4":
        raise WrongDymolaResultFile("Amatrix[1] is not '1.1' in file " + fullFileName)

    # Check whether other matrices are on the result file
    if not("initialName" in fileData):
        raise WrongDymolaResultFile("Matrix 'initialName' is not in file " + fullFileName)
    if not("initialDescription" in fileData):
        raise WrongDymolaResultFile("Matrix 'initialDescription' is not in file " + fullFileName)
    if not("initialValue" in fileData):
        raise WrongDymolaResultFile("Matrix 'initialValue' is not in file " + fullFileName)

    # Get the raw matrices
    name = fileData["initialName"]
    description = fileData["initialDescription"]
    value = fileData["initialValue"]

    # Transpose the data, if necessary ("binTrans" marks transposed storage)
    if len(Aclass) > 3 and Aclass[3] == "binTrans":
        name = name.T
        description = description.T
        value = value.T

    # Transform the charArrays in string lists
    name = charArrayToStrList(name)
    description = charArrayToStrList(description)
    # Extract units
    unit, description = extractUnits(description)

    # Generate a DymolaInit object and return it
    result = DymolaInit(name, value, unit, description)
    return result
##+++++++++++++++++++++++++++++++++++++++++++++++++++++
##++++++++++++++++ +++++++++++++++++++++++++++++
##+++++++++++++++ main() ++++++++++++++++++++++++++++
##++++++++++++++++ +++++++++++++++++++++++++++++
##+++++++++++++++++++++++++++++++++++++++++++++++++++++
if __name__ == "__main__":
    # Manual smoke test (Python 2 print statements).
    # NOTE(review): 'loadDymolaResult' is not defined in this module — this
    # block appears to predate the Results class and would raise NameError;
    # 'result.plot' is likewise not a Results method.  Verify before running.
    print("... started")
    result = loadDymolaResult('Modelica.Blocks.Examples.PID_Controller.mat')
    print result.fileName
    print result._name
    print result._description
    t = result.index("Time")
    print("time=" + str(t))
    t1 = result.data("Time")
    v1 = result.data("PI.y")
    result.plot("PI.y")
    raw_input("Press Enter: ")
| PySimulator/PySimulator | PySimulator/Plugins/SimulationResult/DymolaMat/DymolaMat.py | Python | lgpl-3.0 | 12,890 |
import sys, string, os, getopt
# Run this passing a ".i" file as param. Will generate ".d"
g_com_parent = ""
def GetComments(line, lineNo, lines):
    """Collect the '//' comment on *line* plus any continuation lines.

    A continuation line consists solely of a '//' comment that does not
    start a new '@' command.  Returns (comment_text, last_consumed_line_no).
    """
    parts = line.split("//", 2)
    doc = parts[1].strip() if len(parts) == 2 else ""
    cursor = lineNo + 1
    while cursor < len(lines):
        parts = lines[cursor].split("//", 2)
        if len(parts) != 2:
            break  # no single '//' comment on this line
        if parts[0].strip():
            break  # code before the comment -> not a continuation
        tail = parts[1].strip()
        if tail.startswith("@"):
            break  # a new autoduck command starts here
        doc += "\n// " + tail
        cursor += 1
    # Step back to the last line that actually belonged to this comment.
    return doc, cursor - 1
def make_doc_summary(inFile, outFile):
    """Scan a SWIG .i file for autoduck markup and write a .d summary.

    Collects @pyswig/@pymeth method declarations, #define constants and the
    %module docstring from '//' comments, then emits @pymethod/@module/@const
    autoduck directives to outFile.
    """
    methods = []
    modDoc = ""
    modName = ""
    lines = inFile.readlines()
    curMethod = None
    constants = []
    extra_tags = []
    lineNo = 0
    bInRawBlock = 0  # inside a %{ ... %} raw-C block; #defines there are ignored
    while lineNo < len(lines):
        line = lines[lineNo]
        if bInRawBlock and len(line)>2 and line[:2]=="%}":
            bInRawBlock = 0
        if not bInRawBlock and len(line)>2 and line[:2]=="%{":
            bInRawBlock = 1
        try:
            if line[:7]=="%module":
                # Module name and its doc comment.
                extra = line.split("//")
                if len(extra)>1:
                    modName = extra[0][7:].strip()
                    modDoc, lineNo = GetComments(line, lineNo, lines)
                lineNo += 1
            elif line[:7]=="#define" and not bInRawBlock:
                cname = line.split()[1]
                doc, lineNo = GetComments(line, lineNo, lines)
                constants.append((cname, doc))
            else:
                try:
                    pos = line.index("//")
                except ValueError:
                    pass
                else:
                    # Dispatch on the autoduck tag that follows '//'.
                    rest = line[pos+2:].strip()
                    if rest.startswith("@pymeth"):
                        # manual markup - reset the current method.
                        curMethod = None
                    if rest.startswith("@doc"):
                        pass
                    elif rest.startswith("@pyswig"):
                        doc, lineNo = GetComments(line, lineNo, lines)
                        curMethod = doc[8:], []
                        methods.append(curMethod)
                    elif rest.startswith("@const"):
                        doc, lineNo = GetComments(line, lineNo, lines)
                    else:
                        # Any other @tag is attached to the current method, or
                        # kept as a file-level extra when no method is open.
                        if rest.startswith("@"):
                            doc, lineNo = GetComments(line, lineNo, lines)
                            if curMethod:
                                curMethod[1].append("// " + doc + '\n')
                            else:
                                extra_tags.append("// " + doc + '\n')
        except:
            _, msg, _ = sys.exc_info()
            print("Line %d is badly formed - %s" % (lineNo, msg))
        lineNo = lineNo + 1

    # autoduck seems to crash when > ~97 methods.  Loop multiple times,
    # creating a synthetic module name when this happens.
    # Hrmph - maybe this was related to the way we generate -
    # see rev 1.80 of win32gui.i for a change that prevents this!
    max_methods = 999
    method_num = 0
    chunk_number = 0
    while 1:
        these_methods = methods[method_num:method_num+max_methods]
        if not these_methods:
            break
        thisModName = modName
        if g_com_parent:
            thisModName = "Py" + modName
        if chunk_number == 0:
            pass
        elif chunk_number == 1:
            thisModName = thisModName + " (more)"
        else:
            thisModName = thisModName + " (more %d)" % (chunk_number+1,)

        outFile.write("\n")
        for (meth, extras) in these_methods:
            # meth is "returnType|name|description".
            fields = meth.split('|')
            if len(fields)!=3:
                print("**Error - %s does not have enough fields" % meth)
            else:
                outFile.write("// @pymethod %s|%s|%s|%s\n" % (fields[0],thisModName,fields[1], fields[2]))
            for extra in extras:
                outFile.write(extra)

        # COM objects get an @object block; plain modules get @module.
        if g_com_parent:
            outFile.write("\n// @object %s|%s" % (thisModName,modDoc))
            outFile.write("\n// <nl>Derived from <o %s>\n" % (g_com_parent))
        else:
            outFile.write("\n// @module %s|%s\n" % (thisModName,modDoc))
        for (meth, extras) in these_methods:
            fields = meth.split('|')
            outFile.write("// @pymeth %s|%s\n" % (fields[1], fields[2]))
        chunk_number += 1
        method_num += max_methods

    outFile.write("\n")
    for extra in extra_tags:
        outFile.write("%s\n" % (extra) )
    for (cname, doc) in constants:
        outFile.write("// @const %s|%s|%s\n" % (modName, cname, doc) )
def doit():
    """Command-line entry point: parse args and generate the .d summary file.

    Usage: makedfromi.py [-o output_name] [-p com_parent] filename
    """
    global g_com_parent
    outName = ""
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'p:o:')
        for o,a in opts:
            if o=='-p':
                g_com_parent = a
            elif o=='-o':
                outName = a
        msg = ' '.join(args)
    except getopt.error:
        _, msg, _ = sys.exc_info()
        print(msg)
        print("Usage: %s [-o output_name] [-p com_parent] filename" % sys.argv[0])
        return
    inName = args[0]
    if not outName:
        # Default output: input basename with a .d extension.
        outName = os.path.splitext(os.path.split(inName)[1])[0] + ".d"
    inFile = open(inName)
    outFile = open(outName, "w")
    outFile.write("// @doc\n// Generated file - built from %s\n// DO NOT CHANGE - CHANGES WILL BE LOST!\n\n" % inName)
    make_doc_summary(inFile, outFile)
    inFile.close()
    outFile.close()
# Script entry point.
if __name__=='__main__':
    doit()
| DavidGuben/rcbplayspokemon | app/pywin32-220/AutoDuck/makedfromi.py | Python | mit | 4,584 |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Test the collection viewer interface."""
from grr.gui import runtests_test
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
class TestContainerViewer(test_lib.GRRSeleniumTest):
  """Test the collection viewer interface."""
  def CreateCollectionFixture(self):
    # Build an AFF4Collection of four stat entries under the test client's
    # analysis directory; this is the data the viewer test browses.
    with aff4.FACTORY.Create("aff4:/C.0000000000000001/analysis/FindFlowTest",
                             "AFF4Collection", token=self.token) as out_fd:
      out_fd.CreateView(
          ["stat.st_mtime", "type", "stat.st_size", "size", "Age"])
      for urn in [
          "aff4:/C.0000000000000001/fs/os/c/bin C.0000000000000001/rbash",
          "aff4:/C.0000000000000001/fs/os/c/bin C.0000000000000001/bash",
          "aff4:/C.0000000000000001/fs/os/c/bin/bash",
          "aff4:/C.0000000000000001/fs/os/c/bin/rbash",
      ]:
        fd = aff4.FACTORY.Open(urn, token=self.token)
        out_fd.Add(urn=urn, stat=fd.Get(fd.Schema.STAT))
  def setUp(self):
    super(TestContainerViewer, self).setUp()
    # Create a new collection
    with self.ACLChecksDisabled():
      self.CreateCollectionFixture()
      self.GrantClientApproval("C.0000000000000001")
  def testContainerViewer(self):
    # End-to-end: search for the client, browse to the collection, open the
    # container viewer, then verify listing and query filtering.
    self.Open("/")
    self.Type("client_query", "C.0000000000000001")
    self.Click("client_query_submit")
    self.WaitUntilEqual(u"C.0000000000000001",
                        self.GetText, "css=span[type=subject]")
    # Choose client 1
    self.Click("css=td:contains('0001')")
    # Go to Browse VFS
    self.Click("css=a:contains('Browse Virtual Filesystem')")
    # Navigate to the analysis directory
    self.Click("link=analysis")
    self.Click("css=span[type=subject]:contains(\"FindFlowTest\")")
    self.WaitUntil(self.IsElementPresent, "css=td:contains(\"VIEW\")")
    self.assert_("View details" in self.GetText(
        "css=a[href=\"#"
        "c=aff4%3A%2FC.0000000000000001&"
        "container=aff4%3A%2FC.0000000000000001%2Fanalysis%2FFindFlowTest&"
        "main=ContainerViewer&"
        "reason=Running+tests\"]"))
    self.Click("css=a:contains(\"View details\")")
    self.WaitUntil(self.IsElementPresent, "css=button[id=export]")
    # Expand the jstree down to the client's fs/os/c directory.
    self.ClickUntil("css=#_C_2E0000000000000001 ins.jstree-icon",
                    self.IsElementPresent,
                    "css=#_C_2E0000000000000001-fs ins.jstree-icon")
    self.ClickUntil("css=#_C_2E0000000000000001-fs ins.jstree-icon",
                    self.IsElementPresent,
                    "css=#_C_2E0000000000000001-fs-os ins.jstree-icon")
    self.ClickUntil("css=#_C_2E0000000000000001-fs-os ins.jstree-icon",
                    self.IsElementPresent,
                    "link=c")
    # Navigate to the bin C.0000000000000001 directory
    self.Click("link=c")
    # Check the filter string
    self.assertEqual("subject startswith 'aff4:/C.0000000000000001/fs/os/c/'",
                     self.GetValue("query"))
    # We should have exactly 4 files
    self.WaitUntilEqual(4, self.GetCssCount,
                        "css=.containerFileTable tbody > tr")
    # Check the rows
    self.assertEqual(
        "C.0000000000000001/fs/os/c/bin C.0000000000000001/bash",
        self.GetText("css=.containerFileTable tbody > tr:nth(0) td:nth(1)"))
    self.assertEqual(
        "C.0000000000000001/fs/os/c/bin C.0000000000000001/rbash",
        self.GetText("css=.containerFileTable tbody > tr:nth(1) td:nth(1)"))
    self.assertEqual(
        "C.0000000000000001/fs/os/c/bin/bash",
        self.GetText("css=.containerFileTable tbody > tr:nth(2) td:nth(1)"))
    self.assertEqual(
        "C.0000000000000001/fs/os/c/bin/rbash",
        self.GetText("css=.containerFileTable tbody > tr:nth(3) td:nth(1)"))
    # Check that query filtering works (Pressing enter)
    self.Type("query", "stat.st_size < 5000")
    self.Click("css=form[name=query_form] button[type=submit]")
    self.WaitUntilEqual("4874", self.GetText,
                        "css=.containerFileTable tbody > tr:nth(0) td:nth(4)")
    # We should have exactly 1 file
    self.assertEqual(
        1, self.GetCssCount("css=.containerFileTable tbody > tr"))
def main(argv):
  """Entry point: delegate to the GRR Selenium test runner."""
  # Run the full test suite
  runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
  flags.StartMain(main)
| darrenbilby/grr | gui/plugins/container_viewer_test.py | Python | apache-2.0 | 4,342 |
import simplejson as json
from django.conf import settings as django_settings
from django.http import HttpResponse
from cio.conf import settings
import djedi
# TODO: Switch simplejson to ujson or other?
class JSONResponseMixin(object):
    """
    A mixin that can be used to render a JSON response.
    """
    # Django response type used to wrap the serialized payload.
    response_class = HttpResponse
    def render_to_json(self, context, **response_kwargs):
        """
        Returns a JSON response, transforming 'context' to make the payload.
        """
        response_kwargs['content_type'] = 'application/json'
        return self.response_class(
            self.convert_context_to_json(context),
            **response_kwargs
        )
    def convert_context_to_json(self, context):
        """Convert the context dictionary into a JSON object"""
        # for_json=True is a simplejson extension: objects may expose a
        # for_json() method to control their own serialization.
        return json.dumps(context, indent=4, for_json=True)
class DjediContextMixin(object):
    """Mixin that injects djedi admin context: theme CSS URL and version."""
    def get_context_data(self, **context):
        """Add THEME (a stylesheet URL) and VERSION keys to *context*."""
        theme_setting = settings.THEME
        if '/' in theme_setting:
            # Already a path or URL; use it verbatim.
            css_url = theme_setting
        else:
            # Bare theme name: resolve to the bundled theme stylesheet.
            css_url = '{static}djedi/themes/{theme}/theme.css'.format(
                static=django_settings.STATIC_URL, theme=theme_setting)
        context['THEME'] = css_url
        context['VERSION'] = djedi.__version__
        return context
| chrippa/djedi-cms | djedi/admin/mixins.py | Python | bsd-3-clause | 1,227 |
try:
from magic import from_buffer as magic_from_buffer
except ImportError:
import mimetypes
def get_file_type(file, filename):
    """Best-effort MIME type for an open file object.

    Sniffs the first 1 KiB with python-magic when available; if the magic
    binding is absent (NameError on the conditional import), falls back to
    extension-based guessing via mimetypes.
    """
    try:
        sniffed = magic_from_buffer(file.read(1024), mime=True)
    except NameError:
        # python-magic is not installed; guess from the file name instead.
        return mimetypes.guess_type(filename)[0]
    file.seek(0)
    return maybe_decode(sniffed)
# return str on python3. Don't want to unconditionally
# decode because that results in unicode on python2
def maybe_decode(s):
    """Return *s* as text: bytes are decoded as UTF-8, str passes through."""
    decoded = s
    try:
        decoded = s.decode("utf-8")
    except AttributeError:
        # Already a str (no .decode); keep it unchanged.
        pass
    return decoded
| randomchars/pushbullet.py | pushbullet/filetype.py | Python | mit | 586 |
__author__ = 'Bohdan Mushkevych'
import unittest
from datetime import datetime, timedelta
from synergy.system.event_clock import EventClock, EventTime, parse_time_trigger_string, format_time_trigger_string
from synergy.system.repeat_timer import RepeatTimer
class TestEventClock(unittest.TestCase):
    """Unit tests for EventTime/EventClock and trigger-string round-trips.

    NOTE(review): the ``month=05, day=01`` literals below are Python 2
    leading-zero syntax; this module does not parse under Python 3.
    """
    def test_utc_now(self):
        # EventTime.utc_now() should mirror the current UTC weekday and
        # hour/minute, with seconds/microseconds zeroed.
        utc_now = datetime.utcnow()
        self.obj = EventTime.utc_now()
        print(str(self.obj))
        assert self.obj.day_of_week == str(utc_now.weekday()) \
               and self.obj.time_of_day.hour == utc_now.hour \
               and self.obj.time_of_day.minute == utc_now.minute \
               and self.obj.time_of_day.second == 0 \
               and self.obj.time_of_day.microsecond == 0
        other_obj = EventTime.utc_now()
        self.assertEqual(other_obj, self.obj)
    def test_eq(self):
        # Equality semantics: '*' day-of-week wildcards and zero-padded
        # hours compare equal to their concrete/padded counterparts.
        params = [EventTime(x) for x in ['17:00', '4-15:45', '*-09:00', '8:01']]
        expected = [EventTime(x) for x in ['*-17:00', '2-17:00', '4-15:45', '*-09:00', '*-9:00', '*-08:01']]
        not_expected = [EventTime(x) for x in ['*-17:15', '1-15:45', '*-9:01', '*-18:01']]
        for event in expected:
            self.assertIn(event, params)
        for event in not_expected:
            self.assertNotIn(event, params)
    def test_parser(self):
        # 'every N' -> RepeatTimer; 'at <times>' -> EventClock.
        fixture = {'every 300': (300, RepeatTimer),
                   'every 500': (500, RepeatTimer),
                   'every 1': (1, RepeatTimer),
                   'at *-17:00, 4-15:45,*-09:00 ': (['*-17:00', '4-15:45', '*-09:00'], EventClock),
                   'at 5-18:00 ,4-18:05 ,1-9:01 ': (['5-18:00', '4-18:05', '1-9:01'], EventClock),
                   'at *-08:01': (['*-08:01'], EventClock),
                   'at 8:30': (['8:30'], EventClock)}
        for line, expected_output in fixture.items():
            processed_tuple = parse_time_trigger_string(line)
            self.assertEqual(processed_tuple, expected_output)
    def test_formatter(self):
        # Inverse of test_parser: handlers serialize back to trigger strings
        # with normalized (zero-padded) times.
        fixture = {RepeatTimer(300, None): 'every 300',
                   RepeatTimer(500, None): 'every 500',
                   RepeatTimer(1, None): 'every 1',
                   EventClock(['*-17:00', '4-15:45', '*-09:00'], None): 'at *-17:00,4-15:45,*-09:00',
                   EventClock(['5-18:00', '4-18:05', '1-9:01'], None): 'at 5-18:00,4-18:05,1-09:01',
                   EventClock(['*-08:01'], None): 'at *-08:01',
                   EventClock(['8:30'], None): 'at *-08:30'}
        for handler, expected_output in fixture.items():
            processed_tuple = format_time_trigger_string(handler)
            self.assertEqual(processed_tuple, expected_output)
    def test_next_run_in(self):
        # 2014-05-01 is Thu. In Python it is weekday=3
        fixed_utc_now = \
            datetime(year=2014, month=05, day=01, hour=13, minute=00, second=00, microsecond=00, tzinfo=None)
        fixture = {EventClock(['*-17:00', '4-15:45', '*-09:00'], None):
                       timedelta(days=0, hours=4, minutes=0, seconds=0, microseconds=0, milliseconds=0),
                   EventClock(['5-18:00', '4-18:05', '1-9:01'], None):
                       timedelta(days=1, hours=5, minutes=5, seconds=0, microseconds=0, milliseconds=0),
                   EventClock(['*-08:01'], None):
                       timedelta(days=0, hours=19, minutes=1, seconds=0, microseconds=0, milliseconds=0),
                   EventClock(['8:30'], None):
                       timedelta(days=0, hours=19, minutes=30, seconds=0, microseconds=0, milliseconds=0)}
        for handler, expected_output in fixture.items():
            # next_run_in only makes sense for a live clock; stub is_alive.
            handler.is_alive = lambda: True
            processed_output = handler.next_run_in(utc_now=fixed_utc_now)
            self.assertEqual(processed_output, expected_output)
if __name__ == '__main__':
    unittest.main()
| eggsandbeer/scheduler | tests/test_event_clock.py | Python | bsd-3-clause | 3,847 |
#!/usr/bin/env python
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from config_manager.eucalyptus.topology.cluster.nodecontroller import NodeController
class Hyperv(NodeController):
    """Eucalyptus node-controller configuration for the Hyper-V hypervisor.

    The hypervisor name passed to the base class is derived from this
    class's name ("hyperv").
    """
    def __init__(self,
                 name,
                 description=None,
                 read_file_path=None,
                 write_file_path=None,
                 property_type=None,
                 version=None
                 ):
        # Baseconfig init() will read in default values from read_file_path if it is populated.
        # NOTE(review): property_type is accepted but never forwarded to the
        # base class -- confirm whether NodeController consumes it.
        super(Hyperv, self).__init__(name=name,
                                     hypervisor=str(self.__class__.__name__).lower(),
                                     description=description,
                                     read_file_path=read_file_path,
                                     write_file_path=write_file_path,
                                     version=version)
| tbeckham/DeploymentManager | config_manager/eucalyptus/topology/cluster/nodecontroller/hyperv.py | Python | apache-2.0 | 1,463 |
import asyncio
import logging
import urllib.parse
from functools import wraps
from typing import Optional
import aiohttp
import aiohttp_session
from aiohttp import web
from hailtop import httpx
from hailtop.auth import async_get_userinfo
from hailtop.config import get_deploy_config
# Module logger and the process-wide deployment config (used below to build
# the auth service's login URL and to resolve userinfo lookups).
log = logging.getLogger('gear.auth')
deploy_config = get_deploy_config()
# Prefix used by RFC 6750 style Authorization header values.
BEARER = 'Bearer '
def maybe_parse_bearer_header(value: str) -> Optional[str]:
    """Return the token from a 'Bearer <token>' header value, else None."""
    if not value.startswith(BEARER):
        return None
    return value[len(BEARER):]
async def _userdata_from_session_id(session_id: str, client_session: httpx.ClientSession):
    """Resolve *session_id* to userinfo via the auth service.

    Cancellation is propagated; any other failure becomes a 500 for the
    caller.
    """
    try:
        return await async_get_userinfo(
            deploy_config=deploy_config, session_id=session_id, client_session=client_session
        )
    except asyncio.CancelledError:
        # Never swallow task cancellation.
        raise
    except aiohttp.ClientResponseError as e:
        # NOTE(review): identical to the generic handler below and logs
        # "unknown exception" for a *known* response error.  A 401/403 from
        # the auth service arguably should not surface as a 500 -- confirm
        # the intended behavior.
        log.exception('unknown exception getting userinfo')
        raise web.HTTPInternalServerError() from e
    except Exception as e:  # pylint: disable=broad-except
        log.exception('unknown exception getting userinfo')
        raise web.HTTPInternalServerError() from e
async def userdata_from_web_request(request):
    """Return userdata for the browser session cookie on *request*, or None."""
    session = await aiohttp_session.get_session(request)
    if 'session_id' not in session:
        return None
    return await _userdata_from_session_id(session['session_id'], request.app['client_session'])
async def userdata_from_rest_request(request):
    """Return userdata for the Bearer token on *request*, or None.

    None is returned when the Authorization header is absent or is not a
    Bearer credential.
    """
    if 'Authorization' not in request.headers:
        return None
    session_id = maybe_parse_bearer_header(request.headers['Authorization'])
    if not session_id:
        return None
    # Use the parsed token rather than re-slicing the header with a magic
    # offset: the previous `auth_header[7:]` silently duplicated len(BEARER)
    # and would break if the prefix constant ever changed.
    return await _userdata_from_session_id(session_id, request.app['client_session'])
def rest_authenticated_users_only(fun):
    """Decorator: require valid Bearer credentials on a REST endpoint.

    A request carrying only web (cookie) credentials gets an explanatory
    401; otherwise *fun* is invoked as fun(request, userdata, ...).
    """
    # Fix: @wraps was missing here, unlike every other decorator in this
    # module, so the wrapped handler lost its __name__/__doc__ metadata.
    @wraps(fun)
    async def wrapped(request, *args, **kwargs):
        userdata = await userdata_from_rest_request(request)
        if not userdata:
            web_userdata = await userdata_from_web_request(request)
            if web_userdata:
                return web.HTTPUnauthorized(reason="provided web auth to REST endpoint")
            raise web.HTTPUnauthorized()
        return await fun(request, userdata, *args, **kwargs)
    return wrapped
def _web_unauthorized(request, redirect):
    """Build the response for an unauthenticated web request.

    Returns a plain 401 when *redirect* is false; otherwise a 302 to the
    auth service's login page with a `next` parameter pointing back at the
    original URL (reconstructed from X-Forwarded-* headers when present).
    """
    if not redirect:
        return web.HTTPUnauthorized()
    login_url = deploy_config.external_url('auth', '/login')
    # request.url is a yarl.URL
    request_url = request.url
    # Behind a reverse proxy, rebuild the externally-visible URL.
    x_forwarded_host = request.headers.get('X-Forwarded-Host')
    if x_forwarded_host:
        request_url = request_url.with_host(x_forwarded_host)
    x_forwarded_proto = request.headers.get('X-Forwarded-Proto')
    if x_forwarded_proto:
        request_url = request_url.with_scheme(x_forwarded_proto)
    return web.HTTPFound(f'{login_url}?next={urllib.parse.quote(str(request_url))}')
def web_authenticated_users_only(redirect=True):
    """Decorator factory: require a browser session on a web endpoint.

    Unauthenticated browsers are redirected to the login page when
    *redirect* is true, otherwise they get a 401.  A request carrying only
    REST (Bearer) credentials is rejected with an explanatory 401.
    """
    def wrap(fun):
        @wraps(fun)
        async def wrapped(request, *args, **kwargs):
            userdata = await userdata_from_web_request(request)
            if not userdata:
                rest_userdata = await userdata_from_rest_request(request)
                if rest_userdata:
                    return web.HTTPUnauthorized(reason="provided REST auth to web endpoint")
                raise _web_unauthorized(request, redirect)
            return await fun(request, userdata, *args, **kwargs)
        return wrapped
    return wrap
def web_maybe_authenticated_user(fun):
    """Decorator: pass userdata (or None) to *fun* without requiring auth."""
    @wraps(fun)
    async def wrapped(request, *args, **kwargs):
        return await fun(request, await userdata_from_web_request(request), *args, **kwargs)
    return wrapped
def web_authenticated_developers_only(redirect=True):
    """Decorator factory: like web_authenticated_users_only, but also
    requires the authenticated user to have the developer flag set."""
    def wrap(fun):
        @web_authenticated_users_only(redirect)
        @wraps(fun)
        async def wrapped(request, userdata, *args, **kwargs):
            if userdata['is_developer'] == 1:
                return await fun(request, userdata, *args, **kwargs)
            # Authenticated but not a developer: same response as no auth.
            raise _web_unauthorized(request, redirect)
        return wrapped
    return wrap
def rest_authenticated_developers_only(fun):
    """Decorator: REST endpoint restricted to authenticated developers."""
    @rest_authenticated_users_only
    @wraps(fun)
    async def wrapped(request, userdata, *args, **kwargs):
        if userdata['is_developer'] == 1:
            return await fun(request, userdata, *args, **kwargs)
        raise web.HTTPUnauthorized()
    return wrapped
| hail-is/hail | gear/gear/auth.py | Python | mit | 4,434 |
# All auth methods should follow this protocol.
class AbstractAuth(object):
    """Base protocol every auth method must follow.

    Subclasses override is_authenticated() and configure(); the base
    implementations are deliberate no-ops that return None.
    """
    def __init__(self):
        """The base protocol keeps no state."""
    def is_authenticated(self, event):
        """Decide whether *event* is authenticated; base returns None."""
    def configure(self, options=""):
        """Apply backend-specific *options*; base is a no-op."""
| Tuxemon/Tuxemon-Server | tuxemon_server/core/auth/abstract_auth.py | Python | gpl-3.0 | 218 |
"""Support for NIFTI files."""
import numpy as np
import nibabel
def read(path):
    """Read a NIFTI file.

    Returns (attrs, image): attrs is the raw NIFTI header as a dict, image
    is a float32 array with axes (depth, height, width, parameters) where
    the trailing parameter axis always has length 1.
    """
    img = nibabel.load(str(path))
    attrs = dict(img.header)
    # NOTE(review): get_data() is deprecated in newer nibabel releases in
    # favour of get_fdata()/dataobj -- confirm the pinned nibabel version.
    a = img.get_data().astype(np.float32)
    assert a.ndim == 3, a.shape
    a = a.T  # Set axes to (depth, height, width).
    a.shape += (1,)  # Add parameter axis.
    return attrs, a
| HACKMESEU/DicomBrowser | dwilib/dwi/nifti.py | Python | gpl-3.0 | 364 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para canal22
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
from core import logger
from core import scrapertools
from lib import youtube_dl
def get_video_url( page_url , premium = False , user="" , password="", video_password="", page_data="" ):
    """Resolve *page_url* through youtube-dl and return playable streams.

    Returns a list of [format_label, url] pairs.  Only page_url is used;
    the remaining parameters exist to satisfy the shared server-connector
    signature.
    """
    logger.info("[eitb.py] get_video_url(page_url='%s')" % page_url)
    ydl = youtube_dl.YoutubeDL({'outtmpl': u'%(id)s%(ext)s'})
    result = ydl.extract_info(page_url, download=False)
    video_urls = []
    if 'formats' in result:
        for entry in result['formats']:
            logger.info("entry="+repr(entry))
            # Keep only HTTP(S)-delivered formats (skip rtmp/m3u8 etc.).
            if 'http' in entry['protocol']:
                video_urls.append([scrapertools.safe_unicode(entry['format']).encode('utf-8'), scrapertools.safe_unicode(entry['url']).encode('utf-8')])
                #logger.info('Append: {}'.format(entry['url']))
    # Reversed so that (per youtube-dl's worst-to-best ordering) the higher
    # quality formats come first -- TODO confirm against the player's needs.
    video_urls.reverse()
    return video_urls
# Find videos hosted on this server within the supplied text
def find_videos(data):
    """Scan *data* for embeddable videos hosted on this server.

    canal22 exposes no directly embeddable videos, so this connector
    always reports an empty result list.
    """
    return []
| tvalacarta/tvalacarta | python/main-classic/servers/canal22.py | Python | gpl-3.0 | 1,220 |
from comics.aggregator.crawler import CrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
    """Metadata describing the Dungeons & Denizens comic feed."""
    name = "Dungeons & Denizens"
    language = "en"
    url = "http://dungeond.com/"
    start_date = "2005-08-23"
    end_date = "2014-03-05"
    # Publication ended on end_date, so the crawler is disabled.
    active = False
    rights = "Graveyard Greg"
class Crawler(CrawlerBase):
    """Crawler stub: the comic ended in 2014, so nothing is ever fetched."""
    def crawl(self, pub_date):
        pass  # Comic no longer published
| jodal/comics | comics/comics/dungeond.py | Python | agpl-3.0 | 429 |
# -*- coding: utf-8 -*-
#
# conncon_sources.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers of iaf_psc_alpha neurons,
connect with convergent projection and rectangular mask,
visualize connection from target perspective.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import nest
import nest.topology as topo
import pylab
pylab.ion()
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
# create two test layers
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
# convergent projection: each target draws sources from a 0.4 x 1.0
# rectangular mask with 50% connection probability and uniform weights
topo.ConnectLayers(a, b, {'connection_type': 'convergent',
                          'mask': {'rectangular': {'lower_left': [-0.2, -0.5],
                                                   'upper_right': [0.2, 0.5]}},
                          'kernel': 0.5,
                          'weights': {'uniform': {'min': 0.5, 'max': 2.0}},
                          'delays': 1.0})
pylab.clf()
# plot sources of neurons in different grid locations
for tgt_pos in [[15, 15], [0, 0]]:
    # obtain node id for center
    tgt = topo.GetElement(b, tgt_pos)
    # obtain list of outgoing connections for ctr
    # int() required to cast numpy.int64
    spos = tuple(zip(*[topo.GetPosition([int(conn[0])])[0] for conn in
                       nest.GetConnections(target=tgt)]))
    # scatter-plot
    pylab.scatter(spos[0], spos[1], 20, zorder=10)
    # mark sender position with transparent red circle
    ctrpos = pylab.array(topo.GetPosition(tgt)[0])
    pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.1, zorder=99,
                                       fc='r', alpha=0.4, ec='none'))
    # mark mask position with open red rectangle
    pylab.gca().add_patch(
        pylab.Rectangle(ctrpos - (0.2, 0.5), 0.4, 1.0, zorder=1,
                        fc='none', ec='r', lw=3))
# mark layer edge
pylab.gca().add_patch(pylab.Rectangle((-1.5, -1.5), 3.0, 3.0, zorder=1,
                                      fc='none', ec='k', lw=3))
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-2.0, 2.0, -2.0, 2.0])
pylab.axes().set_aspect('equal', 'box')
pylab.title('Connection sources')
| tobikausk/nest-simulator | topology/examples/conncon_sources.py | Python | gpl-2.0 | 3,092 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class VerifyGlobalNavigation(unittest.TestCase):
    """Selenium smoke test for the EPA EDS landing page.

    NOTE(review): Python 2 only -- uses 'except Exc, e' syntax, so this
    module does not parse under Python 3.
    """
    def setUp(self):
        # Fresh Firefox session per test with a generous implicit wait.
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://epa-eds-2-elbwebex-104nmt8jecxg3-500581402.us-east-1.elb.amazonaws.com/"
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_verify_global_navigation(self):
        driver = self.driver
        driver.get(self.base_url + "/")
        # ERROR: Caught exception [ERROR: Unsupported command [selectWindow | null | ]]
        # Poll (up to 60 s) for the appliance selector label to appear.
        for i in range(60):
            try:
                if "Select an ENERGY STAR appliance:" == driver.find_element_by_css_selector("label").text: break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
    def is_element_present(self, how, what):
        # Helper: report element presence as a bool instead of raising.
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True
    def is_alert_present(self):
        try: self.driver.switch_to_alert()
        except NoAlertPresentException, e: return False
        return True
    def close_alert_and_get_its_text(self):
        # Accept or dismiss the current alert (per accept_next_alert) and
        # return its text; always re-arm accept_next_alert afterwards.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    unittest.main()
| Aquilent/epa-eds | src/test/selenium/test-scripts/Verify-Global-Navigation.py | Python | mit | 1,975 |
#!/usr/bin/python
""" infiniband_topo_map.py
v1.0 April 29, 2015
Copyright 2015 Blake Caldwell
Oak Ridge National Laboratory
Purpose: Map out the IB topology
Where/How/When: Run from CLI
Return Values: 1 on success, 0 on unexpected failure
Expected Output:
1) with -t or --tree a JSON file in hierarcal structure
(parent-child) relationship
2) with -g or --graph a JSON file in links-nodes format
with the links defined between nodes
3) with -d --dot FILE, create a JPEG names FILE using the
pydot library (must be installed separately)
This program is licensed under GNU GPLv3. Full license in LICENSE
"""
#__email__ =
debug = True
import sys
from optparse import OptionParser, OptionError
from operator import itemgetter
import re,os
def cleanDescr(descr):
  """Normalize a node description: trim whitespace, then surrounding quotes.

  A missing or empty description becomes the placeholder " [Unused]".
  """
  if not descr:
    return " [Unused]"
  return descr.strip().strip("'")
class Port(object):
  """One IB port: LID, link parameters, owning-node identity and peer link.

  NOTE(review): printPort uses Python 2 print statements; this module is
  Python 2 only.
  """
  def __init__(self, portNum, lid, width, speed, parentType, parentGuid, parentDescr):
    self.portNum=portNum
    self.lid=lid
    self.speed=speed
    self.width=width
    self.remotePort=None  # peer Port, set via addRemotePort()
    self.parent=None  # owning Node, intended to be set via addParent()
    self.errors={}  # error-counter name -> count
    self.parentType = parentType  # "SW" (switch) or "CA" (channel adapter)
    self.parentGuid = parentGuid
    self.parentDescr = parentDescr
  def addRemotePort(self,remotePort):
    # Record the port at the far end of this link.
    self.remotePort=remotePort
  def addParent(self,parent):
    self.parent = parent
  def printPort(self):
    # Dump this port (and its peer, if connected) to stdout.
    print "local lid: %s"%self.lid
    print "local port number: %s"%self.portNum
    print "local node guid: %s"%self.parentGuid
    print "local node description: %s"%self.parentDescr
    if self.remotePort:
      print "remote lid: %s"%self.remotePort.lid
      print "remote port number: %s"%self.remotePort.portNum
      print "remote guid: %s"%self.remotePort.parentGuid
      print "remote node description: %s"%self.remotePort.parentDescr
      # NOTE(review): width/speed printed here are the *local* port's
      # values even though they sit in the remote-port section.
      print "width: %s"%self.width
      print "speed: %s"%self.speed
    else:
      print "remote port: None"
  def checkForErrors(self,errorKey,threshold):
    # True when the named error counter exceeds threshold (default 0);
    # an absent counter counts as no error.
    if threshold == None:
      threshold = 0
    try:
      if self.errors[errorKey] > threshold:
        return True
    except KeyError:
      pass
    return False
class Node(object):
  '''
  A Node is a collection of ports
  '''
  def __init__(self,descr,guid):
    self.guid=guid
    # Description is normalized (whitespace/quotes stripped) via cleanDescr.
    self.descr=cleanDescr(descr)
    self.ports={}  # portNum -> Port
class switchNode(Node):
  ''' Switch nodes have ports that are all connected '''
  def __init__(self, guid, descr):
    # NOTE(review): does not call Node.__init__, and the (guid, descr)
    # parameter order here is reversed relative to Node.__init__(descr,
    # guid) -- intentional only because all fields are assigned directly.
    self.subSwitches = {}
    self.hosts={}
    self.guid=guid
    self.descr=cleanDescr(descr)
    self.ports={}
  def addHCA(self,newHCA):
    # Register an attached host (HCA), keyed by its description.
    self.hosts[newHCA.descr] = newHCA
  def addSwitch(self,newSwitch):
    # Register a downstream switch, keyed by its description.
    self.subSwitches[newSwitch.descr] = newSwitch
class HCANode(Node):
  """A host channel adapter (HCA) endpoint node."""
  def __init__(self, guid, descr):
    # Node.__init__ takes (descr, guid) -- forward the arguments in that
    # order.  The previous code passed (guid, descr) straight through,
    # which swapped the two fields and made Topology._addHCA key its HCAs
    # dictionary by description instead of by GUID.
    super(HCANode, self).__init__(descr, guid)
class chassisSwitch:
  """A chassis (director) switch holding spine and leaf boards by GUID."""
  def __init__(self,descr):
    self.spines = {}  # guid -> spine node
    self.leafs = {}   # guid -> leaf node
    self.descr=cleanDescr(descr)
  def addSpine(self,newSpine):
    self.spines[newSpine.guid] = newSpine
  def addLeaf(self,newLeaf):
    self.leafs[newLeaf.guid] = newLeaf
class Topology:
  """In-memory model of an InfiniBand fabric: switches and HCAs by GUID.

  NOTE(review): Python 2 only -- print statements, dict.iterkeys(),
  dict.keys() indexing, and sorted(..., key=itemgetter(1)) which orders
  Port objects (arbitrary-type comparison is a Python 2 behavior).
  """
  global debug  # no-op inside a class body; debug resolves from module scope
  def __init__(self):
    self.chassisSwitches = {}
    self.switches = {}  # switch guid -> switchNode
    self.HCAs = {}  # HCA guid -> HCANode
  def addChassisSwitch(self,newChassisSwitch):
    # Register a chassis switch, keyed by its cleaned description.
    self.chassisSwitches[newChassisSwitch.descr] = newChassisSwitch
    # need to remove spines and leafs from switches dictionary
  def build(self,portList):
    # Merge every discovered port into the topology.
    for port in portList.ports:
      self._addPort(port)
      #self._addPort(port.remotePort)
  def _addPort(self, port):
    # Wrap the port's parent in a one-port node and merge it in.
    if port.parentType == "SW":
      myNode = switchNode(port.parentGuid, port.parentDescr)
      myNode.ports[port.portNum] = port
      # NOTE(review): this assignment replaces the Port.addParent *method*
      # with the node object, so Port.parent is never populated.  Probably
      # meant port.addParent(myNode).  Same issue in the CA branch below.
      port.addParent=myNode
      self._addSwitch(myNode)
    elif port.parentType == "CA":
      myNode = HCANode(port.parentGuid, port.parentDescr)
      myNode.ports[port.portNum] = port
      port.addParent=myNode
      self._addHCA(myNode)
    else:
      print "Unrecognized type: %s" % port.parentType
      return
  def _addSwitch(self,newSwitch):
    # If the switch isn't already in the topology, take it as-is;
    # otherwise fold its single port into the existing entry.
    if newSwitch.guid not in self.switches:
      self.switches[newSwitch.guid] = newSwitch
    else:
      # only need to add another port
      # since newSwitch will only have a single port, just add that one
      if debug:
        if len(newSwitch.ports) > 1:
          print "There should only be one port in newly created switch!"
          raise KeyError
      port_keys = newSwitch.ports.keys()
      only_port = port_keys[0]
      if debug:
        if only_port in self.switches[newSwitch.guid].ports:
          print "This port is being added twice"
          newSwitch.ports[only_port].printPort()
          raise BaseException
      self.switches[newSwitch.guid].ports[only_port] = newSwitch.ports[only_port]
  def _addHCA(self,newHCA):
    # Same merge logic as _addSwitch, for host channel adapters.
    if newHCA.guid not in self.HCAs:
      self.HCAs[newHCA.guid] = newHCA
    else:
      # only need to add another port
      if debug:
        if len(newHCA.ports) > 1:
          print "There should only be one port in newly created HCA!"
          raise KeyError
      port_keys = newHCA.ports.keys()
      only_port = port_keys[0]
      if debug:
        if only_port in self.HCAs[newHCA.guid].ports:
          print "This port is being added twice"
          newHCA.ports[only_port].printPort()
          raise BaseException
      self.HCAs[newHCA.guid].ports[only_port] = newHCA.ports[only_port]
    # only need to add another port
  def printSwitches(self):
    # Text dump: every switch followed by its connected remote ports.
    for switch_guid in self.switches.iterkeys():
      print "Switch: %s" % (self.switches[switch_guid].descr)
      for index,myPort in sorted(self.switches[switch_guid].ports.items(),key=itemgetter(1),reverse=True):
        if not myPort.remotePort:
          # disconnected, so continue
          continue
        print "[%d] %s port %d " % (int(index),myPort.remotePort.parentDescr,int(myPort.remotePort.portNum))
      print
  def createDot(self, output_file, cluster = None):
    # Render the fabric as a JPEG via pydot/graphviz.
    # NOTE(review): 'pydot' is never imported in this module, and the
    # uniqueDescr call below references 'imyport' (a typo for 'myPort');
    # as written this method raises NameError.  Also the second
    # pydot.Edge(...) assignment below is a dead store.
    graph = pydot.Dot(graph_type='graph',size="500, 300", ratio="expand",mode="major")
    # cluster_XXX=pydot.Cluster('yyyy',label="zzz")
    for switch_guid in self.switches:
      # we only need to go through each switch and print all of the connected nodes
      switch_descr = self.switches[switch_guid].descr
      switch_descr = uniqueDescr(switch_descr,switch_guid)
      node_switch=pydot.Node("%s" % switch_descr)
      for index,myPort in sorted(self.switches[switch_guid].ports.items(),key=itemgetter(1),reverse=True):
        if not myPort.remotePort:
          # disconnected, so continue
          continue
        node_descr=myPort.remotePort.parentDescr
        node_descr = uniqueDescr(node_descr,imyport.remotePort.parentGuid)
        node=pydot.Node("%s" % node_descr)
        edge=pydot.Edge(node_switch,node)
        graph.add_edge(edge)
        #if re.match("XXXX",remotePort.descr):
        #  cluster_XXX.add_node(node)
        #else:
        edge=pydot.Edge(node_switch,node)
      graph.add_node(node_switch)
    #graph.add_subgraph(cluster_XXX)
    full_path = "%s" % (output_file)
    graph.write_jpeg(full_path,prog="neato")
  def createTree(self,output_file):
    # Emit a parent/child hierarchy ("name"/"children" dicts) as JSON,
    # rooted at a Spine switch when one exists.
    import json
    rootDict = {}
    added_list = {}
    leftoverDict = {}
    root = None
    for switch_guid in self.switches.iterkeys():
      # find a spine
      if "Spine" in uniqueDescr(self.switches[switch_guid].descr,switch_guid):
        root=switch_guid
    if not root:
      # NOTE(review): falls back to whichever switch_guid the loop above
      # visited last -- an arbitrary (dict-ordered) choice.
      root=switch_guid
    # start at the root
    rootDict = {
      "name": uniqueDescr(self.switches[root].descr,root)
    }
    rootDict['children'] = []
    print "root is %s" % rootDict['name']
    # First pass: switches directly connected to the root.
    for index,myPort in sorted(self.switches[root].ports.items(),key=itemgetter(1),reverse=True):
      if not myPort.remotePort:
        # disconnected, so continue
        continue
      if myPort.remotePort.parentGuid in added_list:
        continue
      tempDict = {}
      tempDict['name'] = uniqueDescr(myPort.remotePort.parentDescr,myPort.remotePort.parentGuid)
      tempDict['children'] = []
      if myPort.remotePort.parentType == "SW":
        added_list[myPort.remotePort.parentGuid] = tempDict
        rootDict['children'].append(tempDict)
        print "adding to root: %s"% uniqueDescr(myPort.remotePort.parentDescr,myPort.remotePort.parentGuid)
    # Second pass: hang each remaining switch (and its HCAs) under the
    # first already-placed switch it connects to.
    for switch_guid in self.switches.iterkeys():
      if switch_guid == root:
        continue
      if switch_guid in added_list:
        continue
      switch_descr = self.switches[switch_guid].descr
      switch_descr = uniqueDescr(switch_descr,switch_guid)
      thisDict = {}
      thisDict['name'] = switch_descr
      thisDict['children'] = []
      # add HCAS
      for index,myPort in sorted(self.switches[switch_guid].ports.items(),key=itemgetter(1),reverse=True):
        if not myPort.remotePort:
          # disconnected, so continue
          continue
        elif myPort.remotePort.parentType == "CA":
          hostDict = { 'name': myPort.remotePort.parentDescr }
          thisDict['children'].append(hostDict)
      for index,myPort in sorted(self.switches[switch_guid].ports.items(),key=itemgetter(1),reverse=True):
        if not myPort.remotePort:
          # disconnected, so continue
          continue
        elif myPort.remotePort.parentGuid in added_list:
          # this remote port has already been added, slide ourselves under its children
          added_list[myPort.remotePort.parentGuid]['children'].append(thisDict)
          added_list[switch_guid] = thisDict
          print "adding %s to %s" %(thisDict['name'], myPort.remotePort.parentDescr)
          break
      if not switch_guid in added_list:
        thisDict['children'] = {}
        leftoverDict[switch_guid] = thisDict
    moreleftoverDict = {}
    # Now go through the rest of the switches
    for switch_guid in leftoverDict.iterkeys():
      if switch_guid in added_list:
        continue
      switch_descr = self.switches[switch_guid].descr
      switch_descr = uniqueDescr(switch_descr,switch_guid)
      thisDict = {}
      thisDict['name'] = switch_descr
      thisDict['children'] = []
      # find ports that connect to a switch already added
      for index,myPort in sorted(self.switches[switch_guid].ports.items(),key=itemgetter(1),reverse=True):
        if not myPort.remotePort:
          # disconnected, so continue
          continue
        elif myPort.remotePort.parentType == "CA":
          hostDict = { 'name': myPort.remotePort.parentDescr }
          thisDict['children'].append(hostDict)
        elif myPort.remotePort.parentGuid in added_list:
          print "adding %s" % switch_descr
          # this remote port has already been added, slide ourselves under its children
          added_list[myPort.remotePort.parentGuid]['children'].append(thisDict)
          added_list[switch_guid] = thisDict
          break
      if not switch_guid in added_list:
        thisDict['children'] = {}
        moreleftoverDict[switch_guid] = thisDict
    print "length of leftover = %d" % len(moreleftoverDict)
    f = open(output_file, 'w')
    f.write(json.dumps(
        rootDict,
        sort_keys=True,
        indent=2,
        separators=(',', ': ')))
    f.close()
  def createGraph(self,output_file):
    # Emit a d3-style force-graph JSON document: "nodes" (switches + HCAs,
    # grouped by IS5600 chassis) and "links" (one per connected port).
    import json
    complete_graph = {}
    complete_graph["nodes"] = []
    complete_graph["links"] = []
    node_map = {}  # guid -> index into complete_graph["nodes"]
    group_map = {}  # chassis number (or guid) -> group id
    group_counter = 0
    for switch_guid in self.switches.iterkeys():
      this_switch = {}
      switch_descr = self.switches[switch_guid].descr
      core_pattern=".*MellanoxIS5600-([0-9])+.*"
      m = re.match(core_pattern,switch_descr)
      # convert switch name to unique name after having matched against it
      switch_descr = uniqueDescr(switch_descr,switch_guid)
      if m:
        if m.group(1) in group_map:
          this_switch["group"] = group_map[m.group(1)]
        else:
          this_switch["group"] = len(group_map)
          group_map[m.group(1)] = len(group_map)
          group_counter += 1
      else:
        this_switch["group"] = len(group_map)
        group_map[switch_guid] = len(group_map)
        group_counter += 1
      # convert switch name to unique name
      this_switch["name"] = switch_descr
      this_switch["guid"] = switch_guid
      this_switch["size"] = 4
      complete_graph["nodes"].append(this_switch)
      # add this index to the node map for finding source and destination nodes below
      node_map[switch_guid] = len(node_map)
      for index,myPort in sorted(self.switches[switch_guid].ports.items(),key=itemgetter(1),reverse=True):
        if not myPort.remotePort:
          # disconnected, so continue
          continue
        if myPort.remotePort.parentType == "SW":
          #switches have already been added
          continue
        # at this point, it's an HCA
        this_node = {}
        if isLeafSwitch(myPort.parentDescr):
          this_node["group"] = group_map[myPort.parentGuid]
        this_node["name"] = myPort.remotePort.parentDescr
        this_node["guid"] = myPort.remotePort.parentGuid
        this_node["size"] = 4
        complete_graph["nodes"].append(this_node)
        node_map[myPort.remotePort.parentGuid] = len(node_map)
    # after the first pass, all nodes are part of the dictionary
    for switch_guid in self.switches.iterkeys():
      for index,myPort in sorted(self.switches[switch_guid].ports.items(),key=itemgetter(1),reverse=True):
        if not myPort.remotePort:
          # disconnected, so continue
          continue
        #if myPort.remotePort.parentType == "SW":
        #if isGroupedSwitch(myPort.remotePort.parentDescr):
        #  # if its a TOR switch, put this in its group
        #  node_num = node_map[myPort.parentGuid]
        #  print node_num
        #  print myPort.remotePort.parentGuid
        #  complete_graph["nodes"][node_num]["group"] = group_map[myPort.remotePort.parentGuid]
        node_descr = myPort.remotePort.parentDescr
        node_descr = uniqueDescr(node_descr,myPort.remotePort.parentGuid)
        this_link = {}
        this_link["source"] = node_map[myPort.parentGuid]
        this_link["target"] = node_map[myPort.remotePort.parentGuid]
        # this_link["value"] = 5
        complete_graph["links"].append(this_link)
    f = open(output_file, 'w')
    # NOTE(review): stray no-op expression statement below.
    complete_graph
    f.write(json.dumps(
        complete_graph,
        sort_keys=True,
        indent=2,
        separators=(',', ': ')))
    f.close()
def isLeafSwitch(descr):
    """Return True when *descr* names an Infiniscale-IV (leaf/TOR) switch."""
    return "Infiniscale-IV Mellanox Technologies" in descr
def isSpineSwitch(descr):
    """Return True when the cleaned-up description identifies a spine board."""
    return "Spine" in uniqueDescr(descr)
def isLineSwitch(descr):
    """Return True when the cleaned-up description identifies a line board."""
    return "Line" in uniqueDescr(descr)
def uniqueDescr(descr, guid=""):
    """ Clean up description if possible or use supplied GUID for uniqueness """
    # Leaf/TOR switches all carry the same generic description, so the guid
    # is the only way to tell them apart.
    if "Infiniscale-IV Mellanox Technologies" in descr:
        return "Mellanox TOR: " + guid
    if "MellanoxIS5600" in descr:
        # Core chassis boards encode their slot as /S<nn>/ (spine) or /L<nn>/ (line).
        spine = re.match(".*(MellanoxIS5600-[0-9]+).*\/S([0-9]+)\/.*", descr)
        if spine:
            return "%s Spine %s" % (spine.group(1), spine.group(2))
        line = re.match(".*(MellanoxIS5600-[0-9]+).*\/L([0-9]+)\/.*", descr)
        if line:
            return "%s Line %s" % (line.group(1), line.group(2))
    return descr
class portList:
    """Flat container for Port objects, searchable by (guid, port number)."""
    def __init__(self):
        # list of Port objects in insertion order
        self.ports = []
    def add(self, singlePort):
        """Append *singlePort* to the list."""
        self.ports.append(singlePort)
    def remove(self, singlePort):
        """Remove the stored port matching *singlePort*'s guid and portNum.

        Raises ValueError when no matching port is present.
        BUG FIX: the original called self.find(singlePort) with the wrong
        arity (find takes guid and port number) and used a bare ``raise``
        with no active exception; look the port up by its key fields and
        remove the found object instead.
        """
        found = self.find(singlePort.guid, singlePort.portNum)
        if found is None:
            raise ValueError(
                "Could not find port to remove with guid %s and number %s in port list"
                % (singlePort.guid, singlePort.portNum))
        self.ports.remove(found)
    def find(self, guid, port):
        """Return the stored port whose guid and portNum match, else None.

        BUG FIX: the original loop variable shadowed the *port* parameter,
        so the comparison ``port.portNum == port`` was meaningless.
        """
        for candidate in self.ports:
            if candidate.guid == guid and candidate.portNum == port:
                return candidate
        return None
def update_errors_from_ibqueryerrors(errors_file,switch_list):
    """Parse an ``ibqueryerrors`` output file and attach the parsed error
    counters to the matching switch ports in *switch_list*.

    Returns a NodeList containing every port that had errors recorded.
    NOTE(review): python2-only (print statements, dict.has_key()).
    """
    global debug
    error_list = NodeList()
    try:
        infile = open(errors_file)
    except:
        # NOTE(review): "infile" is unbound when open() fails, so this print
        # would itself raise NameError -- presumably errors_file was meant
        print "Unkown error opening file: ", infile
        sys.exit(1)
    # e.g. "   GUID 0xf452... port 12: [SymbolErrors == 3] [...]"
    pattern="^\s+GUID\s+([0-9xa-f]+)\s+port\s+(\d+):\s+(.*)$"
    skip_pattern="^Errors for.*$"
    for line in infile:
        line=line.rstrip()
        if re.match(skip_pattern, line):
            continue
        m = re.match(pattern, line)
        if m:
            # ibqueryerrors prints shortened guids; pad back to full width
            guid = m.group(1).replace("0x","0x000")
            portnum = m.group(2)
            error_string=m.group(3)
            errors = parseErrorStr(error_string)
            switch = switch_list.find(guid)
            if switch:
                if switch.portsByNum.has_key(portnum):
                    thisPort = switch.portsByNum[portnum]
                    switch.updatePortErrors(thisPort,errors)
                    error_list.add(thisPort)
                    print "adding port %s to error list" % thisPort.descr
                else:
                    print "Error: port %s does not exist on switch %s" % (portnum,switch.descr)
                    continue
            else:
                print "not doing anything for guid %s" % guid
    return error_list
def parseErrorStr(errorString):
    """Parse an ibqueryerrors counter string such as "[A == 1] [B == 2]"
    into a dict mapping counter name to integer value.

    Repeated counter names are summed.
    """
    counts = {}
    for entry in errorString.split('] ['):
        name, value = entry.split(" == ")
        name = name.replace("[", '')
        value = value.replace("]", '')
        counts[name] = counts.get(name, 0) + int(value)
    return counts
def parse_netdiscover(ibnetdiscover_file):
    """Parse an ibnetdiscover dump file and return a portList of Port objects.

    Active links produce a Port with its remote peer attached; disconnected
    switch ports produce a Port with no remote peer.  Lines matching neither
    pattern are reported and skipped.
    """
    global debug
    try:
        topology_infile = open(ibnetdiscover_file)
    except IOError:
        # write() works in both python 2 and 3 (the original used a
        # python2-only print statement here)
        sys.stdout.write("Unknown error opening file: %s\n" % ibnetdiscover_file)
        sys.exit(1)
    # the active port pattern will have the descriptions within parenthesis
    active_port_pattern = "^(.*)\((.*)\).*$"
    # the disconnected port will always be on a switch and the name of the
    # switch will be in single quotes
    disconn_port_pattern = "^(SW.*)\'(.*)\'.*$"
    port_list = portList()
    for line in topology_infile.readlines():
        disconn_port = None
        active_port = re.match(active_port_pattern, line)
        if not active_port:
            disconn_port = re.match(disconn_port_pattern, line)
        if active_port:
            (type1, lid1, port1, guid1, width, rate, dash,
             type2, lid2, port2, guid2) = active_port.group(1).split()
            (description1, description2) = active_port.group(2).split(" - ")
            thisPort = Port(port1, lid1, width, rate, type1, guid1, description1)
            remotePort = Port(port2, lid2, width, rate, type2, guid2, description2)
            thisPort.addRemotePort(remotePort)
        elif disconn_port:
            (type1, lid1, port1, guid1, width, rate) = disconn_port.group(1).split()
            description1 = disconn_port.group(2)
            thisPort = Port(port1, lid1, width, rate, type1, guid1, description1)
        else:
            # BUG FIX: the original applied "% line" to the *return value* of
            # sys.stdout.write() (a TypeError), then fell through and added a
            # stale (or unbound) thisPort; format correctly and skip the line.
            sys.stdout.write("*** No match found for line: %s ***\n" % line)
            continue
        port_list.add(thisPort)
    return port_list
def main():
    """Parse the command line, read the ibnetdiscover dump and emit the
    requested output formats (stdout listing, DOT diagram, JSON tree,
    JSON graph).  Returns/raises SystemExit on usage errors.
    """
    usage = "usage: %prog [-pvh] [-c cluster] [-o output_dir] [-t tree_file] [-g graph_file] [-d dot_file] ibnetdiscover_file [ ibqueryerrors_file ]"
    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--cluster",dest="cluster", help="Color highlight a cluster (DISABLED)")
    parser.add_option("-o", "--output-dir", dest="output", help="Image output directory")
    # BUG FIX: -d was declared action="count" (an int flag) yet its value is
    # used as a file name below and in the usage string; make it take a
    # filename argument like -t and -g.
    parser.add_option("-d", "--dot", dest="dot", help="Generate graphviz (DOT) diagram")
    parser.add_option("-t", "--tree", dest="tree", help="Generate JSON tree")
    parser.add_option("-g", "--graph", dest="graph", help="Generate JSON graph")
    parser.add_option("-p", "--print", help="print topology to stdout", action="count", dest="printout")
    parser.add_option("-v", "--verbose", help="Be verbose", action="count")
    try:
        (options, args) = parser.parse_args()
    except OptionError:
        parser.print_help()
        return 1
    if options.cluster:
        # single-argument print() works identically in python 2 and 3
        print("%s %s %s" % ("NOTICE: the --cluster option is disabled because it requires site",
            "specific options. Please modify the source with cluster strings and enable",
            "this option manually"))
        print("")
        #cluster = options.cluster
    else:
        cluster = ''
    if options.output:
        output_dir = options.output
    else:
        output_dir = '.'
    if len(args) >= 1:
        ports = parse_netdiscover(args[0])
        flat_topology = Topology()
        flat_topology.build(ports)
        if len(args) == 2:
            # BUG FIX: the original passed the undefined name "nodes" here;
            # pass the topology just built (presumably it provides the
            # find() lookup the error parser uses -- TODO confirm)
            nodes_with_errors = update_errors_from_ibqueryerrors(args[1], flat_topology)
    else:
        print("ERROR: no ibnetdiscover file given")
        sys.exit(1)
    if options.printout:
        flat_topology.printSwitches()
    if options.dot:
        # NOTE: these modules must be found in the current directory
        sys.path.append("pydot-1.0.28")
        sys.path.append("pyparsing-1.5.7")
        import pydot
        # BUG FIX: the original assigned "dot_output_file" (extra underscore)
        # but passed the never-updated "dot_outputfile", so the DOT writer
        # always received an empty file name.
        dot_outputfile = ""
        if output_dir:
            dot_outputfile = output_dir + '/' + options.dot
        flat_topology.createDot(dot_outputfile)
    if options.tree:
        tree_outputfile = ""
        if output_dir:
            tree_outputfile = output_dir + '/' + options.tree
        flat_topology.createTree(tree_outputfile)
    if options.graph:
        graph_outputfile = ""
        if output_dir:
            graph_outputfile = output_dir + '/' + options.graph
        flat_topology.createGraph(graph_outputfile)
# script entry point: exit status is whatever main() returns (None == 0)
if __name__ == "__main__":
    sys.exit(main())
| bacaldwell/ib_d3viz | ib_topology_graph.py | Python | gpl-3.0 | 24,257 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from collections import OrderedDict
from textwrap import dedent
from unittest import mock
from unittest.mock import call
import fixtures
import hashlib
import sys
import yaml
from testtools.matchers import Contains, Equals, FileExists
from xdg import BaseDirectory
from tests import (
fixture_setup,
unit
)
from . import CommandBaseTestCase
class UpdateCommandTestCase(CommandBaseTestCase, unit.TestWithFakeRemoteParts):
    """Tests for ``snapcraft update``: refreshing the remote parts cache.

    Fix: read the cached YAML with ``yaml.safe_load`` -- ``yaml.load``
    without an explicit Loader is deprecated (PyYAML >= 5.1) and raises a
    TypeError with PyYAML 6.
    """
    yaml_template = dedent("""\
        name: snap-test
        version: 1.0
        summary: test snapping
        description: if snap is successful a snap package will be available
        architectures: ['amd64']
        type: app
        confinement: strict
        grade: stable
        parts:
            part1:
                plugin: nil
        """)
    def _parts_dir(self):
        """Return the XDG cache directory keyed by SNAPCRAFT_PARTS_URI."""
        parts_uri = os.environ.get('SNAPCRAFT_PARTS_URI')
        return os.path.join(
            BaseDirectory.xdg_data_home, 'snapcraft',
            hashlib.sha384(parts_uri.encode(
                sys.getfilesystemencoding())).hexdigest())
    def setUp(self):
        super().setUp()
        self.parts_dir = self._parts_dir()
        self.parts_yaml = os.path.join(self.parts_dir, 'parts.yaml')
        self.headers_yaml = os.path.join(self.parts_dir, 'headers.yaml')
    def test_changed_parts_uri(self):
        """Changing SNAPCRAFT_PARTS_URI must refresh into a new cache dir."""
        result = self.run_command(['update'])
        self.assertThat(result.exit_code, Equals(0))
        self.useFixture(fixture_setup.FakeParts())
        self.useFixture(fixtures.EnvironmentVariable('CUSTOM_PARTS', '1'))
        self.parts_dir = self._parts_dir()
        self.parts_yaml = os.path.join(self.parts_dir, 'parts.yaml')
        result = self.run_command(['update'])
        self.assertThat(result.exit_code, Equals(0))
        expected_parts = OrderedDict()
        expected_parts['curl-custom'] = p = OrderedDict()
        p['plugin'] = 'autotools'
        p['source'] = 'http://curl.org'
        p['description'] = 'custom curl part'
        p['maintainer'] = 'none'
        with open(self.parts_yaml) as parts_file:
            parts = yaml.safe_load(parts_file)
        self.assertThat(parts, Equals(expected_parts))
    def test_update(self):
        """A plain update writes both parts.yaml and headers.yaml."""
        result = self.run_command(['update'])
        self.assertThat(result.exit_code, Equals(0))
        self.assertThat(self.parts_yaml, FileExists())
        self.assertThat(self.headers_yaml, FileExists())
        expected_parts = OrderedDict()
        expected_parts['curl'] = p = OrderedDict()
        p['plugin'] = 'autotools'
        p['source'] = 'http://curl.org'
        p['description'] = 'test entry for curl'
        p['maintainer'] = 'none'
        expected_parts['part1'] = p = OrderedDict()
        p['plugin'] = 'go'
        p['source'] = 'http://source.tar.gz'
        p['description'] = 'test entry for part1'
        p['maintainer'] = 'none'
        expected_parts['long-described-part'] = p = OrderedDict()
        p['plugin'] = 'go'
        p['source'] = 'http://source.tar.gz'
        p['description'] = 'this is a repetitive description ' * 3
        p['maintainer'] = 'none'
        expected_parts['multiline-part'] = p = OrderedDict()
        p['plugin'] = 'go'
        p['source'] = 'http://source.tar.gz'
        p['description'] = 'this is a multiline description\n' * 3
        p['maintainer'] = 'none'
        expected_headers = {
            'If-Modified-Since': 'Thu, 07 Jul 2016 10:00:20 GMT',
        }
        with open(self.parts_yaml) as parts_file:
            parts = yaml.safe_load(parts_file)
        with open(self.headers_yaml) as headers_file:
            headers = yaml.safe_load(headers_file)
        self.assertThat(parts, Equals(expected_parts))
        self.assertThat(headers, Equals(expected_headers))
    def test_update_with_unchanged_date_does_not_download_again(self):
        """A second update with an unchanged remote is a no-op."""
        result = self.run_command(['update'])
        self.assertThat(result.exit_code, Equals(0))
        result = self.run_command(['update'])
        self.assertThat(result.exit_code, Equals(0))
        self.assertThat(result.output, Contains(
            'The parts cache is already up to date.'))
    def test_update_with_changed_date_downloads_again(self):
        """A stale If-Modified-Since header forces a fresh download."""
        os.makedirs(self.parts_dir)
        with open(self.headers_yaml, 'w') as headers_file:
            yaml.dump(
                {'If-Modified-Since': 'Fri, 01 Jan 2016 12:00:00 GMT'},
                headers_file)
        result = self.run_command(['update'])
        self.assertThat(result.exit_code, Equals(0))
        self.assertThat(result.output, Equals(''))
    def test_update_with_no_content_length_is_supported(self):
        """Servers that omit Content-Length must still be handled."""
        self.useFixture(fixtures.EnvironmentVariable('NO_CONTENT_LENGTH', '1'))
        result = self.run_command(['update'])
        self.assertThat(result.exit_code, Equals(0))
        self.assertThat(self.parts_yaml, FileExists())
        self.assertThat(self.headers_yaml, FileExists())
    @mock.patch('snapcraft.internal.lxd.Containerbuild._container_run')
    @mock.patch('os.getuid')
    def test_update_containerized_exists_running(self,
                                                 mock_getuid,
                                                 mock_container_run):
        """With container builds enabled, update runs inside the container."""
        mock_container_run.side_effect = lambda cmd, **kwargs: cmd
        mock_getuid.return_value = 1234
        fake_lxd = fixture_setup.FakeLXD()
        self.useFixture(fake_lxd)
        # Container was created before and is running
        fake_lxd.name = 'local:snapcraft-snap-test'
        fake_lxd.status = 'Running'
        self.useFixture(fixtures.EnvironmentVariable(
            'SNAPCRAFT_CONTAINER_BUILDS', '1'))
        self.make_snapcraft_yaml(self.yaml_template)
        result = self.run_command(['update'])
        self.assertThat(result.exit_code, Equals(0))
        project_folder = '/root/build_snap-test'
        mock_container_run.assert_has_calls([
            call(['snapcraft', 'update'], cwd=project_folder, user='root'),
        ])
| elopio/snapcraft | tests/unit/commands/test_update.py | Python | gpl-3.0 | 6,656 |
#!/usr/bin/env python
import sys
import react
from react import conf
from react import core
from react.core import node
from react.utils import curry
import carsim.gui
from carsim.model import *
# enable the react framework's periodic heartbeat for every machine
conf.heartbeat = True
def usage():
    """Return the command-line usage string, showing this script's basename."""
    prog = sys.argv[0].split("/")[-1]
    return "usage:\n rosrun carsim %s <machine_name>" % prog
if __name__ == "__main__":
    # expect exactly one argument: the machine role to start
    if len(sys.argv) == 2:
        machine_name = str(sys.argv[1])
        if machine_name in ["Master", "Car", "RemoteCtrl"]:
            # route all log levels to a per-machine log file and disable the CLI
            file_logger = curry(conf.E_LOGGER.FILE, "%s.log" % machine_name)
            conf.cli = conf.E_THR_OPT.FALSE
            conf.log = curry(conf._prepend, "[LOG] ", file_logger)
            conf.debug = curry(conf._prepend, "[DEBUG] ", file_logger)
            conf.warn = curry(conf._prepend, "[WARN] ", file_logger)
            conf.error = curry(conf._prepend, "[ERROR] ", file_logger)
            conf.trace = conf.E_LOGGER.NULL
            # if machine_name == "Master":
            #     conf.debug = conf.E_LOGGER.NULL
            #     conf.cli = conf.E_THR_OPT.FALSE
            react.core.node.ReactMachine(machine_name).start_machine()
    else:
        # python2-only print statement; exits non-zero on bad usage
        print usage()
        sys.exit(1)
| aleksandarmilicevic/react | src/carsim/scripts/react_machine.py | Python | gpl-2.0 | 1,178 |
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
    """Yoda scraper for myvideolinks.net: resolves movie/episode queries to
    debrid-only hoster links.

    NOTE(review): python2 module (urllib.urlencode, urlparse.parse_qs).
    Fix: the if/else that built ``posts`` had two identical branches (dead
    conditional); collapsed to a single assignment.
    """
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['myvideolinks.net','iwantmyshow.tk']
        self.base_link = 'http://myvideolinks.net/'
        self.search_link = '/?s=%s'
    def movie(self, imdb, title, localtitle, aliases, year):
        """Return an urlencoded payload identifying a movie, or None on error."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return an urlencoded payload identifying a TV show, or None on error."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend a tvshow payload with episode-specific fields."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Search the site for the title, crawl each result page for hoster
        links and return a list of source dicts (all debrid-only).

        Returns [] when url is None or no sources are found.
        """
        try:
            sources = []
            if url == None: return sources
            if debrid.status() == False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # hdlr identifies the release: SxxExx for episodes, year for movies
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            # discover the site's current search prefix from its front page
            s = client.request(self.base_link)
            s = re.findall('\'(http.+?)\'', s) + re.findall('\"(http.+?)\"', s)
            s = [i for i in s if urlparse.urlparse(self.base_link).netloc in i and len(i.strip('/').split('/')) > 3]
            s = s[0] if s else urlparse.urljoin(self.base_link, 'v2')
            s = s.strip('/')
            url = s + self.search_link % urllib.quote_plus(query)
            r = client.request(url)
            r = client.parseDOM(r, 'h2')
            l = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            # NOTE(review): the r = [...] filters below are computed but never
            # used afterwards (posts is built from l); kept for fidelity.
            r = [(i[0], i[1], re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]), re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in l]
            r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]]
            r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r]
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[2]) and data['year'] == i[3]]
            r = [i for i in r if not any(x in i[4] for x in ['HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D'])]
            r = [i for i in r if '1080p' in i[4]][:1] + [i for i in r if '720p' in i[4]][:1]
            # BUG FIX: both branches of the original if 'tvshowtitle' test
            # built the same list, so the conditional was dead code.
            posts = [(i[1], i[0]) for i in l]
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    t = post[0]
                    u = client.request(post[1])
                    u = re.findall('"(http.+?)"', u) + re.findall('"(http.+?)"', u)
                    u = [i for i in u if not '/embed/' in i]
                    u = [i for i in u if not 'youtube' in i]
                    items += [(t, i) for i in u]
                except:
                    pass
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr: raise Exception()
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]
                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()
                    if '2160p' in fmt:
                        quality = '4K'
                    elif '4K' in fmt:
                        quality = '4K'
                    elif '1080p' in fmt:
                        quality = '1080p'
                    elif '720p' in fmt:
                        quality = '720p'
                    else:
                        quality = '720p'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
                    info = []
                    if '3d' in fmt: info.append('3D')
                    try:
                        # NOTE(review): items tuples only have two elements, so
                        # item[2] raises IndexError and the size is always
                        # skipped via this except -- TODO confirm intent
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
                    info = ' | '.join(info)
                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass
            # prefer non-CAM releases when any exist
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check
            return sources
        except:
            return
    def resolve(self, url):
        """Hoster URLs need no further resolution; return unchanged."""
        return url
| repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/en/iwantmyshow.py | Python | gpl-2.0 | 7,827 |
"""
AWS Alexa speech helper functions
Developed by Zac Patel on 1/10/17
Created using template: Alexa Skills Blueprint for Python 2.7
"""
# reading the version from the main file so it is easier to update
#from GameHelperMain import VERSION
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """
    Assemble the Alexa speechlet portion of a response: plain-text output
    speech, a simple card mirroring the spoken output, a reprompt, and the
    end-of-session flag.
    """
    speech = {'type': 'PlainText', 'text': output}
    card = {'type': 'Simple', 'title': title, 'content': output}
    reprompt = {'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': should_end_session
    }
def build_response(session_attributes, speechlet_response):
    """
    Wrap a speechlet response and the session attributes in the top-level
    Alexa response envelope that gets returned to the service.
    """
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
| zbpatel/AlexaGameHelper | SpeechHelpers.py | Python | gpl-3.0 | 1,416 |
# coding: utf-8
import pandas as pd
def format_datetime_to_date(obj):
    """Format a datetime-like value as 'YYYY/MM/DD'; return '' for null/NaT."""
    return '' if pd.isnull(obj) else obj.strftime("%Y/%m/%d")
| niamoto/niamoto-core | niamoto/bin/utils.py | Python | gpl-3.0 | 150 |
# Django
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
# Third Party
from django_extensions.db import fields
# First Party
from modulestatus.models import statusDateRangeMixin
class message(statusDateRangeMixin, models.Model):
    """A site-wide banner message, active within its status date range.

    Dismissible messages can be hidden per-session via ``dismiss_url``;
    the dismissal flag is stored in the session under ``session_key``.
    """
    title = models.CharField(_("Title"), max_length=150)
    message = models.TextField(_("Message"), max_length=1280)
    # when False the banner is always shown and cannot be closed
    dismissible = models.BooleanField(_("Dismissible"), default=True)
    slug = fields.AutoSlugField(populate_from="title")
    def __str__(self):
        return f"{self.title}"
    @property
    def dismiss_url(self):
        # URL the template posts to in order to hide this banner
        return reverse("banner_messages:dismiss", kwargs={"slug": self.slug})
    @property
    def session_key(self):
        # per-message session flag name recording a dismissal
        return f"message-{self.slug}-dismissed"
    @classmethod
    def get_messages(cls, session):
        """Return every message that should be displayed for *session*:
        those not yet dismissed there, plus all non-dismissible ones."""
        return [o for o in cls.objects.all() if not session.get(o.session_key, False) or not o.dismissible]
| bengosney/rhgd3 | banner_messages/models.py | Python | gpl-3.0 | 973 |
import report
import report_paperformat
| ttfseiko/openerp-trunk | openerp/addons/report/models/__init__.py | Python | agpl-3.0 | 40 |
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief EZ-Win: Pre-built windows using GTK3 for Python3.
@file __init__.py
@package pybooster.ezwin
@version 2019.07.14
@author Devyn Collier Johnson <DevynCJohnson@Gmail.com>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
# public re-export list: only the ezwin submodule is part of the API
__all__: list = [
    r'ezwin'
]
# package metadata consumed by documentation tools
__author__: str = r'Devyn Collier Johnson'
__docformat__: str = r'restructuredtext en'
__email__: str = r'DevynCJohnson@Gmail.com'
__copyright__: str = r'LGPLv3'
__version__: str = r'2019.07.14'
__description__: str = r'EZ-Win - Pre-built windows using GTK3 for Python3'
| DevynCJohnson/Pybooster | pylib/ezwin/__init__.py | Python | lgpl-3.0 | 1,569 |
import scipy as sp
import numpy as np
import scipy.io
import scipy.misc
import scipy.ndimage
import os
import os.path
import copy
import re
from mango import mpi
logger, rootLogger = mpi.getLoggers(__name__)
havePyDicom = False
try:
import dicom
havePyDicom = True
except ImportError as e:
dicom = e
# print "Error importing pydicom package: %s" % str(e)
havePyDicom = False
class VolumeMetaData:
    """Meta-data describing a 3D tomographic volume: sample id, voxel
    geometry, overall size/origin, number of slice files and raw attributes."""
    def __init__(self):
        # all fields start unset; readers (e.g. readXtekMetaData) fill them in
        self.sampleId = None
        self.voxelSize = None
        self.voxelUnit = None
        self.volumeSize = None
        self.volumeOrigin = None
        self.numFiles = None
        self.attribDict = None
    def log(self, logfunc=rootLogger.debug):
        """Emit every meta-data field as "name = value" through *logfunc*."""
        for label, value in (
            ("sampleId", self.sampleId),
            ("voxelSize", self.voxelSize),
            ("voxelUnit", self.voxelUnit),
            ("volumeSize", self.volumeSize),
            ("volumeOrigin", self.volumeOrigin),
            ("numFiles", self.numFiles),
            ("attribDict", self.attribDict),
        ):
            logfunc("%s = %s" % (label, value))
class Dicom2NetcdfJobMetaData(VolumeMetaData):
    """Volume meta-data extended with the sub-volume (slab) that a single
    DICOM->netCDF conversion job covers."""
    def __init__(self):
        VolumeMetaData.__init__(self)
        # origin and extent of the slab written by one job
        self.subVolumeOrigin = None
        self.subVolumeSize = None
class Dicom2NetcdfConvertJob:
    """
    Convert a single DICOM data file to a corresponding netCDF file.

    Instances are callable: __call__ reads the DICOM pixel data and writes
    a mango-style netCDF "block" file carrying the volume meta-data.
    """
    def __init__(self, inDicomFileName, outNetcdfFileName, metaData):
        # metaData is a Dicom2NetcdfJobMetaData describing this slice's slab
        self.inDicomFileName = inDicomFileName
        self.outNetcdfFileName = outNetcdfFileName
        self.metaData = metaData
    def __call__(self):
        """Perform the conversion for this job's single input file."""
        import mango.io
        logger.debug("Converting %s to %s..." % (self.inDicomFileName, self.outNetcdfFileName))
        try:
            dcm = dicom.read_file(self.inDicomFileName)
            # some files omit SamplesPerPixel, which pydicom needs to build
            # pixel_array; default to single-channel
            if (not hasattr(dcm, "SamplesPerPixel")):
                dcm.SamplesPerPixel = 1
            img = dcm.pixel_array
            logger.debug("Read %s shaped pixel_array from DICOM file." % (img.shape,))
        except Exception as e:
            logger.error("Error encountered reading DICOM file, dicom state:\n %s" % str(dcm))
            raise
        # map the numpy dtype onto mango's netCDF dtype/variable naming
        (isSigned,dtype) = mango.io.convertToNetcdfDtype[str(img.dtype).lower()]
        if (len(img.shape) <= 3):
            varName = mango.io.dtypeToNetcdfVariable[str(img.dtype).lower()]
        else:
            varName = "rgba8"
        dims = [varName + "_zdim", varName + "_ydim", varName + "_xdim"]
        ncdf = scipy.io.netcdf.netcdf_file(self.outNetcdfFileName, mode='w')
        if (len(img.shape) < len(dims)):
            # pad leading singleton dimensions so the data is always 3D
            outImg = scipy.zeros((1,)*(len(dims)-len(img.shape)) + img.shape, dtype=img.dtype)
            slicesTuple = ((0,)*(len(dims)-len(img.shape)) + (slice(None),)*len(img.shape))
            outImg[slicesTuple] = img
            img = outImg
        if (len(img.shape) <= 3):
            for i in range(len(dims)):
                ncdf.createDimension(dims[i], img.shape[i])
        else:
            raise RuntimeError("Converting to multi-channel 3D data not supported.")
        logger.debug("NetCDF img shape=%s." % (img.shape,))
        ncdf.createVariable(varName, dtype, dims)
        ncdf.variables[varName].value_signed=str(isSigned).lower()
        ncdf.variables[varName].data.flat[:] = scipy.array(img, copy=True, dtype=dtype).flat
        # mango block attributes: where this slab sits inside the full volume
        ncdf.zdim_total = self.metaData.volumeSize[2]
        ncdf.number_of_files = self.metaData.numFiles
        ncdf.zdim_range = \
            sp.array(
                [
                    self.metaData.subVolumeOrigin[2],
                    self.metaData.subVolumeOrigin[2] + self.metaData.subVolumeSize[2]-1
                ],
                dtype="int32"
            )
        ncdf.data_description = "DICOM converted to netCDF" ;
        ncdf.voxel_size_xyz = self.metaData.voxelSize
        ncdf.voxel_unit = self.metaData.voxelUnit
        # NOTE(review): trailing comma makes this a 1-tuple, not a string --
        # presumably unintended; TODO confirm downstream readers tolerate it
        ncdf.coord_transform = "",
        ncdf.total_grid_size_xyz = self.metaData.volumeSize
        ncdf.coordinate_origin_xyz = self.metaData.volumeOrigin
        ncdf.flush()
    def log(self, logfunc=rootLogger.debug):
        """Log the job's file names and meta-data via *logfunc*."""
        logfunc("inDicomFileName = %s" % self.inDicomFileName)
        logfunc("outNetcdfFileName = %s" % self.outNetcdfFileName)
        logfunc("meta-data:")
        self.metaData.log(logfunc)
def readXtekMetaData(inputDir, metaData=None):
    """Populate *metaData* from the first ``*.xtekct`` info file in *inputDir*.

    :param inputDir: directory containing the xtekct file (None is a no-op).
    :param metaData: VolumeMetaData to fill in; a fresh one is created when
        omitted.
    :return: the populated meta-data object.
    :raises RuntimeError: when *inputDir* is given but does not exist.
    """
    # BUG FIX: the original signature was "metaData=VolumeMetaData()" -- a
    # mutable default evaluated once at import time, so every caller that
    # omitted the argument shared (and overwrote) one instance.
    if metaData is None:
        metaData = VolumeMetaData()
    if inputDir is not None:
        if not os.path.exists(inputDir):
            raise RuntimeError("Path %s does not exist." % inputDir)
        dirListing = os.listdir(inputDir)
        dirListing.sort()
        fileRegex = re.compile('.*xtekct')
        d = dict()
        for fName in dirListing:
            if fileRegex.match(fName) is None:
                continue
            if hasattr(metaData, "infoFileName"):
                metaData.infoFileName = copy.deepcopy(fName)
            fName = os.path.join(inputDir, fName)
            rootLogger.info("Reading meta-data from file %s..." % fName)
            # use open() rather than the python2-only file() builtin
            with open(fName, 'r') as infoFile:
                fLines = infoFile.readlines()
            # collect every "key = value" line into a flat dict
            rex = re.compile('(.*)=(.*)')
            for line in fLines:
                m = rex.match(line)
                if m is not None:
                    d[m.group(1).strip()] = m.group(2).strip()
            voxMmSz = None
            voxSzUnit = None
            voxCount = None
            sampleId = None
            if ("VoxelsX" in d.keys()) and ("VoxelsY" in d.keys()) and ("VoxelsZ" in d.keys()):
                voxCount = [int(d["VoxelsX"]), int(d["VoxelsY"]), int(d["VoxelsZ"])]
            if ("VoxelSizeX" in d.keys()) and ("VoxelSizeY" in d.keys()) and ("VoxelSizeZ" in d.keys()):
                voxMmSz = [float(d["VoxelSizeX"]), float(d["VoxelSizeY"]), float(d["VoxelSizeZ"])]
                voxSzUnit = "mm"
            if "Name" in d.keys():
                sampleId = d["Name"]
            metaData.sampleId = sampleId
            # BUG FIX: only wrap the parsed values in arrays when present;
            # sp.array(None, dtype=...) raises with modern numpy
            metaData.voxelSize = sp.array(voxMmSz, dtype="float64") if voxMmSz is not None else None
            metaData.voxelUnit = voxSzUnit
            metaData.volumeSize = sp.array(voxCount, dtype="int32") if voxCount is not None else None
            metaData.volumeOrigin = sp.array([0, 0, 0], dtype="int32")
            metaData.attribDict = d
            # only the first matching info file is consulted
            break
    return metaData
class Dicom2Netcdf:
    """
    Converts DICOM file format to mango NetCDF format.

    One netCDF "block" file is written per input DICOM slice; the work is
    distributed round-robin over MPI ranks when mpi4py is available.
    """
    def __init__(self):
        self.netcdfExt = "nc"
        self.dicomExt = "dcm"
        self.dicomDir = None
        self.netcdfDir = None
        self.xtekInfoDir = None
    def readMetaData(self):
        """Read volume meta-data from the xtekct info directory."""
        return readXtekMetaData(self.xtekInfoDir)
    def getDicomFileNameList(self):
        """Return the sorted list of DICOM file paths under self.dicomDir."""
        rex = re.compile(".*%s" % self.dicomExt)
        dirListing = os.listdir(self.dicomDir)
        dirListing.sort()
        dicomFileNameList = []
        for fileName in dirListing:
            m = rex.match(fileName)
            if m is not None:
                dicomFileNameList.append(os.path.join(self.dicomDir, fileName))
        return dicomFileNameList
    def createJobList(self):
        """Build one Dicom2NetcdfConvertJob per input slice, using the slice
        index parsed from the file name as the z origin of that job's slab.

        :raises RuntimeError: when a file name has no parsable index digits.
        """
        metaData = self.readMetaData()
        dicomFileNameList = self.getDicomFileNameList()
        metaData.numFiles = len(dicomFileNameList)
        jobList = []
        rex = re.compile("[^0-9]*([0-9]+)([^0-9]*)%s" % self.dicomExt)
        for fileName in dicomFileNameList:
            m = rex.match(os.path.split(fileName)[1])
            if m is not None:
                idxStr = m.group(1).strip()
                # strip leading zeros before int() (e.g. "0042" -> 42)
                idxStrStripped = idxStr.lstrip('0')
                if (len(idxStrStripped) > 0):
                    idx = int(idxStrStripped)
                else:
                    idx = 0
                jobMetaData = Dicom2NetcdfJobMetaData()
                jobMetaData.sampleId = copy.deepcopy(metaData.sampleId)
                jobMetaData.voxelSize = copy.deepcopy(metaData.voxelSize)
                jobMetaData.voxelUnit = copy.deepcopy(metaData.voxelUnit)
                jobMetaData.volumeSize = copy.deepcopy(metaData.volumeSize)
                jobMetaData.volumeOrigin = copy.deepcopy(metaData.volumeOrigin)
                jobMetaData.numFiles = copy.deepcopy(metaData.numFiles)
                jobMetaData.attribDict = None
                # BUG FIX: the original tested "array != None", which numpy
                # evaluates elementwise and which raises ValueError when used
                # in a boolean context; use identity tests instead.
                if (
                    (jobMetaData.volumeOrigin is not None)
                    and
                    (jobMetaData.volumeSize is not None)
                ):
                    # each job writes a 1-slice-thick slab at z == idx
                    jobMetaData.subVolumeSize = copy.deepcopy(metaData.volumeSize)
                    jobMetaData.subVolumeOrigin = copy.deepcopy(metaData.volumeOrigin)
                    jobMetaData.subVolumeOrigin[2] = idx
                    jobMetaData.subVolumeSize[2] = 1
                job = \
                    Dicom2NetcdfConvertJob(
                        inDicomFileName = fileName,
                        outNetcdfFileName = os.path.join(self.netcdfDir, ("block%s" % idxStr) + "." + self.netcdfExt),
                        metaData = jobMetaData
                    )
                jobList.append(job)
            else:
                raise RuntimeError("Could not parse index string from file name %s" % fileName)
        return jobList
    def prepareNetcdfDir(self):
        """Create self.netcdfDir, removing any stale netCDF files first."""
        if (os.path.exists(self.netcdfDir)):
            if (os.path.isdir(self.netcdfDir)):
                for f in os.listdir(self.netcdfDir):
                    ff = os.path.join(self.netcdfDir, f)
                    ffExt = os.path.splitext(ff)[1]
                    if (os.path.isfile(ff) and (ffExt.find(self.netcdfExt) < len(ffExt))):
                        rootLogger.debug("Removing file %s" % ff)
                        os.remove(ff)
            else:
                # path exists but is a regular file: replace it with a dir
                os.remove(self.netcdfDir)
                os.makedirs(self.netcdfDir)
        else:
            os.makedirs(self.netcdfDir)
    def runJob(self, job):
        """Run one conversion job, logging (not re-raising) failures so a
        single bad slice does not abort the whole conversion."""
        try:
            job()
        except Exception as e:
            logger.error("Exception encountered converting %s to %s:" % (job.inDicomFileName, job.outNetcdfFileName))
            logger.error(str(e))
    def executeJobListUsingMpi(self, jobList):
        """Round-robin *jobList* over MPI ranks; rank 0 prepares the dir."""
        myRank = mpi.world.Get_rank()
        if (myRank == 0):
            self.prepareNetcdfDir()
        mpi.world.barrier()
        startIdx = myRank
        for jobIdx in range(startIdx, len(jobList), mpi.world.Get_size()):
            job = jobList[jobIdx]
            self.runJob(job)
    def executeJobList(self, jobList):
        """Execute the jobs with MPI when available, serially otherwise."""
        if (mpi.haveMpi4py):
            self.executeJobListUsingMpi(jobList)
        else:
            self.prepareNetcdfDir()
            for job in jobList:
                self.runJob(job)
    def convert(self, dicomDir, netcdfDir, xtekInfoDir=None):
        """Top-level entry point: convert every DICOM slice in *dicomDir*
        into netCDF block files under *netcdfDir*."""
        self.dicomDir = dicomDir
        self.netcdfDir = netcdfDir
        self.xtekInfoDir = xtekInfoDir
        jobList = self.createJobList()
        self.executeJobList(jobList)
| pymango/pymango | misc/python/mango/utils/_dicom2netcdf.py | Python | bsd-2-clause | 11,291 |
import _plotly_utils.basevalidators


class PadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``pad`` property of ``sankey.node``."""

    def __init__(self, plotly_name="pad", parent_name="sankey.node", **kwargs):
        # Apply this property's defaults unless the caller supplied
        # explicit overrides; they are forwarded to the base validator
        # together with any remaining keyword arguments.
        kwargs.setdefault("array_ok", False)
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 0)
        super(PadValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/sankey/node/_pad.py | Python | mit | 478 |
# -*- coding: utf-8 -*-
from parse import do_the_whole_thing as do_it
# Sample grammars (BNF-ish, one production per line, "|" separates
# alternatives, ε denotes the empty production).
bnf_text = ("E -> T E'\n"
            "E' -> + T E' | ε\n"
            "T -> F T'\n"
            "T' -> * F T' | ε\n"
            "F -> ( E ) | id")

indirect_recursive = ("S -> A a | b\n"
                      "A -> A c | S d | ε")

bnf_recursive = ("E -> E + T | T\n"
                 "T -> T * F | F\n"
                 "F -> ( E ) | id")

ambigous_text = ("S -> A | B\n"
                 "A -> a A b | ε\n"
                 "B -> a B b b | ε")

second_text = ("E -> pa Q R | pa Q S | pa T\n"
               "U -> e")

third_text = ("S -> i E t S | i E t S e S | a\n"
              "E -> b")

final_test = ("X -> a A\n"
              "A -> x X")

extra_test = ("S -> ( A ) | ε\n"
              "A -> T E\n"
              "E -> & T E | ε\n"
              "T -> ( A ) | a | b | c")

bonus_test = ("L -> % w D | U#\n"
              "U -> ! w D U | ε\n"
              "D -> : w D | w L")

additional_test = ("S -> A B e\n"
                   "A -> d B | a S | c\n"
                   "B -> a S | c")

free_test = ("Exp -> Exp + Exp2 | Exp - Exp2 | Exp2\n"
             "Exp2 -> Exp2 * Exp3 | Exp2 / Exp3 | Exp3\n"
             "Exp3 -> num | ( Exp )")

courtesy_test = ("E -> T + E | T\n"
                 "T -> int | int * T | ( E )")

# Run the whole parsing pipeline over each grammar, in the original order.
# (bnf_text is defined above for reference but is not processed.)
for grammar in (bnf_recursive, ambigous_text, second_text, third_text,
                indirect_recursive, final_test, extra_test, bonus_test,
                additional_test, free_test, courtesy_test):
    do_it(grammar)
| BreakingBugs/LL1-parser | examples.py | Python | mit | 1,543 |
#!/usr/bin/env python
# coding: utf-8
import unittest
import sys
import os
# Project root: two levels up from this file.
PROJECT_PATH = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
# Directory to discover tests in: the directory holding this file.
ROOT_PATH = os.path.dirname(__file__)

if __name__ == '__main__':
    # If the Google App Engine SDK location is provided, put it (and its
    # bundled libraries) on sys.path before loading any tests.
    sdk_path = os.environ.get('GAE_SDK')
    if sdk_path is not None:
        sys.path.insert(0, sdk_path)
        import dev_appserver
        dev_appserver.fix_sys_path()
    sys.path.append(os.path.join(PROJECT_PATH, 'src'))
    # Discover and run every test module; exit non-zero on failure so CI
    # can detect it.
    suite = unittest.TestLoader().discover(ROOT_PATH, "*.py")
    outcome = unittest.TextTestRunner().run(suite)
    if not outcome.wasSuccessful():
        sys.exit(1)
| renzon/blob_app | test/testloader.py | Python | mit | 630 |
from django.conf.urls import url, include
from . import views
from rest_framework import routers
# DRF router: auto-generates the REST list/detail URL patterns for each
# registered viewset.
router = routers.DefaultRouter()
router.register(r'meals', views.MealViewSet)
router.register(r'ingredients', views.IngredientViewSet)
# URL table: the browsable REST API lives under /api/, the HTML index at /.
urlpatterns = [
    url('^api/', include(router.urls)),
    url(r'^$', views.index, name='index')
]
| srenner/homeweb-v2 | homeweb/dinner/urls.py | Python | mit | 334 |
# pylint: disable=redefined-builtin, wildcard-import
"""Raspberry pi specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .conv2d import *
from .depthwise_conv2d import *
| phisiart/tvm | topi/python/topi/rasp/__init__.py | Python | apache-2.0 | 210 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.