repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
lisael/pg-django | refs/heads/master | django/contrib/messages/storage/session.py | 456 | from django.contrib.messages.storage.base import BaseStorage
class SessionStorage(BaseStorage):
    """
    Session-backed message storage (django.contrib.sessions).

    Everything handed to this backend fits in the session, so storage
    never overflows and retrieval always drains the full message list.
    """
    session_key = '_messages'

    def __init__(self, request, *args, **kwargs):
        assert hasattr(request, 'session'), "The session-based temporary "\
            "message storage requires session middleware to be installed, "\
            "and come before the message middleware in the "\
            "MIDDLEWARE_CLASSES list."
        super(SessionStorage, self).__init__(request, *args, **kwargs)

    def _get(self, *args, **kwargs):
        """
        Fetch the stored messages from the session.

        Since this backend always stores everything it is given, the
        all_retrieved flag is unconditionally True.
        """
        messages = self.request.session.get(self.session_key)
        return messages, True

    def _store(self, messages, response, *args, **kwargs):
        """
        Persist ``messages`` in the session, or clear the key when there
        is nothing left to store.  Nothing is ever left unstored.
        """
        session = self.request.session
        if messages:
            session[self.session_key] = messages
        else:
            session.pop(self.session_key, None)
        return []
|
jhoenicke/python-trezor | refs/heads/master | trezorlib/transport/webusb.py | 1 | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import atexit
import logging
import sys
import time
from typing import Iterable, Optional
from . import TREZORS, UDEV_RULES_STR, TransportException
from .protocol import ProtocolBasedTransport, ProtocolV1
LOG = logging.getLogger(__name__)
# usb1 (python-libusb1) is an optional dependency: when it cannot be
# imported, the WebUSB transport is disabled instead of breaking imports.
try:
    import usb1
except Exception as e:
    LOG.warning("WebUSB transport is disabled: {}".format(e))
    usb1 = None
if False:
    # mark Optional as used, otherwise it only exists in comments
    Optional
# USB interface and endpoint numbers for the normal and debug channels.
INTERFACE = 0
ENDPOINT = 1
DEBUG_INTERFACE = 1
DEBUG_ENDPOINT = 2
class WebUsbHandle:
    """Thin wrapper over a usb1 device: claims the (debug or normal)
    interface and moves fixed 64-byte chunks over interrupt endpoints."""

    def __init__(self, device: "usb1.USBDevice", debug: bool = False) -> None:
        self.device = device
        if debug:
            self.interface = DEBUG_INTERFACE
            self.endpoint = DEBUG_ENDPOINT
        else:
            self.interface = INTERFACE
            self.endpoint = ENDPOINT
        self.count = 0
        self.handle = None  # type: Optional[usb1.USBDeviceHandle]

    def open(self) -> None:
        """Open the device and claim our interface."""
        self.handle = self.device.open()
        if self.handle is None:
            # On Linux a missing udev rule is the usual culprit; attach
            # the instructions to the error in that case.
            extra = (UDEV_RULES_STR,) if sys.platform.startswith("linux") else ()
            raise IOError("Cannot open device", *extra)
        self.handle.claimInterface(self.interface)

    def close(self) -> None:
        """Release the interface and close the handle (idempotent)."""
        if self.handle is None:
            return
        self.handle.releaseInterface(self.interface)
        self.handle.close()
        self.handle = None

    def write_chunk(self, chunk: bytes) -> None:
        """Write exactly one 64-byte chunk to the OUT endpoint."""
        assert self.handle is not None
        size = len(chunk)
        if size != 64:
            raise TransportException("Unexpected chunk size: %d" % size)
        self.handle.interruptWrite(self.endpoint, chunk)

    def read_chunk(self) -> bytes:
        """Read one 64-byte chunk from the IN endpoint, polling until
        data arrives."""
        assert self.handle is not None
        endpoint = 0x80 | self.endpoint  # IN direction bit
        chunk = self.handle.interruptRead(endpoint, 64)
        while not chunk:
            time.sleep(0.001)
            chunk = self.handle.interruptRead(endpoint, 64)
        if len(chunk) != 64:
            raise TransportException("Unexpected chunk size: %d" % len(chunk))
        return chunk
class WebUsbTransport(ProtocolBasedTransport):
    """
    WebUsbTransport implements transport over WebUSB interface.
    """

    PATH_PREFIX = "webusb"
    ENABLED = usb1 is not None  # transport is unusable when usb1 failed to import
    context = None  # process-wide usb1.USBContext, created lazily in enumerate()

    def __init__(
        self,
        device: "usb1.USBDevice",
        handle: "Optional[WebUsbHandle]" = None,
        debug: bool = False,
    ) -> None:
        # Fix: the previous annotation said `device: str`, but every caller
        # passes a usb1.USBDevice (enumerate() appends the raw device, and
        # get_path() calls dev_to_str() on it).  `handle` is also explicitly
        # Optional now instead of an implicit-Optional `WebUsbHandle = None`.
        if handle is None:
            handle = WebUsbHandle(device, debug)
        self.device = device
        self.handle = handle
        self.debug = debug
        super().__init__(protocol=ProtocolV1(handle))

    def get_path(self) -> str:
        """Return the transport path, e.g. "webusb:003:1:2"."""
        return "%s:%s" % (self.PATH_PREFIX, dev_to_str(self.device))

    @classmethod
    def enumerate(cls) -> Iterable["WebUsbTransport"]:
        """Return a transport for every connected Trezor WebUSB device."""
        if cls.context is None:
            # Shared USB context for all transports, closed at interpreter exit.
            cls.context = usb1.USBContext()
            cls.context.open()
            atexit.register(cls.context.close)
        devices = []
        for dev in cls.context.getDeviceIterator(skip_on_error=True):
            usb_id = (dev.getVendorID(), dev.getProductID())
            if usb_id not in TREZORS:
                continue
            if not is_vendor_class(dev):
                continue
            try:
                # workaround for issue #223:
                # on certain combinations of Windows USB drivers and libusb versions,
                # Trezor is returned twice (possibly because Windows know it as both
                # a HID and a WebUSB device), and one of the returned devices is
                # non-functional.
                dev.getProduct()
                devices.append(WebUsbTransport(dev))
            except usb1.USBErrorNotSupported:
                pass
        return devices

    def find_debug(self) -> "WebUsbTransport":
        """Return a transport to the debug interface of the same device."""
        if self.protocol.VERSION >= 2:
            # TODO test this
            # XXX this is broken right now because sessions don't really work
            # For v2 protocol, use the same WebUSB interface with a different session
            return WebUsbTransport(self.device, self.handle)
        else:
            # For v1 protocol, find debug USB interface for the same serial number
            return WebUsbTransport(self.device, debug=True)
def is_vendor_class(dev: "usb1.USBDevice") -> bool:
    """Return True when interface 0 / alt-setting 0 of the device's first
    configuration reports the vendor-specific USB class (which is how the
    Trezor WebUSB interface presents itself)."""
    configuration = dev[0]
    alt_setting = configuration[INTERFACE][0]
    return alt_setting.getClass() == usb1.libusb1.LIBUSB_CLASS_VENDOR_SPEC
def dev_to_str(dev: "usb1.USBDevice") -> str:
    """Render a device location as "BBB:p1:p2:..." (zero-padded bus number
    followed by the port chain)."""
    parts = ["%03i" % (dev.getBusNumber(),)]
    parts.extend(dev.getPortNumberList())
    return ":".join(str(piece) for piece in parts)
|
alorence/django-modern-rpc | refs/heads/master | modernrpc/tests/test_jsonrpc_errors.py | 1 | # coding: utf-8
import json
import random
import pytest
import requests
from django.core.serializers.json import DjangoJSONEncoder
from jsonrpcclient.exceptions import ReceivedErrorResponse
from modernrpc.exceptions import RPC_INVALID_REQUEST, RPC_METHOD_NOT_FOUND, RPC_PARSE_ERROR, RPC_INVALID_PARAMS, \
RPC_CUSTOM_ERROR_BASE, RPC_CUSTOM_ERROR_MAX, RPC_INTERNAL_ERROR
def test_jsonrpc_call_unknown_method(jsonrpc_client):
    """Invoking a method the server never registered yields METHOD_NOT_FOUND."""
    with pytest.raises(ReceivedErrorResponse) as exc_info:
        jsonrpc_client.non_existing_method()
    error = exc_info.value
    assert 'Method not found: "non_existing_method"' in error.message
    assert error.code == RPC_METHOD_NOT_FOUND
def test_jsonrpc_invalid_request_1(all_rpc_url):
    """A payload lacking the mandatory 'method' member is rejected."""
    payload = {
        "params": [5, 6],
        "jsonrpc": "2.0",
        "id": random.randint(1, 1000),
    }
    body = json.dumps(payload, cls=DjangoJSONEncoder)
    response = requests.post(all_rpc_url, data=body,
                             headers={'content-type': 'application/json'}).json()
    error = response['error']
    assert 'Missing parameter "method"' in error['message']
    assert error['code'] == RPC_INVALID_REQUEST
def test_jsonrpc_invalid_request_2(all_rpc_url):
    """A payload lacking the mandatory 'jsonrpc' member is rejected."""
    payload = {
        "method": 'add',
        "params": [5, 6],
        "id": random.randint(1, 1000),
    }
    body = json.dumps(payload, cls=DjangoJSONEncoder)
    response = requests.post(all_rpc_url, data=body,
                             headers={'content-type': 'application/json'}).json()
    error = response['error']
    assert 'Missing parameter "jsonrpc"' in error['message']
    assert error['code'] == RPC_INVALID_REQUEST
def test_jsonrpc_invalid_request_3(all_rpc_url):
    """A 'jsonrpc' member with the wrong protocol version is rejected."""
    payload = {
        "method": 'add',
        "params": [5, 6],
        "jsonrpc": "1.0",
        "id": random.randint(1, 1000),
    }
    body = json.dumps(payload, cls=DjangoJSONEncoder)
    response = requests.post(all_rpc_url, data=body,
                             headers={'content-type': 'application/json'}).json()
    error = response['error']
    assert 'The attribute "jsonrpc" must contain "2.0"' in error['message']
    assert error['code'] == RPC_INVALID_REQUEST
def test_jsonrpc_invalid_request_4(all_rpc_url):
    """Truncated JSON (the closing '}' is missing) must raise a parse error."""
    invalid_json_payload = '''
    {
        "method": "add",
        "params": [},
        "jsonrpc": "2.0",
        "id": 74,
    '''
    response = requests.post(all_rpc_url, data=invalid_json_payload,
                             headers={'content-type': 'application/json'}).json()
    assert 'error' in response
    assert 'result' not in response
    # On ParseError, JSON has not been properly deserialized, so the
    # request ID can't be returned in the error response.
    assert response['id'] is None
    error = response['error']
    assert 'Parse error' in error['message']
    assert 'unable to read the request' in error['message']
    assert error['code'] == RPC_PARSE_ERROR
def test_jsonrpc_invalid_request_5(all_rpc_url):
    """A JSON payload that is a bare scalar (not a struct or list) is invalid."""
    response = requests.post(all_rpc_url, data='10',
                             headers={'content-type': 'application/json'}).json()
    assert 'error' in response
    assert 'result' not in response
    assert response['id'] is None
    error = response['error']
    assert 'Invalid request: Bad JSON-RPC payload' in error['message']
    assert error['code'] == RPC_INVALID_REQUEST
def test_jsonrpc_no_content_type(all_rpc_url):
    """A request without a content-type header is rejected."""
    body = json.dumps({
        "method": "add",
        "params": [5, 6],
        "jsonrpc": "2.0",
        "id": 51,
    })
    response = requests.post(all_rpc_url, data=body,
                             headers={'content-type': ''}).json()
    assert 'error' in response
    assert 'result' not in response
    assert response['id'] is None
    error = response['error']
    assert 'Missing header' in error['message']
    assert error['code'] == RPC_INVALID_REQUEST
def test_jsonrpc_invalid_params(jsonrpc_client):
    """Too few arguments for add() must map to RPC_INVALID_PARAMS."""
    with pytest.raises(ReceivedErrorResponse) as exc_info:
        jsonrpc_client.add(42)
    error = exc_info.value
    assert 'Invalid parameters' in error.message
    # Python2: takes exactly 2 arguments (1 given)
    # Python3: 1 required positional argument
    assert 'argument' in error.message
    assert error.code == RPC_INVALID_PARAMS
def test_jsonrpc_invalid_params2(jsonrpc_client):
    """Too many arguments for add() must map to RPC_INVALID_PARAMS."""
    with pytest.raises(ReceivedErrorResponse) as exc_info:
        jsonrpc_client.add(42, -51, 98)
    error = exc_info.value
    assert 'Invalid parameters' in error.message
    # Python2: takes exactly 2 arguments (3 given)
    # Python3: takes 2 positional arguments but 3 were given
    assert 'arguments' in error.message
    assert error.code == RPC_INVALID_PARAMS
def test_jsonrpc_internal_error(jsonrpc_client):
    """A method raising a custom RPC exception keeps its custom error code."""
    with pytest.raises(ReceivedErrorResponse) as exc_info:
        jsonrpc_client.raise_custom_exception()
    error = exc_info.value
    assert 'This is a test error' in error.message
    assert RPC_CUSTOM_ERROR_BASE <= error.code <= RPC_CUSTOM_ERROR_MAX
def test_jsonrpc_exception_with_data(jsonrpc_client):
    """Extra error data attached by the method is forwarded to the client."""
    with pytest.raises(ReceivedErrorResponse) as exc_info:
        jsonrpc_client.raise_custom_exception_with_data()
    assert exc_info.value.data == ['a', 'b', 'c']
def test_jsonrpc_divide_by_zero(jsonrpc_client):
    """An unhandled exception in the method maps to RPC_INTERNAL_ERROR."""
    with pytest.raises(ReceivedErrorResponse) as exc_info:
        jsonrpc_client.divide(42, 0)
    error = exc_info.value
    assert 'Internal error' in error.message
    # Python2: integer division or modulo by zero
    # Python3: division by zero
    assert 'by zero' in error.message
    assert error.code == RPC_INTERNAL_ERROR
def test_jsonrpc_invalid_result(jsonrpc_client):
    """A non-serializable return value is reported as an internal error."""
    with pytest.raises(ReceivedErrorResponse) as exc_info:
        jsonrpc_client.get_invalid_result()
    error = exc_info.value
    assert 'Unable to serialize result as' in error.message
    assert error.code == RPC_INTERNAL_ERROR
|
mcrowson/django | refs/heads/master | tests/test_discovery_sample/doctests.py | 471 | """
Doctest example from the official Python documentation.
https://docs.python.org/3/library/doctest.html
"""
def factorial(n):
    """Return the factorial of n, an exact integer >= 0.
    >>> [factorial(n) for n in range(6)]
    [1, 1, 2, 6, 24, 120]
    >>> factorial(30) # doctest: +ELLIPSIS
    265252859812191058636308480000000...
    >>> factorial(-1)
    Traceback (most recent call last):
    ...
    ValueError: n must be >= 0
    Factorials of floats are OK, but the float must be an exact integer:
    >>> factorial(30.1)
    Traceback (most recent call last):
    ...
    ValueError: n must be exact integer
    >>> factorial(30.0) # doctest: +ELLIPSIS
    265252859812191058636308480000000...
    It must also not be ridiculously large:
    >>> factorial(1e100)
    Traceback (most recent call last):
    ...
    OverflowError: n too large
    """
    import math
    # Input validation mirrors the doctests above.
    if not n >= 0:
        raise ValueError("n must be >= 0")
    if math.floor(n) != n:
        raise ValueError("n must be exact integer")
    if n + 1 == n:  # catch a value like 1e300
        raise OverflowError("n too large")
    # Multiply the factors 2..n iteratively.
    result = 1
    for factor in range(2, int(n) + 1):
        result *= factor
    return result
|
hectord/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/tests/regressiontests/admin_views/customadmin.py | 52 | """
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from django.conf.urls.defaults import patterns
from django.contrib import admin
from django.http import HttpResponse
import models, forms
class Admin2(admin.AdminSite):
    """Custom AdminSite used by tests.CustomAdminSiteTests: overrides all
    the stock templates, the login form, and adds one extra view."""
    login_form = forms.CustomAdminAuthenticationForm
    login_template = 'custom_admin/login.html'
    logout_template = 'custom_admin/logout.html'
    index_template = 'custom_admin/index.html'
    password_change_template = 'custom_admin/password_change_form.html'
    password_change_done_template = 'custom_admin/password_change_done.html'

    def index(self, request, extra_context=None):
        # A custom index view that always injects the same extra context.
        return super(Admin2, self).index(request, {'foo': '*bar*'})

    def get_urls(self):
        custom_urls = patterns('',
            (r'^my_view/$', self.admin_view(self.my_view)),
        )
        return custom_urls + super(Admin2, self).get_urls()

    def my_view(self, request):
        return HttpResponse("Django is a magical pony!")
# Instantiate the custom admin site and register the sample models with
# their admin options, mirroring what the default site does in tests.
site = Admin2(name="admin2")
site.register(models.Article, models.ArticleAdmin)
site.register(models.Section, inlines=[models.ArticleInline])
site.register(models.Thing, models.ThingAdmin)
site.register(models.Fabric, models.FabricAdmin)
site.register(models.ChapterXtra1, models.ChapterXtra1Admin)
|
zooniverse/aggregation | refs/heads/master | experimental/algorithms/old_weather/alignment.py | 2 | import cv2
import numpy as np
import os
from scipy import spatial
import math
def deter(a, b, c, d):
    """2x2 determinant of the matrix [[a, b], [c, d]]."""
    main_diagonal = a * d
    off_diagonal = c * b
    return main_diagonal - off_diagonal
# Pick whichever user's home directory exists on this machine.
base_directory = "/home/ggdhines" if os.path.exists("/home/ggdhines") else "/home/greg"
# Canny edge-detection thresholds.
minVal = 25
maxVal = 100
# First image: find long straight lines with a Hough transform.
img = cv2.imread(base_directory + "/Dropbox/066e48f5-812c-4b5f-ab04-df6c35f50393.jpeg")
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,minVal,maxVal,apertureSize = 3)
lines_t = cv2.HoughLines(edges,1,np.pi/180,350)
# cv2.HoughLines wraps each (rho, theta) pair in a singleton array; unwrap.
lines = [l[0] for l in lines_t]
# for l in lines_t:
#     print l[0]
# print len(lines)
# print lines[0]
# assert False
# Second image: same detection, to be aligned against the first.
img = cv2.imread(base_directory + "/Dropbox/789c61ed-84b5-4f8b-b372-a244889f6588.jpeg")
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,minVal,maxVal,apertureSize = 3)
lines_2_t = cv2.HoughLines(edges,1,np.pi/180,350)
lines_2 = [l[0] for l in lines_2_t]
# intercepts,slopes = zip(*lines[0])
# intercepts_2,slopes_2 = zip(*lines_2[0])
# NOTE(review): despite the names, these look like Hough (rho, theta)
# values rather than literal intercepts/slopes.
intercepts,slopes = zip(*lines)
intercepts_2,slopes_2 = zip(*lines_2)
print intercepts
print slopes
# Normalize both coordinates into [0, 1] (over the union of both images)
# so nearest-neighbour distances weight rho and theta comparably.
max_i = max(max(intercepts),max(intercepts_2))
min_i = min(min(intercepts),min(intercepts_2))
max_s = max(max(slopes),max(slopes_2))
min_s = min(min(slopes),min(slopes_2))
normalized_s = [(s-min_s)/(max_s-min_s) for s in slopes]
normalized_i = [(i-min_i)/(max_i-min_i) for i in intercepts]
normalized_s_2 = [(s-min_s)/(max_s-min_s) for s in slopes_2]
normalized_i_2 = [(i-min_i)/(max_i-min_i) for i in intercepts_2]
# KD-trees over the normalized line parameters, one per image, for fast
# nearest-line queries in either direction.
tree = spatial.KDTree(zip(normalized_i,normalized_s))
tree_2 = spatial.KDTree(zip(normalized_i_2,normalized_s_2))
# mapping_to_1[i] collects (index, distance) of every image-2 line whose
# nearest neighbour in image 1 is line i; mapping_to_2 is the symmetric map.
mapping_to_1 = [[] for i in lines]
mapping_to_2 = [[] for i in lines_2]
for ii,x in enumerate(zip(normalized_i_2,normalized_s_2)):
    dist,neighbour = tree.query(x)
    # print dist,neighbour
    # print neighbour
    # print lines[0]
    mapping_to_1[neighbour].append((ii,dist))
for ii,x in enumerate(zip(normalized_i,normalized_s)):
    dist,neighbour = tree_2.query(x)
    mapping_to_2[neighbour].append((ii,dist))
# print mapping_to_1
# print mapping_to_2
# Keep only mutually-nearest line pairs (a "bijection"): line i in image 1
# and line j in image 2 must each be the other's closest match.
to_draw_1 = []
to_draw_2 = []
for i in range(len(lines)):
    # find a bijection
    # so line[0][i] is the closest line to line_2[0][j], make sure that
    # line_2[0][j] is also the closest line to line[0][i]
    # if such a bijection does not exist, ignore this line
    # bijection = None
    for j,dist in mapping_to_1[i]:
        for i_temp,dist_2 in mapping_to_2[j]:
            if i_temp == i:
                # print max(dist,dist_2)
                # Mutual nearest neighbours AND close enough: accept pair.
                if max(dist,dist_2) < 0.001:
                    to_draw_1.append(lines[i])
                    to_draw_2.append(lines_2[j])
                # print lines[0][i]
                # print lines_2[0][j]
                # print
                # print max(dist,dist_2)
                break
    # bijection_l = [j for j in mapping_to_1[i] if (i in mapping_to_2[j])]
    # # for j in mapping_to_1[i]:
    # #     print i in mapping_to_2[j]
    # # print
    # # print bijection_l
    # # there is a bijection
    # if bijection_l != []:
    #     bijection = bijection_l[0]
    #
    #     to_draw_1.append(lines[0][i])
    #     to_draw_2.append(lines_2[0][bijection])
    #
    #     print lines[0][i]
    #     print lines_2[0][bijection]
    #     print
# assert False
# The matched line pairs between the two images, in matching order.
bijections = zip(to_draw_1,to_draw_2)
for a,b in bijections:
    print a,b
# img = cv2.imread(base_directory + "/Dropbox/066e48f5-812c-4b5f-ab04-df6c35f50393.jpeg")
# for rho,theta in to_draw_1:
# a = np.cos(theta)
# b = np.sin(theta)
# x0 = a*rho
# y0 = b*rho
# x1 = int(x0 + 1000*(-b))
# y1 = int(y0 + 1000*(a))
# x2 = int(x0 - 1000*(-b))
# y2 = int(y0 - 1000*(a))
#
# cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
# cv2.imwrite(base_directory + '/houghlines3.jpg',img)
#
# print len(to_draw_1)
# print len(to_draw_2)
#
# img = cv2.imread(base_directory + "/Dropbox/789c61ed-84b5-4f8b-b372-a244889f6588.jpeg")
# for rho,theta in to_draw_2:
# a = np.cos(theta)
# b = np.sin(theta)
# x0 = a*rho
# y0 = b*rho
# x1 = int(x0 + 1000*(-b))
# y1 = int(y0 + 1000*(a))
# x2 = int(x0 - 1000*(-b))
# y2 = int(y0 - 1000*(a))
#
# cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
#
# cv2.imwrite(base_directory + '/houghlines1.jpg',img)
def intersections(lines):
    """Compute pairwise intersection points of (nearly) axis-aligned lines.

    ``lines`` is a list of Hough ``(rho, theta)`` tuples.  Returns a dict
    mapping index pairs ``(i, j)`` with ``i < j`` to the integer ``(x, y)``
    intersection of ``lines[i]`` and ``lines[j]``.  Only (nearly) vertical
    or horizontal lines are handled; other lines, and near-parallel pairs,
    are skipped.

    Bug fix: the inner loop used to iterate ``lines[:l1_index+1]`` (the
    lines *before* l1) while labelling them with indices *after* l1, so the
    returned keys referred to the wrong lines, and a line could even be
    paired with itself (a division-by-zero hazard).  It now iterates
    ``lines[l1_index+1:]`` so each key really is the pair it names.  Both
    call sites use the same keying, so cross-image comparisons still match.
    """
    def _det(a, b, c, d):
        # 2x2 determinant | a b ; c d |
        return a * d - c * b

    def _segment(line, probe):
        """Return two points (x1, y1, x2, y2) on *line*, or None when the
        line is neither (nearly) vertical nor (nearly) horizontal.  The
        arbitrary *probe* coordinate supplies the second point."""
        rho, theta = line
        x = math.cos(theta) * rho
        y = math.sin(theta) * rho
        if math.fabs(theta % math.pi) < 0.01:
            # vertical line: same x, arbitrary second y
            return x, y, x, probe
        if math.fabs(theta % (math.pi / 2.)) < 0.01:
            # horizontal line: same y, arbitrary second x
            return x, y, probe, y
        return None

    line_intersections = {}
    for l1_index, l1 in enumerate(lines):
        if l1[0] < 0:
            # normalize a negative rho by rotating theta a quarter turn
            l1 = -l1[0], (l1[1] + math.pi / 2.) % math.pi
        for offset, l2 in enumerate(lines[l1_index + 1:]):
            l2_index = l1_index + 1 + offset
            if l2[0] < 0:
                l2 = -l2[0], (l2[1] + math.pi / 2.) % math.pi
            angle_diff = math.fabs(min(l1[1] - l2[1], math.pi - l1[1] - l2[1]))
            if angle_diff <= 0.1:
                # near-parallel: no stable intersection
                continue
            seg1 = _segment(l1, 10)
            seg2 = _segment(l2, 15)
            if seg1 is None or seg2 is None:
                # at least one line is neither vertical nor horizontal
                continue
            x1, y1, x2, y2 = seg1
            x3, y3, x4, y4 = seg2
            # Two-point line-line intersection; see
            # https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
            d1 = _det(x1, y1, x2, y2)
            d2 = _det(x1, 1, x2, 1)
            d3 = _det(x3, y3, x4, y4)
            d4 = _det(x3, 1, x4, 1)
            d5 = _det(y1, 1, y2, 1)
            d6 = _det(y3, 1, y4, 1)
            numerator_x = _det(d1, d2, d3, d4)
            numerator_y = _det(d1, d5, d3, d6)
            denominator = _det(d2, d5, d4, d6)
            intersect_x = int(numerator_x / denominator)
            intersect_y = int(numerator_y / denominator)
            line_intersections[(l1_index, l2_index)] = (intersect_x, intersect_y)
    return line_intersections
# Intersection points for each image's matched line set.
pts1 = intersections(to_draw_1)
pts2 = intersections(to_draw_2)
# For every line pair present in both images, measure how far the
# intersection point moved between image 1 and image 2.
x_displacements = []
y_displacements = []
for line_tuple in pts1.keys():
    if line_tuple in pts2:
        d_x = pts2[line_tuple][0] - pts1[line_tuple][0]
        d_y = pts2[line_tuple][1] - pts1[line_tuple][1]
        x_displacements.append(d_x)
        y_displacements.append(d_y)
# Report the displacement range and mean along each axis.
print min(x_displacements),np.mean(x_displacements),max(x_displacements)
print min(y_displacements),np.mean(y_displacements),max(y_displacements)
# cv2.imwrite(base_directory + '/houghlines3.jpg',img) |
Avira/pootle | refs/heads/master | pootle/apps/reports/views.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import calendar
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View, CreateView
from django.views.generic.detail import SingleObjectMixin
from accounts.models import CURRENCIES
from pootle.core.decorators import admin_required
from pootle.core.http import (JsonResponse, JsonResponseBadRequest,
JsonResponseNotFound)
from pootle.core.log import PAID_TASK_ADDED, PAID_TASK_DELETED, log
from pootle.core.utils.json import jsonify
from pootle.core.utils.timezone import make_aware, make_naive
from pootle.core.views import AjaxResponseMixin
from pootle_misc.util import (ajax_required, get_date_interval,
get_max_month_datetime, import_func)
from pootle_profile.views import (NoDefaultUserMixin, TestUserFieldMixin,
DetailView)
from pootle_statistics.models import ScoreLog
from .forms import UserRatesForm, PaidTaskForm
from .models import PaidTask, PaidTaskTypes
# Django field query aliases
LANG_CODE = 'translation_project__language__code'
LANG_NAME = 'translation_project__language__fullname'
PRJ_CODE = 'translation_project__project__code'
PRJ_NAME = 'translation_project__project__fullname'
INITIAL = 'old_value'
POOTLE_WORDCOUNT = 'unit__source_wordcount'
# ORM path used to order ScoreLog entries by translation project.
SCORE_TRANSLATION_PROJECT = 'submission__translation_project'
# field aliases
DATE = 'creation_time_date'
STAT_FIELDS = ['n1']
INITIAL_STATES = ['new', 'edit']
class UserStatsView(NoDefaultUserMixin, DetailView):
    """Display the stats page for the user identified by the URL username."""
    model = get_user_model()
    slug_field = 'username'
    slug_url_kwarg = 'username'
    template_name = 'user/stats.html'

    def get_context_data(self, **kwargs):
        ctx = super(UserStatsView, self).get_context_data(**kwargs)
        now = make_aware(datetime.now())
        ctx['now'] = now.strftime('%Y-%m-%d %H:%M:%S')
        # The paid-task form only makes sense for users with a paid rate.
        if self.object.rate > 0:
            ctx['paid_task_form'] = PaidTaskForm(user=self.object)
            ctx['paid_task_types'] = PaidTaskTypes
        return ctx
class UserActivityView(NoDefaultUserMixin, SingleObjectMixin, View):
    """Ajax endpoint returning a user's activity data as JSON."""
    model = get_user_model()
    slug_field = 'username'
    slug_url_kwarg = 'username'

    @method_decorator(ajax_required)
    def dispatch(self, request, *args, **kwargs):
        # Remember the requested month before delegating.
        self.month = request.GET.get('month', None)
        return super(UserActivityView, self).dispatch(request, *args, **kwargs)

    def get(self, *args, **kwargs):
        user = self.get_object()
        data = get_activity_data(self.request, user, self.month)
        return JsonResponse(data)
class UserDetailedStatsView(NoDefaultUserMixin, DetailView):
    """Detailed payment/activity report page for a single user."""
    model = get_user_model()
    slug_field = 'username'
    slug_url_kwarg = 'username'
    template_name = 'user/detailed_stats.html'

    def dispatch(self, request, *args, **kwargs):
        self.month = request.GET.get('month', None)
        # The *viewing* user, used below to flag "own report".
        self.user = request.user
        return super(UserDetailedStatsView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        ctx = super(UserDetailedStatsView, self).get_context_data(**kwargs)
        # Fix: the local was previously named `object`, shadowing the builtin.
        target_user = self.get_object()
        ctx.update(get_detailed_report_context(user=target_user, month=self.month))
        ctx.update({'own_report': target_user.username == self.user.username})
        return ctx
class PaidTaskFormView(AjaxResponseMixin, CreateView):
    """Ajax form view that creates a PaidTask and replies with its id."""
    form_class = PaidTaskForm
    template_name = 'admin/reports/paid_task_form.html'

    def get_success_url(self):
        # XXX: This is unused. We don't need this URL, but
        # the parent :cls:`PaidTaskFormView` enforces us to set some value here
        return reverse('pootle-user-stats', kwargs=self.kwargs)

    def form_valid(self, form):
        super(PaidTaskFormView, self).form_valid(form)  # ignore redirect response
        task = self.object
        log('%s\t%s\t%s' % (task.user.username, PAID_TASK_ADDED, task))
        return JsonResponse({'result': task.id})
class AddUserPaidTaskView(NoDefaultUserMixin, TestUserFieldMixin, PaidTaskFormView):
    """Create a paid task for the user named in the URL; the mixins make
    sure a real (non-default) user is looked up by username."""
    model = get_user_model()
    slug_field = 'username'
    slug_url_kwarg = 'username'
@admin_required
def reports(request):
    """Render the admin reports landing page."""
    User = get_user_model()
    now = make_aware(datetime.now())
    # Selectable users for the report picker (meta accounts excluded).
    user_choices = [
        {'id': member.username, 'text': escape(member.formatted_name)}
        for member in User.objects.hide_meta()
    ]
    ctx = {
        'page': 'admin-reports',
        'users': jsonify(user_choices),
        'user_rates_form': UserRatesForm(),
        'paid_task_form': PaidTaskForm(),
        'now': now.strftime('%Y-%m-%d %H:%M:%S'),
        'admin_report': True,
        'paid_task_types': PaidTaskTypes,
    }
    return render_to_response('admin/reports.html', ctx,
                              context_instance=RequestContext(request))
def get_detailed_report_context(user, month):
    """Build the template context for a user's detailed payment report.

    :param user: a User instance, or the empty string when the requested
        username does not exist.
    :param month: month specifier understood by ``get_date_interval``.
    :return: dict with the per-event items, interval boundaries and
        per-rate totals.
    """
    [start, end] = get_date_interval(month)
    totals = {'translated': {}, 'reviewed': {}, 'total': 0,
              'paid_tasks': {},
              'all': 0}
    # Fix: initialize unconditionally -- these were previously assigned only
    # inside the `if` below, so a missing user (user == '') crashed the
    # `items` computation further down with a NameError.
    scores = []
    tasks = []
    if user and start and end:
        scores = list(
            ScoreLog.objects
            .select_related('submission__unit__store')
            .filter(user=user,
                    creation_time__gte=start,
                    creation_time__lte=end)
            .order_by('creation_time'))
        tasks = list(
            PaidTask.objects
            .filter(user=user,
                    datetime__gte=start,
                    datetime__lte=end)
            .order_by('datetime'))
        for score in scores:
            translated, reviewed = score.get_paid_words()
            if translated is not None:
                score.action = PaidTask.get_task_type_title(
                    PaidTaskTypes.TRANSLATION)
                score.subtotal = score.rate * translated
                score.words = translated
                rate_totals = totals['translated'].setdefault(
                    score.rate, {'words': 0})
                rate_totals['words'] += translated
            elif reviewed is not None:
                score.action = PaidTask.get_task_type_title(
                    PaidTaskTypes.REVIEW)
                score.subtotal = score.review_rate * reviewed
                # NOTE: mirrors the original behavior -- the displayed word
                # count comes from `wordcount`, not the paid `reviewed` count.
                score.words = score.wordcount
                rate_totals = totals['reviewed'].setdefault(
                    score.review_rate, {'words': 0})
                rate_totals['words'] += reviewed
            score.similarity = score.get_similarity() * 100
        paid_tasks = totals['paid_tasks']
        for task in tasks:
            task.action = PaidTask.get_task_type_title(task.task_type)
            task.subtotal = task.amount * task.rate
            totals['all'] += task.subtotal
            if task.task_type not in paid_tasks:
                paid_tasks[task.task_type] = {
                    'rates': {},
                    'action': task.action
                }
            rates = paid_tasks[task.task_type]['rates']
            if task.rate in rates:
                rates[task.rate]['amount'] += task.amount
                rates[task.rate]['subtotal'] += task.subtotal
            else:
                rates[task.rate] = {
                    'amount': task.amount,
                    'subtotal': task.subtotal,
                }
        # Turn the accumulated word counts into per-rate subtotals.
        for rate, rate_totals in totals['translated'].items():
            rate_totals['subtotal'] = rate * rate_totals['words']
            totals['all'] += rate_totals['subtotal']
        for rate, rate_totals in totals['reviewed'].items():
            rate_totals['subtotal'] = rate * rate_totals['words']
            totals['all'] += rate_totals['subtotal']
    # Interleave score and task events chronologically.
    items = [{'score': x, 'creation_time': x.creation_time} for x in scores] + \
            [{'task': x, 'creation_time': x.datetime} for x in tasks]
    items = sorted(items, key=lambda x: x['creation_time'])
    if user != '' and user.currency is None:
        user.currency = CURRENCIES[0][0]
    return {
        'items': items,
        'object': user,
        'start': start,
        'end': end,
        'next': start.replace(day=1) + timedelta(days=31),
        'previous': start.replace(day=1) - timedelta(days=1),
        'totals': totals,
        'utc_offset': start.strftime("%z"),
    }
@admin_required
def reports_detailed(request):
    """Render the admin detailed report for the requested user and month."""
    User = get_user_model()
    requested_username = request.GET.get('username', None)
    requested_month = request.GET.get('month', None)
    try:
        user = User.objects.get(username=requested_username)
    except User.DoesNotExist:
        user = ''
    ctx = get_detailed_report_context(user=user, month=requested_month)
    ctx['admin_report'] = True
    return render_to_response('admin/detailed_reports.html', ctx,
                              context_instance=RequestContext(request))
def get_min_month_datetime(dt):
    """Return *dt* clamped to the very start of its month.

    Fix: also zero out microseconds -- the previous replace() reset only
    day/hour/minute/second, so a stray microsecond survived and the result
    was not actually the minimum datetime of the month.
    """
    return dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
@ajax_required
@admin_required
def update_user_rates(request):
    """Set a user's rate/review_rate/hourly_rate and retroactively apply
    them to existing score logs and paid tasks, optionally only from an
    'effective from' date onwards.  Returns the affected record counts."""
    form = UserRatesForm(request.POST)
    if form.is_valid():
        try:
            User = get_user_model()
            user = User.objects.get(username=form.cleaned_data['username'])
        except User.DoesNotExist:
            error_text = _("User %s not found" % form.cleaned_data['username'])
            return JsonResponseNotFound({'msg': error_text})
        user.currency = form.cleaned_data['currency']
        user.rate = form.cleaned_data['rate']
        user.review_rate = form.cleaned_data['review_rate']
        user.hourly_rate = form.cleaned_data['hourly_rate']
        scorelog_filter = {'user': user}
        paid_task_filter = scorelog_filter.copy()
        if form.cleaned_data['effective_from'] is not None:
            # Limit the retroactive update to records after the cut-off.
            effective_from = form.cleaned_data['effective_from']
            scorelog_filter.update({
                'creation_time__gte': effective_from
            })
            paid_task_filter.update({
                'datetime__gte': effective_from
            })
        scorelog_query = ScoreLog.objects.filter(**scorelog_filter)
        scorelog_count = scorelog_query.count()
        paid_task_query = PaidTask.objects.filter(**paid_task_filter)
        paid_task_count = paid_task_query.count()
        # Score logs can be rewritten in bulk.
        scorelog_query.update(rate=user.rate, review_rate=user.review_rate)
        def get_task_rate_for(user, task_type):
            # Map each paid-task type to the corresponding per-user rate.
            return {
                PaidTaskTypes.TRANSLATION: user.rate,
                PaidTaskTypes.REVIEW: user.review_rate,
                PaidTaskTypes.HOURLY_WORK: user.hourly_rate,
                PaidTaskTypes.CORRECTION: 1,
            }.get(task_type, 0)
        # Paid tasks need per-row saves since the rate depends on the type.
        for task in paid_task_query:
            task.rate = get_task_rate_for(user, task.task_type)
            task.save()
        user.save()
        return JsonResponse({
            'scorelog_count': scorelog_count,
            'paid_task_count': paid_task_count,
        })
    return JsonResponseBadRequest({'errors': form.errors})
@ajax_required
@admin_required
def add_paid_task(request):
    """Create a PaidTask from POST data; reply with its id as JSON."""
    form = PaidTaskForm(request.POST)
    if not form.is_valid():
        return JsonResponseBadRequest({'errors': form.errors})
    form.save()
    obj = form.instance
    log('%s\t%s\t%s' % (request.user.username, PAID_TASK_ADDED, obj))
    return JsonResponse({'result': obj.id})
@ajax_required
@admin_required
def remove_paid_task(request, task_id=None):
    """Delete the PaidTask with id ``task_id`` (DELETE requests only).

    Fix: the log message local was previously named ``str``, shadowing the
    builtin; it is now ``log_message``.
    """
    if request.method != 'DELETE':
        return JsonResponseBadRequest({'error': _('Invalid request method')})
    try:
        obj = PaidTask.objects.get(id=task_id)
    except PaidTask.DoesNotExist:
        return JsonResponseNotFound({})
    # Format the log line before deleting, while the object is intact.
    log_message = '%s\t%s\t%s' % (request.user.username,
                                  PAID_TASK_DELETED, obj)
    obj.delete()
    log(log_message)
    return JsonResponse({'removed': 1})
def get_scores(user, start, end):
    """Queryset of the user's ScoreLog entries within [start, end],
    with the related translation project/language prefetched."""
    queryset = ScoreLog.objects.select_related(
        'submission__translation_project__project',
        'submission__translation_project__language',
    )
    return queryset.filter(user=user,
                           creation_time__gte=start,
                           creation_time__lte=end)
def get_activity_data(request, user, month):
    """Assemble the JSON-serializable activity report for *user* over
    *month*: request metadata plus grouped/daily/summary stats and paid
    tasks.  ``user`` may be the empty string when the username was not
    found, in which case only the metadata section is produced."""
    [start, end] = get_date_interval(month)
    # NOTE: `json` here is a plain dict; it shadows the stdlib module name.
    json = {}
    user_dict = {
        'id': user.id,
        'username': user.username,
        'formatted_name': user.formatted_name,
        'currency': user.currency if user.currency else CURRENCIES[0][0],
        'rate': user.rate,
        'review_rate': user.review_rate,
        'hourly_rate': user.hourly_rate,
    } if user != '' else user
    now = make_aware(datetime.now())
    json['meta'] = {
        'user': user_dict,
        'month': month,
        'now': now.strftime('%Y-%m-%d %H:%M:%S'),
        'start': start.strftime('%Y-%m-%d'),
        'end': end.strftime('%Y-%m-%d'),
        'utc_offset': start.strftime("%z"),
        'admin_permalink': request.build_absolute_uri(reverse('pootle-reports')),
    }
    if user != '':
        scores = get_scores(user, start, end)
        scores = list(scores.order_by(SCORE_TRANSLATION_PROJECT))
        json['grouped'] = get_grouped_paid_words(scores, user, month)
        scores.sort(key=lambda x: x.creation_time)
        json['daily'] = get_daily_activity(user, scores, start, end)
        json['summary'] = get_summary(scores, start, end)
        tasks = get_paid_tasks(user, start, end)
        for task in tasks:
            # Render task timestamps in local time when USE_TZ is enabled.
            if settings.USE_TZ:
                task['datetime'] = timezone.localtime(task['datetime'])
            task['datetime'] = task['datetime'].strftime('%Y-%m-%d %H:%M:%S')
        json['paid_tasks'] = tasks
    return json
@ajax_required
@admin_required
def user_date_prj_activity(request):
    """Return the monthly activity report for ``?username=&month=`` as JSON.

    An unknown (or missing) username yields an empty-user report rather
    than an error, mirroring what the reports UI expects.
    """
    username = request.GET.get('username', None)
    month = request.GET.get('month', None)
    User = get_user_model()
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        # Narrowed from a bare `except:` which would also have hidden
        # programming errors. Fall back to the '' sentinel understood by
        # get_activity_data, which then omits per-user report sections.
        user = ''
    data = get_activity_data(request, user, month)
    return JsonResponse(data)
def get_daily_activity(user, scores, start, end):
    """Build the per-day chart data (translated/reviewed word counts).

    `scores` must be in chronological order; day boundaries are detected
    by comparing each score's date with the previous one.  Returns a dict
    with the two data series, timestamp bounds (ms since epoch) and the
    maximum single-day score used to scale the chart's y-axis.
    """
    result_translated = {
        'label': PaidTask.get_task_type_title(
            PaidTaskTypes.TRANSLATION),
        'data': [],
    }
    result_reviewed = {
        'label': PaidTask.get_task_type_title(
            PaidTaskTypes.REVIEW),
        'data': [],
    }
    result = {
        'data': [result_translated, result_reviewed],
        'max_day_score': 10,
        'min_ts': "%d" % (calendar.timegm(start.timetuple()) * 1000),
        'max_ts': "%d" % (calendar.timegm(end.timetuple()) * 1000),
        'nonempty': False,
    }
    # Optionally add a third, mark-only series from a site-configured hook.
    if settings.POOTLE_REPORTS_MARK_FUNC:
        try:
            get_mark_data = import_func(settings.POOTLE_REPORTS_MARK_FUNC)
            result['data'].append({
                'data': [],
                'marks': {'show': True},
                'markdata': get_mark_data(user, start, end)
            })
        except ImproperlyConfigured:
            pass
    saved_date = None
    current_day_score = 0
    translated_group = {}
    reviewed_group = {}
    for score in scores:
        score_time = make_naive(score.creation_time)
        date = score_time.date()
        translated, reviewed = score.get_paid_words()
        if translated or reviewed:
            translated = 0 if translated is None else translated
            reviewed = 0 if reviewed is None else reviewed
            # New day: open fresh buckets and fold the previous day's total
            # into the running maximum.
            if saved_date != date:
                saved_date = date
                reviewed_group[date] = 0
                translated_group[date] = 0
                if result['max_day_score'] < current_day_score:
                    result['max_day_score'] = current_day_score
                current_day_score = 0
            current_day_score += int(reviewed + translated)
            result['nonempty'] |= current_day_score > 0
            translated_group[date] += translated
            reviewed_group[date] += reviewed
    # Account for the final day, which the loop above never flushes.
    if result['max_day_score'] < current_day_score:
        result['max_day_score'] = current_day_score
    for date, item in sorted(translated_group.items(), key=lambda x: x[0]):
        ts = int(calendar.timegm(date.timetuple()) * 1000)
        result_translated['data'].append((ts, item))
    for date, item in sorted(reviewed_group.items(), key=lambda x: x[0]):
        ts = int(calendar.timegm(date.timetuple()) * 1000)
        result_reviewed['data'].append((ts, item))
    return result
def get_paid_tasks(user, start, end):
    """Return `user`'s PaidTask rows within [start, end] as plain dicts."""
    tasks = (PaidTask.objects
             .filter(user=user,
                     datetime__gte=start,
                     datetime__lte=end)
             .order_by('pk'))
    return [{
        'id': task.id,
        'description': task.description,
        'amount': task.amount,
        'type': task.task_type,
        'action': PaidTask.get_task_type_title(task.task_type),
        'rate': task.rate,
        'datetime': task.datetime,
    } for task in tasks]
def get_grouped_paid_words(scores, user=None, month=None):
    """Aggregate paid word counts per translation project.

    `scores` must arrive grouped by translation project (the caller orders
    by SCORE_TRANSLATION_PROJECT): a new result row is opened whenever the
    project changes, and later scores accumulate into the current row.
    Returns rows sorted by the "project / language" display name.
    """
    result = []
    tp = None
    for score in scores:
        if tp != score.submission.translation_project:
            # Project boundary: start a fresh accumulator row.
            tp = score.submission.translation_project
            row = {
                'translation_project': u'%s / %s' %
                    (tp.project.fullname, tp.language.fullname),
                'project_code': tp.project.code,
                'score_delta': 0,
                'translated': 0,
                'reviewed': 0,
            }
            if user is not None:
                # Deep-link into the editor, pre-filtered to this user's
                # submissions (and to the month, when given).
                editor_filter = {
                    'state': 'user-submissions',
                    'user': user.username,
                }
                if month is not None:
                    editor_filter['month'] = month
                row['tp_translate_url'] = tp.get_translate_url(**editor_filter)
            result.append(row)
        translated_words, reviewed_words = score.get_paid_words()
        if translated_words:
            row['translated'] += translated_words
        if reviewed_words:
            row['reviewed'] += reviewed_words
        row['score_delta'] += score.score_delta
    return sorted(result, key=lambda x: x['translation_project'])
def get_summary(scores, start, end):
    """Summarise `scores` into billing rows, one per (rate, month) run.

    A new translation (resp. review) row is opened whenever the applicable
    rate or the calendar month changes; each row accumulates paid words and
    tracks the time span it covers.  Rows with zero amount are dropped, the
    rest are sorted chronologically and their datetimes formatted.
    """
    rate = review_rate = None
    translation_month = review_month = None
    translated_row = reviewed_row = None
    translations = []
    reviews = []
    start = make_naive(start)
    end = make_naive(end)
    for score in scores:
        score_time = make_naive(score.creation_time)
        # Rate or month changed: open a new translation billing row.
        if (score.rate != rate or
            translation_month != score_time.month):
            rate = score.rate
            translation_month = score_time.month
            translated_row = {
                'type': PaidTaskTypes.TRANSLATION,
                'action': PaidTaskTypes.TRANSLATION,
                'amount': 0,
                'rate': score.rate,
                'start': score_time,
                'end': score_time,
            }
            translations.append(translated_row)
        # Same idea for the review rate/month.
        if (score.review_rate != review_rate or
            review_month != score_time.month):
            review_rate = score.review_rate
            review_month = score_time.month
            reviewed_row = {
                'type': PaidTaskTypes.REVIEW,
                'action': PaidTaskTypes.REVIEW,
                'amount': 0,
                'rate': score.review_rate,
                'start': score_time,
                'end': score_time,
            }
            reviews.append(reviewed_row)
        translated_words, reviewed_words = score.get_paid_words()
        if translated_words > 0:
            translated_row['end'] = score_time
            translated_row['amount'] += translated_words
        elif reviewed_words > 0:
            reviewed_row['end'] = score_time
            reviewed_row['amount'] += reviewed_words
    # Widen each row's span to the report interval / month boundaries.
    for group in [translations, reviews]:
        for i, item in enumerate(group):
            if i == 0:
                item['start'] = start
            else:
                item['start'] = get_min_month_datetime(item['start'])
            if item['end'].month == end.month and item['end'].year == end.year:
                item['end'] = end
            else:
                item['end'] = get_max_month_datetime(item['end'])
    result = filter(lambda x: x['amount'] > 0, translations + reviews)
    result = sorted(result, key=lambda x: x['start'])
    for item in result:
        item['type'] = item['action']
        item['action'] = PaidTask.get_task_type_title(item['action'])
    for item in result:
        item['start'] = item['start'].strftime('%Y-%m-%d')
        item['end'] = item['end'].strftime('%Y-%m-%d')
    return result
def users(request):
    """Return id/username/full_name for every non-meta user as JSON."""
    user_model = get_user_model()
    values = user_model.objects.hide_meta().values(
        'id', 'username', 'full_name')
    return JsonResponse(list(values))
|
tedder/ansible | refs/heads/devel | lib/ansible/modules/packaging/os/package_facts.py | 21 | #!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# most of it copied from AWX's scan_packages module
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: package_facts
short_description: package information as facts
description:
- Return information about installed packages as facts
options:
manager:
description:
- The package manager used by the system so we can query the package information
default: auto
choices: ["auto", "rpm", "apt"]
required: False
version_added: "2.5"
author:
- Matthew Jones (@matburt)
- Brian Coca (@bcoca)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: get the rpm package facts
package_facts:
manager: "auto"
- name: show them
debug: var=ansible_facts.packages
'''
RETURN = '''
ansible_facts:
description: facts to add to ansible_facts
returned: always
type: complex
contains:
packages:
description: list of dicts with package information
returned: when operating system level package manager is specified or auto detected manager
type: dict
sample_rpm:
{
"packages": {
"kernel": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.26.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.16.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.10.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.21.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools-libs": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools-libs",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
}
}
sample_deb:
{
"packages": {
"libbz2-1.0": [
{
"version": "1.0.6-5",
"source": "apt",
"arch": "amd64",
"name": "libbz2-1.0"
}
],
"patch": [
{
"version": "2.7.1-4ubuntu1",
"source": "apt",
"arch": "amd64",
"name": "patch"
}
],
}
}
'''
import sys
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
def rpm_package_list():
    """Return a dict mapping package name -> list of package-detail dicts,
    gathered from the local RPM database.

    Fails the module when the rpm python bindings are unavailable.
    """
    try:
        import rpm
    except ImportError:
        module.fail_json(msg='Unable to use the rpm python bindings, please ensure they are installed under the python the module runs under')
    trans_set = rpm.TransactionSet()
    installed_packages = {}
    for package in trans_set.dbMatch():
        package_details = dict(name=package[rpm.RPMTAG_NAME],
                               version=package[rpm.RPMTAG_VERSION],
                               release=package[rpm.RPMTAG_RELEASE],
                               epoch=package[rpm.RPMTAG_EPOCH],
                               arch=package[rpm.RPMTAG_ARCH],
                               source='rpm')
        # Several versions of one package (e.g. kernels) may coexist, so
        # group details per package name; setdefault replaces the original
        # if/else membership dance.
        installed_packages.setdefault(package_details['name'], []).append(package_details)
    return installed_packages
def apt_package_list():
    """Return a dict mapping package name -> list of package-detail dicts
    for every package the apt cache reports as installed.

    Fails the module when the apt python bindings are unavailable.
    """
    try:
        import apt
    except ImportError:
        module.fail_json(msg='Unable to use the apt python bindings, please ensure they are installed under the python the module runs under')
    apt_cache = apt.Cache()
    installed_packages = {}
    apt_installed_packages = [pk for pk in apt_cache.keys() if apt_cache[pk].is_installed]
    for package in apt_installed_packages:
        ac_pkg = apt_cache[package].installed
        package_details = dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, source='apt')
        # Group details per package name; setdefault replaces the original
        # if/else membership dance.
        installed_packages.setdefault(package_details['name'], []).append(package_details)
    return installed_packages
# FIXME: add more listing methods
def main():
    """Entry point: detect/validate the package manager and emit facts."""
    # `module` is intentionally global so the *_package_list() helpers can
    # call fail_json() themselves.
    global module
    module = AnsibleModule(argument_spec=dict(manager=dict()), supports_check_mode=True)
    manager = module.params['manager']
    packages = {}
    results = {}
    if manager is None or manager == 'auto':
        # detect!
        # A manager counts as present when its python bindings import.
        for manager_lib in ('rpm', 'apt'):
            try:
                dummy = __import__(manager_lib)
                manager = manager_lib
                break
            except ImportError:
                pass
        # FIXME: add more detection methods
    try:
        if manager == "rpm":
            packages = rpm_package_list()
        elif manager == "apt":
            packages = apt_package_list()
        else:
            if manager:
                # Explicitly requested but unsupported: skip, don't fail.
                results['msg'] = 'Unsupported package manager: %s' % manager
                results['skipped'] = True
            else:
                module.fail_json(msg='Could not detect supported package manager')
    except Exception as e:
        from traceback import format_tb
        module.fail_json(msg='Failed to retrieve packages: %s' % to_text(e), exception=format_tb(sys.exc_info()[2]))
    results['ansible_facts'] = {}
    # Set the facts, this will override the facts in ansible_facts that might
    # exist from previous runs when using operating system level or distribution
    # package managers
    results['ansible_facts']['packages'] = packages
    module.exit_json(**results)
if __name__ == '__main__':
main()
|
odubno/microblog | refs/heads/master | venv/lib/python2.7/site-packages/coverage/html.py | 159 | """HTML reporting for Coverage."""
import os, re, shutil, sys
import coverage
from coverage.backward import pickle
from coverage.misc import CoverageException, Hasher
from coverage.phystokens import source_token_lines, source_encoding
from coverage.report import Reporter
from coverage.results import Numbers
from coverage.templite import Templite
# Static files are looked for in a list of places.
STATIC_PATH = [
# The place Debian puts system Javascript libraries.
"/usr/share/javascript",
# Our htmlfiles directory.
os.path.join(os.path.dirname(__file__), "htmlfiles"),
]
def data_filename(fname, pkgdir=""):
    """Return the path to a data file of ours.

    The file is searched for on `STATIC_PATH`, and the first place it's found,
    is returned.

    Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
    is provided, at that subdirectory.

    """
    for static_dir in STATIC_PATH:
        # Check the directory itself first, then the optional subpackage dir.
        candidates = [os.path.join(static_dir, fname)]
        if pkgdir:
            candidates.append(os.path.join(static_dir, pkgdir, fname))
        for candidate in candidates:
            if os.path.exists(candidate):
                return candidate
    raise CoverageException("Couldn't find static file %r" % fname)
def data(fname):
    """Return the contents of a data file of ours."""
    fobj = open(data_filename(fname))
    try:
        contents = fobj.read()
    finally:
        fobj.close()
    return contents
class HtmlReporter(Reporter):
    """HTML reporting.

    Writes one HTML page per measured file plus an index page, skipping
    pages that are unchanged since the last run (see `HtmlStatus`).
    """
    # These files will be copied from the htmlfiles dir to the output dir.
    STATIC_FILES = [
        ("style.css", ""),
        ("jquery.min.js", "jquery"),
        ("jquery.hotkeys.js", "jquery-hotkeys"),
        ("jquery.isonscreen.js", "jquery-isonscreen"),
        ("jquery.tablesorter.min.js", "jquery-tablesorter"),
        ("coverage_html.js", ""),
        ("keybd_closed.png", ""),
        ("keybd_open.png", ""),
    ]
    def __init__(self, cov, config):
        super(HtmlReporter, self).__init__(cov, config)
        # Output directory; filled in by Reporter.report_files().
        self.directory = None
        self.template_globals = {
            'escape': escape,
            'title': self.config.html_title,
            '__url__': coverage.__url__,
            '__version__': coverage.__version__,
        }
        self.source_tmpl = Templite(
            data("pyfile.html"), self.template_globals
        )
        self.coverage = cov
        self.files = []
        self.arcs = self.coverage.data.has_arcs()
        self.status = HtmlStatus()
        self.extra_css = None
        self.totals = Numbers()
    def report(self, morfs):
        """Generate an HTML report for `morfs`.

        `morfs` is a list of modules or filenames.

        Returns the total percentage covered.
        """
        assert self.config.html_dir, "must give a directory for html reporting"
        # Read the status data.
        self.status.read(self.config.html_dir)
        # Check that this run used the same settings as the last run.
        m = Hasher()
        m.update(self.config)
        these_settings = m.digest()
        if self.status.settings_hash() != these_settings:
            self.status.reset()
            self.status.set_settings_hash(these_settings)
        # The user may have extra CSS they want copied.
        if self.config.extra_css:
            self.extra_css = os.path.basename(self.config.extra_css)
        # Process all the files.
        self.report_files(self.html_file, morfs, self.config.html_dir)
        if not self.files:
            raise CoverageException("No data to report.")
        # Write the index file.
        self.index_file()
        self.make_local_static_report_files()
        return self.totals.pc_covered
    def make_local_static_report_files(self):
        """Make local instances of static files for HTML report."""
        # The files we provide must always be copied.
        for static, pkgdir in self.STATIC_FILES:
            shutil.copyfile(
                data_filename(static, pkgdir),
                os.path.join(self.directory, static)
            )
        # The user may have extra CSS they want copied.
        if self.extra_css:
            shutil.copyfile(
                self.config.extra_css,
                os.path.join(self.directory, self.extra_css)
            )
    def write_html(self, fname, html):
        """Write `html` to `fname`, properly encoded."""
        fout = open(fname, "wb")
        try:
            fout.write(html.encode('ascii', 'xmlcharrefreplace'))
        finally:
            fout.close()
    def file_hash(self, source, cu):
        """Compute a hash that changes if the file needs to be re-reported."""
        m = Hasher()
        m.update(source)
        self.coverage.data.add_to_hash(cu.filename, m)
        return m.digest()
    def html_file(self, cu, analysis):
        """Generate an HTML file for one source file."""
        source_file = cu.source_file()
        try:
            source = source_file.read()
        finally:
            source_file.close()
        # Find out if the file on disk is already correct.
        flat_rootname = cu.flat_rootname()
        this_hash = self.file_hash(source, cu)
        that_hash = self.status.file_hash(flat_rootname)
        if this_hash == that_hash:
            # Nothing has changed to require the file to be reported again.
            self.files.append(self.status.index_info(flat_rootname))
            return
        self.status.set_file_hash(flat_rootname, this_hash)
        # If need be, determine the encoding of the source file. We use it
        # later to properly write the HTML.
        if sys.version_info < (3, 0):
            encoding = source_encoding(source)
            # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it.
            if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf":
                source = source[3:]
                encoding = "utf-8"
        # Get the numbers for this file.
        nums = analysis.numbers
        if self.arcs:
            missing_branch_arcs = analysis.missing_branch_arcs()
        # These classes determine which lines are highlighted by default.
        c_run = "run hide_run"
        c_exc = "exc"
        c_mis = "mis"
        c_par = "par " + c_run
        lines = []
        for lineno, line in enumerate(source_token_lines(source)):
            lineno += 1     # 1-based line numbers.
            # Figure out how to mark this line.
            line_class = []
            annotate_html = ""
            annotate_title = ""
            if lineno in analysis.statements:
                line_class.append("stm")
            if lineno in analysis.excluded:
                line_class.append(c_exc)
            elif lineno in analysis.missing:
                line_class.append(c_mis)
            elif self.arcs and lineno in missing_branch_arcs:
                line_class.append(c_par)
                annlines = []
                for b in missing_branch_arcs[lineno]:
                    if b < 0:
                        annlines.append("exit")
                    else:
                        annlines.append(str(b))
                # Restored "&nbsp;&nbsp; " separator: a previous revision had
                # it entity-decoded to plain spaces, losing the layout.
                annotate_html = "&nbsp;&nbsp; ".join(annlines)
                if len(annlines) > 1:
                    annotate_title = "no jumps to these line numbers"
                elif len(annlines) == 1:
                    annotate_title = "no jump to this line number"
            elif lineno in analysis.statements:
                line_class.append(c_run)
            # Build the HTML for the line
            html = []
            for tok_type, tok_text in line:
                if tok_type == "ws":
                    html.append(escape(tok_text))
                else:
                    # Restored '&nbsp;' fallback (was entity-decoded to ' '):
                    # an empty token still needs content to hang styling on.
                    tok_html = escape(tok_text) or '&nbsp;'
                    html.append(
                        "<span class='%s'>%s</span>" % (tok_type, tok_html)
                    )
            lines.append({
                'html': ''.join(html),
                'number': lineno,
                'class': ' '.join(line_class) or "pln",
                'annotate': annotate_html,
                'annotate_title': annotate_title,
            })
        # Write the HTML page for this file.
        html = spaceless(self.source_tmpl.render({
            'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
            'arcs': self.arcs, 'extra_css': self.extra_css,
            'cu': cu, 'nums': nums, 'lines': lines,
        }))
        if sys.version_info < (3, 0):
            html = html.decode(encoding)
        html_filename = flat_rootname + ".html"
        html_path = os.path.join(self.directory, html_filename)
        self.write_html(html_path, html)
        # Save this file's information for the index file.
        index_info = {
            'nums': nums,
            'html_filename': html_filename,
            'name': cu.name,
        }
        self.files.append(index_info)
        self.status.set_index_info(flat_rootname, index_info)
    def index_file(self):
        """Write the index.html file for this report."""
        index_tmpl = Templite(
            data("index.html"), self.template_globals
        )
        self.totals = sum([f['nums'] for f in self.files])
        html = index_tmpl.render({
            'arcs': self.arcs,
            'extra_css': self.extra_css,
            'files': self.files,
            'totals': self.totals,
        })
        if sys.version_info < (3, 0):
            html = html.decode("utf-8")
        self.write_html(
            os.path.join(self.directory, "index.html"),
            html
        )
        # Write the latest hashes for next time.
        self.status.write(self.directory)
class HtmlStatus(object):
    """The status information we keep to support incremental reporting.

    Persisted as a pickle (`status.dat`) in the report directory; data is
    only trusted when both the format number and the coverage.py version
    match the writer's.
    """
    STATUS_FILE = "status.dat"
    STATUS_FORMAT = 1
    def __init__(self):
        self.reset()
    def reset(self):
        """Initialize to empty."""
        self.settings = ''
        self.files = {}
    def read(self, directory):
        """Read the last status in `directory`."""
        usable = False
        try:
            status_file = os.path.join(directory, self.STATUS_FILE)
            fstatus = open(status_file, "rb")
            try:
                status = pickle.load(fstatus)
            finally:
                fstatus.close()
        except (IOError, ValueError):
            usable = False
        else:
            usable = True
            # Reject data written by another format or coverage version.
            if status['format'] != self.STATUS_FORMAT:
                usable = False
            elif status['version'] != coverage.__version__:
                usable = False
        if usable:
            self.files = status['files']
            self.settings = status['settings']
        else:
            self.reset()
    def write(self, directory):
        """Write the current status to `directory`."""
        status_file = os.path.join(directory, self.STATUS_FILE)
        status = {
            'format': self.STATUS_FORMAT,
            'version': coverage.__version__,
            'settings': self.settings,
            'files': self.files,
        }
        fout = open(status_file, "wb")
        try:
            pickle.dump(status, fout)
        finally:
            fout.close()
    def settings_hash(self):
        """Get the hash of the coverage.py settings."""
        return self.settings
    def set_settings_hash(self, settings):
        """Set the hash of the coverage.py settings."""
        self.settings = settings
    def file_hash(self, fname):
        """Get the hash of `fname`'s contents."""
        return self.files.get(fname, {}).get('hash', '')
    def set_file_hash(self, fname, val):
        """Set the hash of `fname`'s contents."""
        self.files.setdefault(fname, {})['hash'] = val
    def index_info(self, fname):
        """Get the information for index.html for `fname`."""
        return self.files.get(fname, {}).get('index', {})
    def set_index_info(self, fname, info):
        """Set the information for index.html for `fname`."""
        self.files.setdefault(fname, {})['index'] = info
# Helpers for templates and generating HTML
def escape(t):
    """HTML-escape the text in `t`."""
    # NOTE: a previous revision had these entity strings entity-decoded
    # (e.g. replace("&", "&") -- a no-op), so nothing was ever escaped.
    # Restored to the real HTML entities described by the comments.
    return (t
        # Convert HTML special chars into HTML entities.
        .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
        .replace("'", "&#39;").replace('"', "&quot;")
        # Convert runs of spaces: "......" -> "&nbsp;. . ."
        .replace("  ", "&nbsp; ")
        # To deal with odd-length runs, convert the final pair of spaces
        # so that "....." -> "&nbsp;. ."
        .replace("  ", " &nbsp;")
        )
def spaceless(html):
    """Squeeze out some annoying extra space from an HTML string.

    Nicely-formatted templates mean lots of extra space in the result.
    Get rid of some.

    """
    return re.sub(r">\s+<p ", ">\n<p ", html)
|
ProjectSWGCore/NGECore2 | refs/heads/master | scripts/object/static/worldbuilding/battle_droid_static.py | 85615 | import sys
def setup(core, object):
	# Static world-building object: nothing to customise, so the hook is
	# a deliberate no-op.
	return
jamesblunt/scrapy | refs/heads/master | scrapy/utils/display.py | 187 | """
pprint and pformat wrappers with colorization support
"""
from __future__ import print_function
import sys
from pprint import pformat as pformat_
def _colorize(text, colorize=True):
    """Return `text` highlighted as Python source for a terminal.

    Plain text is returned when `colorize` is false, stdout is not a tty,
    or pygments is not installed (pygments is an optional dependency).
    """
    if not colorize or not sys.stdout.isatty():
        return text
    try:
        from pygments import highlight
        from pygments.formatters import TerminalFormatter
        from pygments.lexers import PythonLexer
        return highlight(text, PythonLexer(), TerminalFormatter())
    except ImportError:
        return text
def pformat(obj, *args, **kwargs):
    """Like pprint.pformat, with optional terminal colorization.

    The `colorize` keyword (default True) is consumed here; any other
    positional/keyword arguments are forwarded to pprint.pformat.  The
    original accepted *args/**kwargs but silently discarded them.
    """
    colorize = kwargs.pop('colorize', True)
    return _colorize(pformat_(obj, *args, **kwargs), colorize)
def pprint(obj, *args, **kwargs):
    """Pretty-print `obj` to stdout, optionally colorized."""
    formatted = pformat(obj, *args, **kwargs)
    print(formatted)
|
blissland/blissflixx | refs/heads/master | lib/chanutils/torrent/__init__.py | 1 | import re, base64, subprocess, chanutils, playitem, urlparse
from torrentparse import TorrentParser
hash_re = re.compile("xt=urn:btih:([A-Za-z0-9]+)")
base32_re = re.compile("[A-Z2-7]{32}")
valid_re = re.compile("[A-F0-9]{40}")
#torr_sites = ("torcache.net", "torrage.com", "zoink.it")
torr_sites = ("torcache.net", "zoink.it")
def torrent_from_hash(hashid):
    """Fetch raw .torrent data for `hashid` from the known cache mirrors.

    Tries each site in `torr_sites` in order and returns the body of the
    first successful response, or None if every mirror fails.
    """
    path = "/torrent/" + hashid + ".torrent"
    for site in torr_sites:
        try:
            r = chanutils.get("http://" + site + path)
            return r.content
        except Exception:
            # Best-effort: fall through to the next mirror.
            pass
    return None
def magnet2torrent(link):
    """Resolve a magnet URI to raw .torrent data via the cache sites.

    Extracts the btih info-hash from `link`, normalising 32-char base32
    hashes to the canonical 40-char base16 form.  Raises when the hash is
    missing or invalid.
    """
    matches = hash_re.search(link)
    if not matches or len(matches.groups()) != 1:
        raise Exception("Unable to find magnet hash")
    hashid = matches.group(1).upper()
    #If hash is base32, convert it to base16
    if len(hashid) == 32 and base32_re.search(hashid):
        s = base64.b32decode(hashid)
        hashid = base64.b16encode(s)
    elif not (len(hashid) == 40 and valid_re.search(hashid)):
        raise Exception("Invalid magnet hash")
    return torrent_from_hash(hashid)
def peerflix_metadata(link):
    """List (name, size-string) pairs for the files in a torrent.

    Shells out to `peerflix <link> -l` and scrapes its listing output.
    NOTE(review): the fixed [20:delim-6] / [delim+7:-5] slices are tied to
    a specific peerflix output format (ANSI decoration widths) -- confirm
    when upgrading peerflix.
    """
    # stdin=PIPE so peerflix does not enter interactive mode
    s = subprocess.check_output(["peerflix", link, "-l"], stdin=subprocess.PIPE)
    lines = s.split('\n')
    files = []
    for l in lines:
        delim = l.rfind(':')
        if delim == -1:
            # First line without a colon marks the end of the file listing.
            break
        if "Verifying downloaded:" in l:
            continue
        files.append((l[20:delim-6], l[delim+7:-5]))
    return files
def torrent_files(link):
    """List (name, size) pairs for the files inside a torrent/magnet link."""
    magnet = torrent2magnet(link)
    return peerflix_metadata(magnet)
def showmore(link):
    """Build a playable item list for every file inside the torrent `link`.

    Each entry's URL carries a `bf_torr_idx` selector so the player knows
    which file within the torrent to stream.  Raises when the torrent's
    file listing cannot be retrieved.
    """
    files = torrent_files(link)
    if not files:
        raise Exception("Unable to retrieve torrent files")
    results = playitem.PlayItemList()
    idx = 0
    for f in files:
        subtitle = ''
        # Sizes may arrive pre-formatted (string) or as a raw byte count.
        if isinstance(f[1], basestring):
            subtitle = 'Size: ' + f[1]
        else:
            subtitle = 'Size: ' + chanutils.byte_size(f[1])
        url = set_torridx(link, idx)
        img = '/img/icons/file-o.svg'
        idx = idx + 1
        item = playitem.PlayItem(f[0], img, url, subtitle)
        results.add(item)
    return results
TRACKERS = ("udp://open.demonii.com:1337/announce", "udp://tracker.istole.it:6969/announce", "udp://www.eddie4.nl:6969/announce", "udp://coppersurfer.tk:6969/announce", "udp://tracker.btzoo.eu:80/announce", "http://explodie.org:6969/announce", "udp://9.rarbg.me:2710/announce")
HASH_RE = re.compile("[A-F0-9]{40}")
def torrent2magnet(torrent):
    """Convert a .torrent URL containing an info-hash into a magnet URI.

    Magnet links pass through unchanged; URLs without a recognisable
    40-hex-digit hash are returned as-is.  The produced magnet embeds the
    module's default tracker list (TRACKERS).
    """
    if torrent.startswith("magnet"):
        return torrent
    matches = HASH_RE.search(torrent.upper())
    if not matches:
        return torrent
    magnet = "magnet:?xt=urn:btih:" + matches.group(0) + "&tr="
    return magnet + "&tr=".join(TRACKERS)
def showmore_action(url, title):
    # Drill-down action that lists the individual files of a torrent.
    return playitem.ShowmoreAction('View Files', url, title)
def subtitle(size, seeds, peers):
    """Format the 'Size/Seeds/Peers' caption shown under a torrent result."""
    parts = ('Size: ' + unicode(size),
             'Seeds: ' + unicode(seeds),
             'Peers: ' + unicode(peers))
    return ', '.join(parts)
def is_torrent(url):
    """True if `url` points at a .torrent file or is a magnet link."""
    parsed = urlparse.urlparse(url)
    return parsed.path.endswith(".torrent") or url.startswith('magnet:')
def torrent_idx(url):
    """Return the `bf_torr_idx` file selector from `url` as an int, or None."""
    obj = urlparse.urlparse(url)
    idx = None
    if obj.query:
        params = urlparse.parse_qs(obj.query)
        if 'bf_torr_idx' in params:
            idx = params['bf_torr_idx'][0]
    if idx is not None:
        idx = int(idx)
    return idx
def set_torridx(url, idx=-1):
    """Return `url` with its `bf_torr_idx` query parameter set to `idx`.

    -1 (the default) denotes the torrent as a whole rather than a single
    file inside it.  An existing parameter is rewritten in place.
    """
    if is_torrent_url(url):
        return re.sub('bf_torr_idx\=-?\d+', 'bf_torr_idx=' + str(idx), url)
    else:
        # Append with the correct query-string separator.
        if url.find('?') > -1:
            url = url + '&'
        else:
            url = url + '?'
        return url + "bf_torr_idx=" + str(idx)
def is_torrent_url(url):
    # True when the URL already carries a bf_torr_idx file selector.
    return "bf_torr_idx=" in url
def is_main(url):
    # Index -1 marks the torrent itself rather than a file within it.
    return torrent_idx(url) == -1
|
rafalkolasinski/py-filters | refs/heads/master | src/Colours.py | 1 | class Palette:
PRIMARY = "#212121"
SECONDARY = "#EAEAEA"
GRAY = "#777777"
ACCENTPRIMARY = "#00C3A9"
ACCENTSECONDARY = "008798"
|
FRidh/scipy | refs/heads/master | scipy/io/tests/test_netcdf.py | 57 | ''' Tests for netcdf '''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_raises, assert_equal
from scipy.io.netcdf import netcdf_file
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
    """Yield a netcdf_file holding one 'time' variable of N_EG_ELS elements.

    *args/**kwargs are forwarded to netcdf_file (target, mode, version...).
    The file is flushed before being yielded and closed on exit.
    """
    f = netcdf_file(*args, **kwargs)
    f.history = 'Created for a test'
    f.createDimension('time', N_EG_ELS)
    time = f.createVariable('time', VARTYPE_EG, ('time',))
    time[:] = np.arange(N_EG_ELS)
    time.units = 'days since 2008-01-01'
    f.flush()
    yield f
    f.close()
def check_simple(ncfileobj):
    '''Example fileobj tests '''
    # Attributes read back as bytes regardless of how they were written.
    assert_equal(ncfileobj.history, b'Created for a test')
    time = ncfileobj.variables['time']
    assert_equal(time.units, b'days since 2008-01-01')
    assert_equal(time.shape, (N_EG_ELS,))
    assert_equal(time[-1], N_EG_ELS-1)
def test_read_write_files():
    # test round trip for example file
    cwd = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    try:
        os.chdir(tmpdir)
        with make_simple('simple.nc', 'w') as f:
            pass
        # read the file we just created in 'a' mode
        with netcdf_file('simple.nc', 'a') as f:
            check_simple(f)
            # add something
            f._attributes['appendRan'] = 1
        # To read the NetCDF file we just created::
        with netcdf_file('simple.nc') as f:
            # Using mmap is the default
            assert_(f.use_mmap)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)
        # Read it in append (and check mmap is off)
        with netcdf_file('simple.nc', 'a') as f:
            assert_(not f.use_mmap)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)
        # Now without mmap
        with netcdf_file('simple.nc', mmap=False) as f:
            # Using mmap is the default
            assert_(not f.use_mmap)
            check_simple(f)
        # To read the NetCDF file we just created, as file object, no
        # mmap. When n * n_bytes(var_type) is not divisible by 4, this
        # raised an error in pupynere 1.0.12 and scipy rev 5893, because
        # calculated vsize was rounding up in units of 4 - see
        # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
        with open('simple.nc', 'rb') as fobj:
            with netcdf_file(fobj) as f:
                # by default, don't use mmap for file-like
                assert_(not f.use_mmap)
                check_simple(f)
        # Read file from fileobj, with mmap
        with open('simple.nc', 'rb') as fobj:
            with netcdf_file(fobj, mmap=True) as f:
                assert_(f.use_mmap)
                check_simple(f)
        # Again read it in append mode (adding another att)
        with open('simple.nc', 'r+b') as fobj:
            with netcdf_file(fobj, 'a') as f:
                assert_(not f.use_mmap)
                check_simple(f)
                f.createDimension('app_dim', 1)
                var = f.createVariable('app_var', 'i', ('app_dim',))
                var[:] = 42
        # And... check that app_var made it in...
        with netcdf_file('simple.nc') as f:
            check_simple(f)
            assert_equal(f.variables['app_var'][:], 42)
    finally:
        # The original duplicated this cleanup both on the success path and
        # in a bare `except:` that re-raised; try/finally runs it exactly
        # once on every path.
        os.chdir(cwd)
        shutil.rmtree(tmpdir)
def test_read_write_sio():
    """Round-trip through BytesIO, including 64-bit-offset (v2) files."""
    eg_sio1 = BytesIO()
    with make_simple(eg_sio1, 'w') as f1:
        str_val = eg_sio1.getvalue()
    eg_sio2 = BytesIO(str_val)
    with netcdf_file(eg_sio2) as f2:
        check_simple(f2)
    # Test that error is raised if attempting mmap for sio
    eg_sio3 = BytesIO(str_val)
    assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
    # Test 64-bit offset write / read
    eg_sio_64 = BytesIO()
    with make_simple(eg_sio_64, 'w', version=2) as f_64:
        str_val = eg_sio_64.getvalue()
    eg_sio_64 = BytesIO(str_val)
    with netcdf_file(eg_sio_64) as f_64:
        check_simple(f_64)
        assert_equal(f_64.version_byte, 2)
    # also when version 2 explicitly specified
    eg_sio_64 = BytesIO(str_val)
    with netcdf_file(eg_sio_64, version=2) as f_64:
        check_simple(f_64)
        assert_equal(f_64.version_byte, 2)
def test_read_example_data():
    # read any example data files
    # Smoke test: every bundled .nc file opens with and without mmap.
    for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
        with netcdf_file(fname, 'r') as f:
            pass
        with netcdf_file(fname, 'r', mmap=False) as f:
            pass
def test_itemset_no_segfault_on_readonly():
    # Regression test for ticket #1202.
    # Open the test file in read-only mode.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
        with netcdf_file(filename, 'r') as f:
            time_var = f.variables['time']
    # time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
    assert_raises(RuntimeError, time_var.assignValue, 42)
def test_write_invalid_dtype():
    # 64-bit integer dtypes are not representable in NetCDF and must raise.
    dtypes = ['int64', 'uint64']
    if np.dtype('int').itemsize == 8:   # 64-bit machines
        dtypes.append('int')
    if np.dtype('uint').itemsize == 8:   # 64-bit machines
        dtypes.append('uint')
    with netcdf_file(BytesIO(), 'w') as f:
        f.createDimension('time', N_EG_ELS)
        for dt in dtypes:
            assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
def test_flush_rewind():
    # flush() twice must not duplicate data: the stream is rewound,
    # not appended to.
    stream = BytesIO()
    with make_simple(stream, mode='w') as f:
        x = f.createDimension('x',4)
        v = f.createVariable('v', 'i2', ['x'])
        v[:] = 1
        f.flush()
        len_single = len(stream.getvalue())
        f.flush()
        len_double = len(stream.getvalue())
    assert_(len_single == len_double)
def test_dtype_specifiers():
    # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
    # Specifying np.int16 or similar only works from the same commit as this
    # comment was made.
    # All three spellings of the same dtype must be accepted.
    with make_simple(BytesIO(), mode='w') as f:
        f.createDimension('x',4)
        f.createVariable('v1', 'i2', ['x'])
        f.createVariable('v2', np.int16, ['x'])
        f.createVariable('v3', np.dtype(np.int16), ['x'])
def test_ticket_1720():
    """Round-trip a float variable through an in-memory netcdf file."""
    values = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    stream = BytesIO()
    with netcdf_file(stream, 'w') as f:
        f.history = 'Created for a test'
        f.createDimension('float_var', 10)
        float_var = f.createVariable('float_var', 'f', ('float_var',))
        float_var[:] = values
        float_var.units = 'metres'
        f.flush()
        contents = stream.getvalue()
    stream = BytesIO(contents)
    with netcdf_file(stream, 'r') as f:
        assert_equal(f.history, b'Created for a test')
        float_var = f.variables['float_var']
        assert_equal(float_var.units, b'metres')
        assert_equal(float_var.shape, (10,))
        assert_allclose(float_var[:], values)
def test_mmaps_segfault():
    # Closing an mmapped file while views of its data are still alive
    # must neither warn (first part) nor crash (second part).
    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        with netcdf_file(filename, mmap=True) as f:
            x = f.variables['lat'][:]
            # should not raise warnings
            del x
    def doit():
        with netcdf_file(filename, mmap=True) as f:
            return f.variables['lat'][:]
    # should not crash
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        x = doit()
    x.sum()
def test_zero_dimensional_var():
    """A variable with no dimensions must report isrec as an actual False."""
    stream = BytesIO()
    with make_simple(stream, 'w') as f:
        var = f.createVariable('zerodim', 'i2', [])
        # Deliberately `is False`: .isrec must be a real boolean, so do
        # not simplify this to `assert not var.isrec`.
        assert var.isrec is False, var.isrec
        f.flush()
def test_byte_gatts():
    """Global "string" attributes round-trip as bytes, as they did pre-py3k,
    regardless of whether they were written as bytes or unicode."""
    with in_tempdir():
        fname = 'g_byte_atts.nc'
        writer = netcdf_file(fname, 'w')
        writer._attributes['holy'] = b'grail'
        writer._attributes['witch'] = 'floats'
        writer.close()

        reader = netcdf_file(fname, 'r')
        assert_equal(reader._attributes['holy'], b'grail')
        assert_equal(reader._attributes['witch'], b'floats')
        reader.close()
def test_open_append():
    """Attributes written in 'w' mode must survive reopening in 'a' mode."""
    with in_tempdir():
        fname = 'append_dat.nc'

        # open 'w', store one attribute
        writer = netcdf_file(fname, 'w')
        writer._attributes['Kilroy'] = 'was here'
        writer.close()

        # reopen in 'a': the attribute must still be there, and we can add one
        appender = netcdf_file(fname, 'a')
        assert_equal(appender._attributes['Kilroy'], b'was here')
        appender._attributes['naughty'] = b'Zoot'
        appender.close()

        # reopen in 'r' and verify both attributes
        reader = netcdf_file(fname, 'r')
        assert_equal(reader._attributes['Kilroy'], b'was here')
        assert_equal(reader._attributes['naughty'], b'Zoot')
        reader.close()
|
alimony/django | refs/heads/master | django/conf/locale/zh_Hans/formats.py | 130 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日'  # 2016年9月5日
TIME_FORMAT = 'H:i'  # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i'  # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月'  # 2016年9月
MONTH_DAY_FORMAT = 'm月j日'  # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日'  # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i'  # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1  # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Bug fix: the CJK formats previously used '%n' (not a strftime directive;
# strptime raises ValueError on it) and '%j' (day-of-year, not day-of-month).
# The correct directives are %m and %d; strptime accepts non-zero-padded
# values for both, so '2016年9月5日' still parses.
DATE_INPUT_FORMATS = [
    '%Y/%m/%d',  # '2016/09/05'
    '%Y-%m-%d',  # '2016-09-05'
    '%Y年%m月%d日',  # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
    '%H:%M',  # '20:45'
    '%H:%M:%S',  # '20:45:29'
    '%H:%M:%S.%f',  # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
    '%Y/%m/%d %H:%M',  # '2016/09/05 20:45'
    '%Y-%m-%d %H:%M',  # '2016-09-05 20:45'
    '%Y年%m月%d日 %H:%M',  # '2016年9月5日 20:45'
    '%Y/%m/%d %H:%M:%S',  # '2016/09/05 20:45:29'
    '%Y-%m-%d %H:%M:%S',  # '2016-09-05 20:45:29'
    '%Y年%m月%d日 %H:%M:%S',  # '2016年9月5日 20:45:29'
    '%Y/%m/%d %H:%M:%S.%f',  # '2016/09/05 20:45:29.000200'
    '%Y-%m-%d %H:%M:%S.%f',  # '2016-09-05 20:45:29.000200'
    '%Y年%m月%d日 %H:%M:%S.%f',  # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
|
RossBrunton/django | refs/heads/master | tests/modeladmin/models.py | 147 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Band(models.Model):
    """A musical act; target of the FKs on ``Concert`` and ``ValidationTestModel``."""
    name = models.CharField(max_length=100)
    bio = models.TextField()
    sign_date = models.DateField()
    class Meta:
        # Default queryset ordering: alphabetical by band name.
        ordering = ('name',)
    def __str__(self):
        return self.name
class Concert(models.Model):
    """A concert linking two ``Band`` rows: a headliner and an opener."""
    main_band = models.ForeignKey(Band, related_name='main_concerts')
    opening_band = models.ForeignKey(Band, related_name='opening_concerts',
        blank=True)
    # NOTE(review): integer choice keys on CharField columns look odd but
    # this is deliberate test-fixture data for admin validation — leave as-is.
    day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
    transport = models.CharField(max_length=100, choices=(
        (1, 'Plane'),
        (2, 'Train'),
        (3, 'Bus')
    ), blank=True)
class ValidationTestModel(models.Model):
    """Model exercising many field types for the ModelAdmin validation tests."""
    name = models.CharField(max_length=100)
    slug = models.SlugField()
    users = models.ManyToManyField(User)
    state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
    is_active = models.BooleanField(default=False)
    pub_date = models.DateTimeField()
    band = models.ForeignKey(Band)
    no = models.IntegerField(verbose_name="Number", blank=True, null=True)  # This field is intentionally 2 characters long. See #16080.
    def decade_published_in(self):
        """Return the publication decade as a display string, e.g. "1990's"."""
        return self.pub_date.strftime('%Y')[:3] + "0's"
class ValidationTestInlineModel(models.Model):
    """Child of ``ValidationTestModel``; used to validate inline admin config."""
    parent = models.ForeignKey(ValidationTestModel)
|
rogerwang/chromium | refs/heads/node | third_party/closure_linter/closure_linter/common/erroraccumulator.py | 264 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error handler class that accumulates an array of errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import errorhandler
class ErrorAccumulator(errorhandler.ErrorHandler):
  """Error handler that simply collects every reported error in a list."""

  def __init__(self):
    self._errors = []

  def HandleError(self, error):
    """Record the error for later retrieval.

    Args:
      error: The error object
    """
    self._errors.append(error)

  def GetErrors(self):
    """Returns the accumulated errors.

    Returns:
      A sequence of errors.
    """
    return self._errors
|
n0fate/volafox | refs/heads/master | volafox/plugins/ps.py | 1 | # -*- coding: utf-8 -*-
import sys
import struct
import time
import os
from tableprint import columnprint
from volafox.vatopa.addrspace import FileAddressSpace
from volafox.vatopa.ia32_pml4 import IA32PML4MemoryPae
# Layout tables for the kernel structures volafox parses.  Each row holds
# sizes and struct.unpack() format strings for one OS build; the offsets
# were reverse-engineered per build, so every row is build-specific —
# do not edit a format string without re-deriving it from the matching
# xnu headers.
# Lion 32bit, SN 32bit, Lion64bit, SN 64bit, Mountain Lion 64bit, Mavericks, El Capitan
DATA_PROC_STRUCTURE = [[476+24+168, '=4xIIIII4xII88xI276xQII20xbbbb52sI164xI', 16, '=IIII', 283, '=IIIIIII255s', 108, '=12xI4x8x64xI12x'],
    [476+168, '=4xIIIII4xII64xI276xQII20xbbbb52sI164xI', 16, '=IIII', 283, '=IIIIIII255s', 108, '=12xI4x8x64xI12x'],
    [752+24+268, '=8xQQQQI4xII152xQ456xQQQ16xbbbb52sQ264xI', 32, '=QQQQ', 303, '=IQQIQQQ255s', 120, '=24xI4x8x64xI12x'],
    [1028, '=8xQQQQI4xII144xQ448xQQQ16xbbbb52sQ264xI', 32, '=QQQQ', 303, '=IQQIQQQ255s', 120, '=24xI4x8x64xI12x'],
    [752+24+276, '=8xQQQQI4xII152xQ456xQQQ16xbbbb52sQ272xI', 32, '=QQQQ', 303, '=IQQIQQQ255s', 120, '=24xI4x8x64xI12x'],
    [760+24+268, '=8xQQQQI4xII160xQ456xQQQ16xbbbb52sQ264xI', 32, '=QQQQ', 303, '=IQQIQQQ255s', 120, '=24xI4x8x64xI12x'],
    [760+24+268+16, '=8xQQQQI4xII160xQ456xQQQ16xbbbb52sQ264x16xI', 32, '=QQQQ', 303, '=IQQIQQQ255s', 120, '=24xI4x8x64xI12x'],
    [1052, '=8xQQQQI4xII160xQ456xQQQ16xbbbb17x35sQ264xI', 32, '=QQQQ', 303, '=IQQIQQQ255s', 120, '=24xI4x8x64xI12x'],
    [1068, '=8xQQQQI4xII160x16xQ456xQQQ16xbbbb17x35sQ264xI', 32, '=QQQQ', 303, '=IQQIQQQ255s', 120, '=24xI4x8x64xI12x'],
    #[1068, '=8xQQQQI4xII80xQ80x16x64x392xQQQ16xbbbb17x35sQ264xI', 32, '=QQQQ', 303, '=IQQIQQQ255s', 120, '=24xI4x8x64xI12x']
    ]
# Mavericks add new element in proc structure : uint64_t p_puniqueid; /* parent's unique ID - set on fork/spawn/vfork, doesn't change if reparented. */
# Lion 32bit, SN 32bit, Lion64bit, SN 64bit, Mountain Lion 64bit
DATA_TASK_STRUCTURE = [[32+460+4, '=8xIIIIII460xI'],
    [36+428+4, '=12xIIIIII428xI'],
    [736, '=16xIII4xQQQ672xQ'],
    [712, '=24xIII4xQQQ640xQ'],
    [744, '=16xIII4xQQQ656x24xQ']]
# http://opensource.apple.com/source/xnu/xnu-xxxx.xx.xx/osfmk/vm/vm_map.h
# Lion 32bit, SN 32bit, Lion64bit, SN 64bit, Mavericks
DATA_VME_STRUCTURE = [[162+12, '=12xIIQQII8x4xIQ16xIII42xIIIIIIIII', 52, '=IIQQ24xI'],
    [162, '=12xIIQQIIIQ16xIII42xIIIIIIIII', 40, '=IIQQ12xI'],
    [194, '=16xQQQQII16xQQ16xIII42xIIIIIIIII', 80, '=QQQQ40xQ'],
    [178, '=16xQQQQIIQQ16xIII42xIIIIIIIII', 56, '=QQQQ16xQ'],
    [202, '=16xQQQQII16x4xIQQ16xIII42xIIIIIIIII', 80, '=QQQQ40xQ']]
# http://opensource.apple.com/source/xnu/xnu-xxxx.xx.xx/osfmk/i386/pmap.h
# 11D50, Lion 32bit, SN 32bit, Lion64bit, SN 64bit
DATA_PMAP_STRUCTURE = [[44, '=36xQ'],
    [12, '=4xQ'],
    [100, '=84xQII'],
    [80, '=72xQ'],
    [16, '=8xQ'],
    [152, '=128xQQQ']]
# queue_entry is just a next/prev pointer pair; width follows the arch.
# 32bit, 64bit
DATA_QUEUE_STRUCTURE = [[8, '=II'],
    [16, '=QQ']]
def unsigned8(n):
    """Interpret n as an unsigned 8-bit value: mask to the range 0..255."""
    return n & 0xFFL
class process_manager:
    """Walks kernel proc/task structures in a Mac OS X physical-memory image.

    All reads go through ``x86_mem_pae`` (a PAE/PML4 address space); the
    struct layouts are selected from the module-level DATA_* tables by
    (arch, os_version, build).  Addresses handed in are kernel virtual
    addresses unless noted otherwise.
    """
    def __init__(self, x86_mem_pae, arch, os_version, build, base_address, nproc):
        self.x86_mem_pae = x86_mem_pae
        self.arch = arch                  # 32 or 64
        self.os_version = os_version      # Darwin major (10=SL .. 15=El Capitan)
        self.build = build                # e.g. '15A264'; disambiguates layouts
        self.base_address = base_address  # KASLR slide added to symbol addrs
        self.nproc = nproc                # upper bound when walking proc list
    def get_proc(self, proc_sym_addr, PROC_STRUCTURE):
        """Read one proc structure; return (fields, next_proc_addr, pid).

        Returns (``[]``, '', '') when the address or structure is invalid.
        """
        proc = []
        if not(self.x86_mem_pae.is_valid_address(proc_sym_addr)):
            return proc, '', ''
        #print '%x'%self.x86_mem_pae.vtop(proc_sym_addr)
        proclist = self.x86_mem_pae.read(proc_sym_addr, PROC_STRUCTURE[0])
        data = struct.unpack(PROC_STRUCTURE[1], proclist)
        if data[0] == 0:
            return proc, '', ''
        try:
            pgrp_t = self.x86_mem_pae.read(data[16], PROC_STRUCTURE[2]) # pgrp structure
            m_pgrp = struct.unpack(PROC_STRUCTURE[3], pgrp_t)
        except struct.error:
            return proc, '', ''
        session_t = self.x86_mem_pae.read(m_pgrp[3], PROC_STRUCTURE[4]) # session structure
        m_session = struct.unpack(PROC_STRUCTURE[5], session_t)
        #print 'u_cred : %x'%self.x86_mem_pae.vtop(data[7])
        p_ucred = self.x86_mem_pae.read(data[7], PROC_STRUCTURE[6])
        ucred = struct.unpack(PROC_STRUCTURE[7], p_ucred)
        proc.append(self.x86_mem_pae.vtop(proc_sym_addr))
        proc.append(data[1])
        proc.append(data[2])
        proc.append(data[3])
        proc.append(data[4])
        proc.append(data[5])
        proc.append(data[6])
        proc.append(data[8]) # user_stack
        proc.append(data[9]) # vnode of executable
        proc.append(data[10]) # offset in executable vnode
        proc.append(data[11]) # Process Priority
        proc.append(data[12]) # User-Priority based on p_cpu and p_nice
        proc.append(data[13]) # Process 'nice' value
        proc.append(data[14]) # User-Priority based on p_cpu and p_nice
        proc.append(data[15].split('\x00', 1)[0]) # process name
        proc.append(str(m_session[7]).strip('\x00')) # username
        proc.append(data[17]) # time
        proc.append(ucred[0]) # ruid
        proc.append(ucred[1]) # rgid
        return proc, data[0], data[1]
    def get_proc_struct(self):
        """Pick the proc layout row for this arch/version/build ([] if unknown)."""
        if self.arch == 32:
            if self.os_version == 11:
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[0] # Lion 32bit
            else:
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[1] # Snow Leopard 32bit
        else:
            if self.os_version == 11:
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[2] # Lion 64bit
            elif self.os_version == 12:
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[4] # Mountain Lion
            elif self.os_version == 13:
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[5] # Mavericks
            elif self.os_version == 14:
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[6] # above Yosemite
            elif self.os_version == 15 and (self.build == '15A264' or self.build == '15B42'):
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[7] # El Capitan
            elif self.os_version == 15 and (self.build == '15C50' or self.build == '15D21' or self.build == '15E65' or self.build == '15F34'):
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[8] # El Capitan
            elif self.os_version == 10:
                PROC_STRUCTURE = DATA_PROC_STRUCTURE[3] # Snow Leopard 64bit
            else:
                PROC_STRUCTURE = []
        return PROC_STRUCTURE
    def get_kernel_task_addr(self, sym_addr):
        """Dereference _kernproc (symbol addr + KASLR slide) to the proc ptr."""
        if self.arch == 32:
            kernproc = self.x86_mem_pae.read(sym_addr+self.base_address, 4) # __DATA.__common _kernproc
            proc_sym_addr = struct.unpack('I', kernproc)[0]
        else:
            kernproc = self.x86_mem_pae.read(sym_addr+self.base_address, 8) # __DATA.__common _kernproc
            proc_sym_addr = struct.unpack('Q', kernproc)[0]
        return proc_sym_addr
    def pass_kernel_task_proc(self, sym_addr):
        """Read the pointer one slot past sym_addr (skips the kernel task proc).

        NOTE(review): unlike get_kernel_task_addr this does NOT add the
        KASLR slide — presumably callers pass an already-slid address;
        confirm against call sites.
        """
        if self.arch == 32:
            kernproc = self.x86_mem_pae.read(sym_addr+4, 4) # __DATA.__common _kernproc
            proc_sym_addr = struct.unpack('I', kernproc)[0]
        else:
            kernproc = self.x86_mem_pae.read(sym_addr+8, 8) # __DATA.__common _kernproc
            proc_sym_addr = struct.unpack('Q', kernproc)[0]
        return proc_sym_addr
    def get_proc_list(self, sym_addr, proc_list, pid):
        """Walk the proc linked list into proc_list.

        pid == -1 collects every process; pid > 0 stops at the first match.
        Returns 0 on success, 1 on failure (bad symbol, unknown layout, or
        an unmatched filter).  Cycle-protection: stop on revisited address.
        """
        if not(self.x86_mem_pae.is_valid_address(sym_addr+self.base_address)):
            return 1
        PROC_STRUCTURE = self.get_proc_struct()
        if len(PROC_STRUCTURE) == 0:
            return 1
        proc_sym_addr = self.get_kernel_task_addr(sym_addr)
        proc_addr = []
        for count in xrange(self.nproc):
            if proc_sym_addr == 0:
                break
            if not(self.x86_mem_pae.is_valid_address(proc_sym_addr)):
                break
            proc = []
            if proc_sym_addr in proc_addr:
                break
            proc_addr.append(proc_sym_addr)
            proc, next_proc_addr, pid_in_proc = self.get_proc(proc_sym_addr, PROC_STRUCTURE)
            proc_sym_addr = next_proc_addr
            if pid > 0 and len(proc):
                if pid_in_proc == pid and len(proc):
                    proc_list.append(proc)
                    return 0
            elif pid == -1 and len(proc): # All Process
                proc_list.append(proc)
            else: # Process Dump or filtering
                return 1
        return 0
    def get_queue(self, ptr):
        """Read a queue_entry (next, prev pointer pair) at ptr + slide.

        NOTE(review): on an unsupported arch the early `return queue` path
        references `queue` before assignment and would raise NameError.
        """
        if self.arch == 32:
            QUEUE_STRUCTURE = DATA_QUEUE_STRUCTURE[0]
        elif self.arch == 64:
            QUEUE_STRUCTURE = DATA_QUEUE_STRUCTURE[1]
        else:
            return queue
        queue_ptr = self.x86_mem_pae.read(ptr+self.base_address, QUEUE_STRUCTURE[0])
        queue = struct.unpack(QUEUE_STRUCTURE[1], queue_ptr)
        return queue # next, prev
    def get_task_queue(self, sym_addr, count, task_list):
        """Walk the kernel task queue, appending up to count-1 task records.

        Each record is [index, phys addr, virt addr, task fields, bsd_info].
        Returns the number of entries visited (+1).
        """
        queue = self.get_queue(sym_addr)
        print '[+] Task Count at Kernel Symbol: %d'%(count-1)
        #print 'Queue Next: %.8x, prev: %.8x'%(self.x86_mem_pae.vtop(queue[0]),self.x86_mem_pae.vtop(queue[1]))
        #print '[+] Get Task Queue'
        task_ptr = queue[0] # next
        i = 1
        while i < count:
            task = [] # temp
            if task_ptr == 0:
                break
            if not(self.x86_mem_pae.is_valid_address(task_ptr)):
                break
            task_struct = self.get_task("", task_ptr)
            task.append(i) # count
            task.append(self.x86_mem_pae.vtop(task_ptr)) # physical address
            task.append(task_ptr) # virtual address
            task.append(task_struct) # task structure
            task.append(task_struct[6]) # task.bsd_info physical address
            task_list.append(task)
            task_ptr = task_struct[4] # task_queue_t
            i += 1
        return i
    def get_task(self, proc, task_ptr):
        """Unpack one task structure at task_ptr (proc arg is unused)."""
        #print '====== task.h --> osfmk\\kern\\task.h'
        if self.arch == 32:
            if self.os_version == 11:
                TASK_STRUCTURE = DATA_TASK_STRUCTURE[0]
            else:
                TASK_STRUCTURE = DATA_TASK_STRUCTURE[1]
        else:
            if self.os_version == 11:
                TASK_STRUCTURE = DATA_TASK_STRUCTURE[2]
            elif self.os_version >= 12:
                TASK_STRUCTURE = DATA_TASK_STRUCTURE[4]
            else:
                TASK_STRUCTURE = DATA_TASK_STRUCTURE[3]
        task_info = self.x86_mem_pae.read(task_ptr, TASK_STRUCTURE[0])
        task_struct = struct.unpack(TASK_STRUCTURE[1], task_info)
        return task_struct
    def get_proc_region(self, task_ptr, user_stack, fflag):
        """Enumerate a task's vm_map entries.

        Returns (vm_list, vm_struct): vm_list holds [start, end, perm,
        max_perm] per entry — or a single [start, end] covering the whole
        map when fflag == 1 (full dump).
        """
        vm_list = []
        vm_struct = []
        if self.arch == 32:
            if self.os_version >= 11: # Lion
                VME_STRUCTURE = DATA_VME_STRUCTURE[0]
            else:
                VME_STRUCTURE = DATA_VME_STRUCTURE[1]
        else:
            if self.os_version == 11: # Lion
                VME_STRUCTURE = DATA_VME_STRUCTURE[2]
            elif self.os_version == 12: # Mountain Lion
                VME_STRUCTURE = DATA_VME_STRUCTURE[2]
            elif self.os_version >= 13: # above Mavericks
                VME_STRUCTURE = DATA_VME_STRUCTURE[4]
            else:
                VME_STRUCTURE = DATA_VME_STRUCTURE[3]
        vm_info = self.x86_mem_pae.read(task_ptr, VME_STRUCTURE[0])
        vm_struct = struct.unpack(VME_STRUCTURE[1], vm_info)
        # if vm_struct[7] == 0: # pmap_t
        #     return vm_list, vm_struct
        # if not(self.x86_mem_pae.is_valid_address(vm_struct[7])):
        #     return vm_list, vm_struct
        ### 11.09.28 end n0fate
        #print '======= vm_map_t --> osfmk\\vm\\vm_map.h ========'
        #print 'prev: %x'%vm_struct[0]
        #print 'next: %x'%self.x86_mem_pae.vtop(vm_struct[1])
        #print ''
        #print '[+] Virtual Memory Map Information'
        #print ' [-] Virtual Address Start Point: 0x%x'%vm_struct[2]
        #print ' [-] Virtual Address End Point: 0x%x'%vm_struct[3]
        #print ' [-] Number of Entries: %d'%vm_struct[4] # number of entries
        #print ' [-] Pageable Entries: %x'%vm_struct[5]
        #print 'page_shift: %x'%vm_struct[6]
        #print 'pmap_t: %x'%self.x86_mem_pae.vtop(vm_struct[7])
        #print 'Virtual size: %x\n'%vm_struct[7]
        vm_list = []
        # process full dump
        if fflag == 1:
            vm_temp_list = []
            vm_temp_list.append(vm_struct[2])
            vm_temp_list.append(vm_struct[3])
            vm_list.append(vm_temp_list)
            return vm_list, vm_struct
        #print ''
        #print '[+] Generating Process Virtual Memory Maps'
        entry_next_ptr = vm_struct[1]
        for data in range(0, vm_struct[4]): # number of entries
            try:
                #print 'next ptr: %x'%self.x86_mem_pae.vtop(entry_next_ptr)
                vm_list_ptr = self.x86_mem_pae.read(entry_next_ptr, VME_STRUCTURE[2])
                vme_list = struct.unpack(VME_STRUCTURE[3], vm_list_ptr)
            except:
                break
            # *prev, *next, start, end
            vm_temp_list = []
            vm_temp_list.append(vme_list[2]) # start
            vm_temp_list.append(vme_list[3]) # end
            # get permission on virtual memory ('rwx')
            # The six protection bits live at bit offset 7 of this field:
            # three current-protection bits then three max-protection bits.
            permission = ''
            max_permission = ''
            perm_list = []
            perm = ((vme_list[4]) >> 7 )& 0x003f
            count = 6
            while count >= 0:
                perm_list.append(perm&1)
                perm = perm >> 1
                count = count - 1
            if (perm_list[0] == 1 ):
                permission += 'r' # Protection
            else:
                permission += '-'
            if (perm_list[1] == 1 ):
                permission += 'w' # Protection
            else:
                permission += '-'
            if (perm_list[2] == 1 ):
                permission += 'x' # Protection
            else:
                permission += '-'
            if (perm_list[3] == 1 ):
                max_permission += 'r' # Max Protection
            else:
                max_permission += '-'
            if (perm_list[4] == 1 ):
                max_permission += 'w' # Max Protection
            else:
                max_permission += '-'
            if (perm_list[5] == 1 ):
                max_permission += 'x' # Max Protection
            else:
                max_permission += '-'
            ##########################################
            #if vme_list[3] == user_stack:
            #    print ' [-] Region from 0x%x to 0x%x (%s, max %s;), %s'%(vme_list[2], vme_list[3], permission, max_permission, "<UserStack>")
            #else:
            #    print ' [-] Region from 0x%x to 0x%x (%s, max %s;)'%(vme_list[2], vme_list[3], permission, max_permission)
            #print 'next[data]: %x'%self.x86_mem_pae.vtop(vme_list[1])
            entry_next_ptr = vme_list[1]
            vm_temp_list.append(permission)
            vm_temp_list.append(max_permission)
            vm_list.append(vm_temp_list)
        return vm_list, vm_struct
    def get_proc_cr3(self, vm_list, vm_struct):
        """Extract the process page-table base (CR3) from its pmap structure."""
        if self.arch == 32:
            if self.build == '11D50': # temporary 12.04.24 n0fate
                PMAP_STRUCTURE = DATA_PMAP_STRUCTURE[0]
            elif self.os_version >= 11: # Lion xnu-1699, build version 11D50 has some bug (36xQ)
                PMAP_STRUCTURE = DATA_PMAP_STRUCTURE[1]
            else: # Leopard or Snow Leopard xnu-1456
                PMAP_STRUCTURE = DATA_PMAP_STRUCTURE[2]
        else:
            if self.build == '11D50': # temporary 12.04.24 n0fate
                PMAP_STRUCTURE = DATA_PMAP_STRUCTURE[3]
            elif self.os_version >= 11: # Lion xnu-1699, build version 11D50 has some bug (36xQ)
                PMAP_STRUCTURE = DATA_PMAP_STRUCTURE[4]
            else: # Leopard or Snow Leopard xnu-1456
                PMAP_STRUCTURE = DATA_PMAP_STRUCTURE[5]
        # pmap pointer moved one slot in the vm_map layout from Mavericks on.
        if self.os_version <= 12:
            pmap_info = self.x86_mem_pae.read(vm_struct[6], PMAP_STRUCTURE[0])
        else:
            pmap_info = self.x86_mem_pae.read(vm_struct[7], PMAP_STRUCTURE[0])
        pm_cr3 = struct.unpack(PMAP_STRUCTURE[1], pmap_info)[0]
        return pm_cr3
    def get_proc_dump(self, vm_list, vm_struct, process_name, mempath):
        """Dump each region in vm_list to '<name>-<start>-<end>' files.

        Unmapped pages after the first mapped page are zero-filled so the
        file stays offset-accurate; empty region files are removed.
        """
        pm_cr3 = self.get_proc_cr3(vm_list, vm_struct)
        print '[+] Resetting the Page Mapping Table: 0x%x'%pm_cr3
        proc_pae = IA32PML4MemoryPae(FileAddressSpace(mempath), pm_cr3)
        print '[+] Process Dump Start'
        for vme_info in vm_list:
            #print proc_pae.vtop(vme_info[0])
            #print vme_info[1]
            nop_code = 0x00
            pk_nop_code = struct.pack('=B', nop_code)
            nop = pk_nop_code*0x1000
            # NOTE(review): `file` shadows the builtin — left as-is (doc-only pass).
            file = open('%s-%x-%x'%(process_name, vme_info[0], vme_info[1]), mode="wb")
            nop_flag = 0
            for i in range(vme_info[0], vme_info[1], 0x1000):
                raw_data = 0x00
                if not(proc_pae.is_valid_address(i)):
                    if nop_flag == 1:
                        raw_data = nop
                        file.write(raw_data)
                    continue
                raw_data = proc_pae.read(i, 0x1000)
                if raw_data is None:
                    if nop_flag == 1:
                        raw_data = nop
                        file.write(raw_data)
                    continue
                file.write(raw_data)
                nop_flag = 1
            file.close()
            size = os.path.getsize('%s-%x-%x'%(process_name, vme_info[0], vme_info[1]))
            if size == 0:
                os.remove('%s-%x-%x'%(process_name, vme_info[0], vme_info[1]))
            else:
                print ' [-] [DUMP] Image Name: %s-%x-%x'%(process_name, vme_info[0], vme_info[1])
        print '[+] Process Dump End'
        return
#################################### PUBLIC FUNCTIONS ####################################
def proc_print(data_list, os_version):
    """Render collected proc records as an aligned table on stdout.

    On Lion+ (os_version >= 11) the username column also shows (uid,gid).
    """
    print '[+] Process List'
    if os_version >= 11:
        headerlist = ["OFFSET(P)", "PID", "PPID", "PRIORITY", "NICE", "PROCESS_NAME", "USERNAME(UID,GID)", "CRED(UID,GID)", "CREATE_TIME (UTC+0)", ""]
    else:
        headerlist = ["OFFSET(P)", "PID", "PPID", "PRIORITY", "NICE", "PROCESS_NAME", "USERNAME", "CRED(UID,GID)", "CREATE_TIME (UTC+0)", ""]
    contentlist = []
    for data in data_list:
        line = []
        line.append("0x%.8X"%data[0]) # offset
        line.append('%d'%data[1]) # pid
        line.append('%d'%data[4]) # ppid
        line.append('%d'%unsigned8(data[10])) # Priority
        line.append('%d'%unsigned8(data[12])) # nice
        line.append('%s'%data[14]) # Changed by CL to read null formatted strings
        if os_version >= 11:
            line.append('%s(%d,%d)'%(data[15], data[5], data[6]))
        else:
            line.append('%s'%(data[15]))
        line.append('(%d,%d)'%(data[17], data[18]))
        line.append('%s'%time.strftime("%a %b %d %H:%M:%S %Y", time.gmtime(data[16])))
        line.append('')
        contentlist.append(line)
    # -1 widths let columnprint auto-size every column.
    mszlist = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
    columnprint(headerlist, contentlist, mszlist)
def get_proc_list(x86_mem_pae, sym_addr, arch, os_version, build, base_address, nproc):
    """Return every proc record reachable from the _kernproc symbol."""
    manager = process_manager(x86_mem_pae, arch, os_version, build, base_address, nproc)
    procs = []
    manager.get_proc_list(sym_addr, procs, -1)  # -1 selects all PIDs
    return procs
def print_proc_list(proc_list, os_version):
    # Thin wrapper kept for the public plugin API; delegates to proc_print.
    proc_print(proc_list, os_version)
def get_proc_dump(x86_mem_pae, sym_addr, arch, os_version, build, pid, base_address, mempath, nproc):
    """Dump the memory regions of the process with *pid* to files in cwd.

    Returns 1 on lookup failure (bad symbol, unknown layout, or PID not
    found), None on success.
    """
    proclist = []
    ProcMan = process_manager(x86_mem_pae, arch, os_version, build, base_address, nproc)
    ret = ProcMan.get_proc_list(sym_addr, proclist, pid)
    # Bug fix: the old code indexed proclist[0] unconditionally and raised
    # IndexError when the PID was not present in the process list.
    if ret == 1 or len(proclist) == 0:
        return 1
    dumped_proc = proclist
    proc_print(dumped_proc, os_version)
    # proc[2] is task ptr; task_struct[3] is the vm_map; proc[5] the user stack.
    task_struct = ProcMan.get_task(dumped_proc[0], dumped_proc[0][2])
    retData = ProcMan.get_proc_region(task_struct[3], dumped_proc[0][5], 0)
    vm_list = retData[0]
    vm_struct = retData[1]
    ProcMan.get_proc_dump(vm_list, vm_struct, str(dumped_proc[0][1])+'-'+dumped_proc[0][14], mempath)
    return
def get_task_dump(x86_mem_pae, sym_addr, count, arch, os_version, build, task_id, base_address, mempath, nproc):
ProcMan = process_manager(x86_mem_pae, arch, os_version, build, base_address, nproc)
task_list = []
check_count = ProcMan.get_task_queue(sym_addr, count, task_list) # task queue ptr, task_count, task_list
for task in task_list:
if task[0] == task_id:
task_struct = task
break
if len(task_struct) == 0:
'[+] Could not found TASK ID'
return
PROC_STRUCTURE = ProcMan.get_proc_struct()
if len(PROC_STRUCTURE) == 0:
return
#proc_matched = ProcMan.get_proc(task[4], PROC_STRUCTURE)[0]
#if len(proc_matched) == 0:
# print '[+] task dump failed'
# return
retData = ProcMan.get_proc_region(task_struct[3][3], 0x00, 0) #
vm_list = retData[0]
vm_struct = retData[1]
ProcMan.get_proc_dump(vm_list, vm_struct, str(task_id), mempath)
return
def get_task_list(x86_mem_pae, sym_addr, count, arch, os_version, build, base_address, nproc):
    """Walk the kernel task queue; return (task_list, visited_count)."""
    manager = process_manager(x86_mem_pae, arch, os_version, build, base_address, nproc)
    tasks = []
    visited = manager.get_task_queue(sym_addr, count, tasks)
    return tasks, visited
def proc_lookup(proc_list, task_list, x86_mem_pae, arch, os_version, build, base_address, nproc):
    """Cross-check the task queue against the proc list.

    Returns (valid_task, unlinked_task): tasks whose bsd_info matches a
    proc in proc_list, and tasks with no matching proc (possible hidden
    processes).  Returns None when the kernel layout is unsupported.
    """
    ProcMan = process_manager(x86_mem_pae, arch, os_version, build, base_address, nproc)
    PROC_STRUCTURE = ProcMan.get_proc_struct()
    if len(PROC_STRUCTURE) == 0:
        print ' [*] Doesn\'t support kernel version'
        return
    print '[+] Task List Count at Queue: %d'%len(task_list)
    print '[+] Process List Count: %d'%len(proc_list)
    # task list
    unlinked_task = []
    valid_task = []
    # comment: task = [count, task_ptr(Physical), task_ptr(Virtual), [task structure], task.bsd_info]
    for task in task_list:
        task_ptr = task[2]
        valid_flag = 0
        for proc in proc_list:
            task_ptr_in_proc = proc[2]
            if task_ptr_in_proc == task_ptr:
                valid_flag = 1
                task.append(proc[1]) # PID
                task.append(proc[14]) # process name
                task.append(proc[15]) # username
                valid_task.append(task)
                break
        if valid_flag == 0:
            # No proc entry points at this task: recover identity straight
            # from the task's bsd_info pointer, if readable.
            proc_matched = ProcMan.get_proc(task[4], PROC_STRUCTURE)[0]
            if len(proc_matched) != 0:
                task.append(proc_matched[1])
                task.append(proc_matched[14])
                task.append(proc_matched[15])
                unlinked_task.append(task)
            else:
                task.append('-')
                task.append('-')
                task.append('-')
                unlinked_task.append(task)
    return valid_task, unlinked_task
def task_print(data_list):
    """Render task records (as built by get_task_queue/proc_lookup) as a table."""
    #print '[+] Process List'
    headerlist = ["TASK_CNT", "OFFSET(P)", "REF_CNT", "Active", "Halt", "VM_MAP(V)", "PID", "PROCESS", "USERNAME", ""]
    contentlist = []
    for data in data_list:
        line = ['%d'%data[0]] # count
        line.append("0x%.8X"%data[1]) # offset
        line.append('%d'%data[3][0]) # Number of references to me
        line.append('%d'%data[3][1]) # task has not been terminated
        line.append('%d'%data[3][2]) # task is being halted
        line.append('0x%.8X'%data[3][3]) # VM_MAP
        line.append('%s'%str(data[5])) # PID
        line.append('%s'%data[6]) # Process Name
        line.append('%s'%data[7]) # User Name
        #line.append('%s'%data[8]) # proc.tasks -> Task ptr
        #line.append('%s'%data[9]) # task.bsd_info -> proc ptr
        #line.append('%s'%time.strftime("%a %b %d %H:%M:%S %Y", time.gmtime(data[14])))
        line.append('')
        contentlist.append(line)
    # use optional max size list here to match default lsof output, otherwise specify
    # lsof +c 0 on the command line to print full name of commands
    mszlist = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
    columnprint(headerlist, contentlist, mszlist)
|
qmarlats/pyquizz | refs/heads/master | env-3/lib/python3.5/site-packages/sphinx/ext/coverage.py | 3 | # -*- coding: utf-8 -*-
"""
sphinx.ext.coverage
~~~~~~~~~~~~~~~~~~~
Check Python modules and C API for coverage. Mostly written by Josip
Dzolonga for the Google Highly Open Participation contest.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import glob
import inspect
from os import path
from six import iteritems
from six.moves import cPickle as pickle
import sphinx
from sphinx.builders import Builder
from sphinx.util.inspect import safe_getattr
# utility
def write_header(f, text, char='-'):
    """Write *text* to *f*, underlined by a row of *char* of equal length."""
    underline = char * len(text)
    f.write('%s\n%s\n' % (text, underline))
def compile_regex_list(name, exps, warnfunc):
    """Compile each pattern in *exps*; report unparsable ones via *warnfunc*.

    Invalid patterns are skipped (a warning naming the config option *name*
    is emitted for each), so the returned list may be shorter than *exps*.
    """
    compiled = []
    for pattern in exps:
        try:
            compiled.append(re.compile(pattern))
        except Exception:
            warnfunc('invalid regex %r in %s' % (pattern, name))
    return compiled
class CoverageBuilder(Builder):
    """Sphinx builder that reports Python and C API items lacking documentation.

    Results are written to ``python.txt`` / ``c.txt`` in the output dir and
    pickled to ``undoc.pickle`` for programmatic use.
    """
    name = 'coverage'
    def init(self):
        """Resolve configured C source globs and compile all ignore regexes."""
        self.c_sourcefiles = []
        for pattern in self.config.coverage_c_path:
            pattern = path.join(self.srcdir, pattern)
            self.c_sourcefiles.extend(glob.glob(pattern))
        self.c_regexes = []
        for (name, exp) in self.config.coverage_c_regexes.items():
            try:
                self.c_regexes.append((name, re.compile(exp)))
            except Exception:
                self.warn('invalid regex %r in coverage_c_regexes' % exp)
        self.c_ignorexps = {}
        for (name, exps) in iteritems(self.config.coverage_ignore_c_items):
            self.c_ignorexps[name] = compile_regex_list(
                'coverage_ignore_c_items', exps, self.warn)
        self.mod_ignorexps = compile_regex_list(
            'coverage_ignore_modules', self.config.coverage_ignore_modules,
            self.warn)
        self.cls_ignorexps = compile_regex_list(
            'coverage_ignore_classes', self.config.coverage_ignore_classes,
            self.warn)
        self.fun_ignorexps = compile_regex_list(
            'coverage_ignore_functions', self.config.coverage_ignore_functions,
            self.warn)
    def get_outdated_docs(self):
        # Always rebuild: coverage has no incremental notion of "outdated".
        return 'coverage overview'
    def write(self, *ignored):
        """Run both coverage passes and emit their report files."""
        self.py_undoc = {}
        self.build_py_coverage()
        self.write_py_coverage()
        self.c_undoc = {}
        self.build_c_coverage()
        self.write_c_coverage()
    def build_c_coverage(self):
        """Scan configured C sources for API names missing from the C domain."""
        # Fetch all the info from the header files
        c_objects = self.env.domaindata['c']['objects']
        for filename in self.c_sourcefiles:
            undoc = set()
            f = open(filename, 'r')
            try:
                for line in f:
                    for key, regex in self.c_regexes:
                        match = regex.match(line)
                        if match:
                            name = match.groups()[0]
                            if name not in c_objects:
                                for exp in self.c_ignorexps.get(key, ()):
                                    if exp.match(name):
                                        break
                                else:
                                    undoc.add((key, name))
                            continue
            finally:
                f.close()
            if undoc:
                self.c_undoc[filename] = undoc
    def write_c_coverage(self):
        """Write the undocumented-C-items report to <outdir>/c.txt."""
        output_file = path.join(self.outdir, 'c.txt')
        op = open(output_file, 'w')
        try:
            if self.config.coverage_write_headline:
                write_header(op, 'Undocumented C API elements', '=')
            op.write('\n')
            for filename, undoc in iteritems(self.c_undoc):
                write_header(op, filename)
                for typ, name in sorted(undoc):
                    op.write(' * %-50s [%9s]\n' % (name, typ))
                op.write('\n')
        finally:
            op.close()
    def build_py_coverage(self):
        """Import each documented module and record undocumented members.

        Fills self.py_undoc with per-module dicts: either {'error': exc}
        when the import failed, or {'funcs': [...], 'classes': {...}}.
        """
        objects = self.env.domaindata['py']['objects']
        modules = self.env.domaindata['py']['modules']
        skip_undoc = self.config.coverage_skip_undoc_in_source
        for mod_name in modules:
            ignore = False
            for exp in self.mod_ignorexps:
                if exp.match(mod_name):
                    ignore = True
                    break
            if ignore:
                continue
            try:
                mod = __import__(mod_name, fromlist=['foo'])
            except ImportError as err:
                self.warn('module %s could not be imported: %s' %
                          (mod_name, err))
                self.py_undoc[mod_name] = {'error': err}
                continue
            funcs = []
            classes = {}
            for name, obj in inspect.getmembers(mod):
                # diverse module attributes are ignored:
                if name[0] == '_':
                    # begins in an underscore
                    continue
                if not hasattr(obj, '__module__'):
                    # cannot be attributed to a module
                    continue
                if obj.__module__ != mod_name:
                    # is not defined in this module
                    continue
                full_name = '%s.%s' % (mod_name, name)
                if inspect.isfunction(obj):
                    if full_name not in objects:
                        for exp in self.fun_ignorexps:
                            if exp.match(name):
                                break
                        else:
                            if skip_undoc and not obj.__doc__:
                                continue
                            funcs.append(name)
                elif inspect.isclass(obj):
                    for exp in self.cls_ignorexps:
                        if exp.match(name):
                            break
                    else:
                        if full_name not in objects:
                            if skip_undoc and not obj.__doc__:
                                continue
                            # not documented at all
                            classes[name] = []
                            continue
                        attrs = []
                        for attr_name in dir(obj):
                            if attr_name not in obj.__dict__:
                                continue
                            try:
                                attr = safe_getattr(obj, attr_name)
                            except AttributeError:
                                continue
                            if not (inspect.ismethod(attr) or
                                    inspect.isfunction(attr)):
                                continue
                            if attr_name[0] == '_':
                                # starts with an underscore, ignore it
                                continue
                            if skip_undoc and not attr.__doc__:
                                # skip methods without docstring if wished
                                continue
                            full_attr_name = '%s.%s' % (full_name, attr_name)
                            if full_attr_name not in objects:
                                attrs.append(attr_name)
                        if attrs:
                            # some attributes are undocumented
                            classes[name] = attrs
            self.py_undoc[mod_name] = {'funcs': funcs, 'classes': classes}
    def write_py_coverage(self):
        """Write the undocumented-Python-objects report to <outdir>/python.txt."""
        output_file = path.join(self.outdir, 'python.txt')
        op = open(output_file, 'w')
        failed = []
        try:
            if self.config.coverage_write_headline:
                write_header(op, 'Undocumented Python objects', '=')
            keys = sorted(self.py_undoc.keys())
            for name in keys:
                undoc = self.py_undoc[name]
                if 'error' in undoc:
                    failed.append((name, undoc['error']))
                else:
                    if not undoc['classes'] and not undoc['funcs']:
                        continue
                    write_header(op, name)
                    if undoc['funcs']:
                        op.write('Functions:\n')
                        op.writelines(' * %s\n' % x for x in undoc['funcs'])
                        op.write('\n')
                    if undoc['classes']:
                        op.write('Classes:\n')
                        for name, methods in sorted(
                                iteritems(undoc['classes'])):
                            if not methods:
                                op.write(' * %s\n' % name)
                            else:
                                op.write(' * %s -- missing methods:\n\n' % name)
                                op.writelines('   - %s\n' % x for x in methods)
                        op.write('\n')
            if failed:
                write_header(op, 'Modules that failed to import')
                op.writelines(' * %s -- %s\n' % x for x in failed)
        finally:
            op.close()
    def finish(self):
        """Pickle the raw coverage data to <outdir>/undoc.pickle."""
        # dump the coverage data to a pickle file too
        picklepath = path.join(self.outdir, 'undoc.pickle')
        dumpfile = open(picklepath, 'wb')
        try:
            pickle.dump((self.py_undoc, self.c_undoc), dumpfile)
        finally:
            dumpfile.close()
dumpfile.close()
def setup(app):
    """Register the coverage builder and its config values with Sphinx."""
    app.add_builder(CoverageBuilder)
    defaults = [
        ('coverage_ignore_modules', []),
        ('coverage_ignore_functions', []),
        ('coverage_ignore_classes', []),
        ('coverage_c_path', []),
        ('coverage_c_regexes', {}),
        ('coverage_ignore_c_items', {}),
        ('coverage_write_headline', True),
        ('coverage_skip_undoc_in_source', False),
    ]
    for option, default in defaults:
        app.add_config_value(option, default, False)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
|
Distrotech/bzr | refs/heads/distrotech-bzr | bzrlib/tests/test__chk_map.py | 2 | # Copyright (C) 2009, 2010, 2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for _chk_map_*."""
from bzrlib import (
chk_map,
tests,
)
from bzrlib.static_tuple import StaticTuple
stuple = StaticTuple
def load_tests(standard_tests, module, loader):
    """Parameterize every test in this module over both implementations.

    Each test class runs once with the pure-Python module
    (``bzrlib._chk_map_py``) and once with the compiled extension
    (``bzrlib._chk_map_pyx``); the ``module`` class attribute is filled
    in accordingly by the permutation helper.
    """
    suite, _ = tests.permute_tests_for_extension(standard_tests, loader,
        'bzrlib._chk_map_py', 'bzrlib._chk_map_pyx')
    return suite
class TestSearchKeys(tests.TestCase):
    """Tests for the _search_key_16 / _search_key_255 key-hash functions."""

    module = None # Filled in by test parameterization

    def assertSearchKey16(self, expected, key):
        self.assertEqual(expected, self.module._search_key_16(key))

    def assertSearchKey255(self, expected, key):
        actual = self.module._search_key_255(key)
        self.assertEqual(expected, actual, 'actual: %r' % (actual,))

    def test_simple_16(self):
        # One 8-char hex token per key element, tokens joined by NUL bytes.
        self.assertSearchKey16('8C736521', stuple('foo',))
        self.assertSearchKey16('8C736521\x008C736521', stuple('foo', 'foo'))
        self.assertSearchKey16('8C736521\x0076FF8CAA', stuple('foo', 'bar'))
        self.assertSearchKey16('ED82CD11', stuple('abcd',))

    def test_simple_255(self):
        # One 4-byte token per key element, tokens joined by NUL bytes.
        self.assertSearchKey255('\x8cse!', stuple('foo',))
        self.assertSearchKey255('\x8cse!\x00\x8cse!', stuple('foo', 'foo'))
        self.assertSearchKey255('\x8cse!\x00v\xff\x8c\xaa', stuple('foo', 'bar'))
        # The standard mapping for these would include '\n', so it should be
        # mapped to '_'
        self.assertSearchKey255('\xfdm\x93_\x00P_\x1bL', stuple('<', 'V'))

    def test_255_does_not_include_newline(self):
        # When mapping via _search_key_255, we should never have the '\n'
        # character, but all other 255 values should be present
        chars_used = set()
        for char_in in range(256):
            search_key = self.module._search_key_255(stuple(chr(char_in),))
            chars_used.update(search_key)
        all_chars = set([chr(x) for x in range(256)])
        unused_chars = all_chars.symmetric_difference(chars_used)
        self.assertEqual(set('\n'), unused_chars)
class TestDeserialiseLeafNode(tests.TestCase):
    """Tests for _deserialise_leaf_node over both implementations."""

    module = None  # Filled in by test parameterization

    def assertDeserialiseErrors(self, text):
        # Malformed input may surface as either ValueError or IndexError
        # depending on the implementation; accept both.
        self.assertRaises((ValueError, IndexError),
            self.module._deserialise_leaf_node, text, 'not-a-real-sha')

    def test_raises_on_non_leaf(self):
        # Truncated headers, wrong record type, and non-integer fields must
        # all be rejected.
        self.assertDeserialiseErrors('')
        self.assertDeserialiseErrors('short\n')
        self.assertDeserialiseErrors('chknotleaf:\n')
        self.assertDeserialiseErrors('chkleaf:x\n')
        self.assertDeserialiseErrors('chkleaf:\n')
        self.assertDeserialiseErrors('chkleaf:\nnotint\n')
        self.assertDeserialiseErrors('chkleaf:\n10\n')
        self.assertDeserialiseErrors('chkleaf:\n10\n256\n')
        self.assertDeserialiseErrors('chkleaf:\n10\n256\n10\n')

    def test_deserialise_empty(self):
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n10\n1\n0\n\n", stuple("sha1:1234",))
        self.assertEqual(0, len(node))
        self.assertEqual(10, node.maximum_size)
        self.assertEqual(("sha1:1234",), node.key())
        self.assertIsInstance(node.key(), StaticTuple)
        self.assertIs(None, node._search_prefix)
        self.assertIs(None, node._common_serialised_prefix)

    def test_deserialise_items(self):
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
            ("sha1:1234",))
        self.assertEqual(2, len(node))
        self.assertEqual([(("foo bar",), "baz"), (("quux",), "blarh")],
            sorted(node.iteritems(None)))

    def test_deserialise_item_with_null_width_1(self):
        # A NUL inside the value must survive deserialisation (width 1 key).
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n0\n1\n2\n\nfoo\x001\nbar\x00baz\nquux\x001\nblarh\n",
            ("sha1:1234",))
        self.assertEqual(2, len(node))
        self.assertEqual([(("foo",), "bar\x00baz"), (("quux",), "blarh")],
            sorted(node.iteritems(None)))

    def test_deserialise_item_with_null_width_2(self):
        # Width-2 keys: NUL separates the key elements as well.
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n0\n2\n2\n\nfoo\x001\x001\nbar\x00baz\n"
            "quux\x00\x001\nblarh\n",
            ("sha1:1234",))
        self.assertEqual(2, len(node))
        self.assertEqual([(("foo", "1"), "bar\x00baz"), (("quux", ""), "blarh")],
            sorted(node.iteritems(None)))

    def test_iteritems_selected_one_of_two_items(self):
        # Filtering by key filters the iteration; a missing key is ignored.
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
            ("sha1:1234",))
        self.assertEqual(2, len(node))
        self.assertEqual([(("quux",), "blarh")],
            sorted(node.iteritems(None, [("quux",), ("qaz",)])))

    def test_deserialise_item_with_common_prefix(self):
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n0\n2\n2\nfoo\x00\n1\x001\nbar\x00baz\n2\x001\nblarh\n",
            ("sha1:1234",))
        self.assertEqual(2, len(node))
        self.assertEqual([(("foo", "1"), "bar\x00baz"), (("foo", "2"), "blarh")],
            sorted(node.iteritems(None)))
        self.assertIs(chk_map._unknown, node._search_prefix)
        self.assertEqual('foo\x00', node._common_serialised_prefix)

    def test_deserialise_multi_line(self):
        # Values may span multiple lines; the line count precedes each value.
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n0\n2\n2\nfoo\x00\n1\x002\nbar\nbaz\n2\x002\nblarh\n\n",
            ("sha1:1234",))
        self.assertEqual(2, len(node))
        self.assertEqual([(("foo", "1"), "bar\nbaz"),
                          (("foo", "2"), "blarh\n"),
                         ], sorted(node.iteritems(None)))
        self.assertIs(chk_map._unknown, node._search_prefix)
        self.assertEqual('foo\x00', node._common_serialised_prefix)

    def test_key_after_map(self):
        # Mutating the node invalidates its key until re-serialised.
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n10\n1\n0\n\n", ("sha1:1234",))
        node.map(None, ("foo bar",), "baz quux")
        self.assertEqual(None, node.key())

    def test_key_after_unmap(self):
        node = self.module._deserialise_leaf_node(
            "chkleaf:\n0\n1\n2\n\nfoo bar\x001\nbaz\nquux\x001\nblarh\n",
            ("sha1:1234",))
        node.unmap(None, ("foo bar",))
        self.assertEqual(None, node.key())
class TestDeserialiseInternalNode(tests.TestCase):
    """Tests for _deserialise_internal_node over both implementations."""

    module = None  # Filled in by test parameterization

    def assertDeserialiseErrors(self, text):
        # Malformed input may surface as either ValueError or IndexError
        # depending on the implementation; accept both.
        self.assertRaises((ValueError, IndexError),
            self.module._deserialise_internal_node, text,
            stuple('not-a-real-sha',))

    def test_raises_on_non_internal(self):
        self.assertDeserialiseErrors('')
        self.assertDeserialiseErrors('short\n')
        self.assertDeserialiseErrors('chknotnode:\n')
        self.assertDeserialiseErrors('chknode:x\n')
        self.assertDeserialiseErrors('chknode:\n')
        self.assertDeserialiseErrors('chknode:\nnotint\n')
        self.assertDeserialiseErrors('chknode:\n10\n')
        self.assertDeserialiseErrors('chknode:\n10\n256\n')
        self.assertDeserialiseErrors('chknode:\n10\n256\n10\n')
        # no trailing newline
        self.assertDeserialiseErrors('chknode:\n10\n256\n0\n1\nfo')

    def test_deserialise_one(self):
        node = self.module._deserialise_internal_node(
            "chknode:\n10\n1\n1\n\na\x00sha1:abcd\n", stuple('sha1:1234',))
        self.assertIsInstance(node, chk_map.InternalNode)
        self.assertEqual(1, len(node))
        self.assertEqual(10, node.maximum_size)
        self.assertEqual(("sha1:1234",), node.key())
        self.assertEqual('', node._search_prefix)
        self.assertEqual({'a': ('sha1:abcd',)}, node._items)

    def test_deserialise_with_prefix(self):
        # The serialised common prefix is prepended to every item key.
        node = self.module._deserialise_internal_node(
            "chknode:\n10\n1\n1\npref\na\x00sha1:abcd\n", stuple('sha1:1234',))
        self.assertIsInstance(node, chk_map.InternalNode)
        self.assertEqual(1, len(node))
        self.assertEqual(10, node.maximum_size)
        self.assertEqual(("sha1:1234",), node.key())
        self.assertEqual('pref', node._search_prefix)
        self.assertEqual({'prefa': ('sha1:abcd',)}, node._items)

        # An empty per-item suffix yields the prefix itself as item key.
        node = self.module._deserialise_internal_node(
            "chknode:\n10\n1\n1\npref\n\x00sha1:abcd\n", stuple('sha1:1234',))
        self.assertIsInstance(node, chk_map.InternalNode)
        self.assertEqual(1, len(node))
        self.assertEqual(10, node.maximum_size)
        self.assertEqual(("sha1:1234",), node.key())
        self.assertEqual('pref', node._search_prefix)
        self.assertEqual({'pref': ('sha1:abcd',)}, node._items)

    def test_deserialise_pref_with_null(self):
        # NUL bytes are legal inside the serialised prefix.
        node = self.module._deserialise_internal_node(
            "chknode:\n10\n1\n1\npref\x00fo\n\x00sha1:abcd\n",
            stuple('sha1:1234',))
        self.assertIsInstance(node, chk_map.InternalNode)
        self.assertEqual(1, len(node))
        self.assertEqual(10, node.maximum_size)
        self.assertEqual(("sha1:1234",), node.key())
        self.assertEqual('pref\x00fo', node._search_prefix)
        self.assertEqual({'pref\x00fo': ('sha1:abcd',)}, node._items)

    def test_deserialise_with_null_pref(self):
        # A NUL-only item suffix is appended to the NUL-containing prefix.
        node = self.module._deserialise_internal_node(
            "chknode:\n10\n1\n1\npref\x00fo\n\x00\x00sha1:abcd\n",
            stuple('sha1:1234',))
        self.assertIsInstance(node, chk_map.InternalNode)
        self.assertEqual(1, len(node))
        self.assertEqual(10, node.maximum_size)
        self.assertEqual(("sha1:1234",), node.key())
        self.assertEqual('pref\x00fo', node._search_prefix)
        self.assertEqual({'pref\x00fo\x00': ('sha1:abcd',)}, node._items)
class Test_BytesToTextKey(tests.TestCase):
    """Tests for _bytes_to_text_key parsing of inventory entry bytes."""

    # NOTE(review): unlike the sibling classes, no ``module = None`` default
    # is declared here; it appears to rely on test parameterization to set
    # ``self.module`` -- confirm against load_tests.

    def assertBytesToTextKey(self, key, bytes):
        self.assertEqual(key,
                         self.module._bytes_to_text_key(bytes))

    def assertBytesToTextKeyRaises(self, bytes):
        # These are invalid bytes, and we want to make sure the code under test
        # raises an exception rather than segfaults, etc. We don't particularly
        # care what exception.
        self.assertRaises(Exception, self.module._bytes_to_text_key, bytes)

    def test_file(self):
        # Valid record: "<kind>: <file-id>\n<parent-id>\n<name>\n<revision-id>\n..."
        # The text key is (file-id, revision-id).
        self.assertBytesToTextKey(('file-id', 'revision-id'),
            'file: file-id\nparent-id\nname\nrevision-id\n'
            'da39a3ee5e6b4b0d3255bfef95601890afd80709\n100\nN')

    def test_invalid_no_kind(self):
        self.assertBytesToTextKeyRaises(
            'file file-id\nparent-id\nname\nrevision-id\n'
            'da39a3ee5e6b4b0d3255bfef95601890afd80709\n100\nN')

    def test_invalid_no_space(self):
        self.assertBytesToTextKeyRaises(
            'file:file-id\nparent-id\nname\nrevision-id\n'
            'da39a3ee5e6b4b0d3255bfef95601890afd80709\n100\nN')

    def test_invalid_too_short_file_id(self):
        self.assertBytesToTextKeyRaises('file:file-id')

    def test_invalid_too_short_parent_id(self):
        self.assertBytesToTextKeyRaises('file:file-id\nparent-id')

    def test_invalid_too_short_name(self):
        self.assertBytesToTextKeyRaises('file:file-id\nparent-id\nname')

    def test_dir(self):
        self.assertBytesToTextKey(('dir-id', 'revision-id'),
            'dir: dir-id\nparent-id\nname\nrevision-id')
|
richard-willowit/odoo | refs/heads/master | addons/website_event/controllers/__init__.py | 738 | # -*- coding: utf-8 -*-
from . import main
|
petewarden/tensorflow | refs/heads/master | tensorflow/python/keras/layers/preprocessing/benchmarks/bucketized_column_dense_benchmark.py | 6 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of bucketized columns with dense inputs."""
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.eager.def_function import function as tf_function
from tensorflow.python.feature_column import feature_column_v2 as fcv2
from tensorflow.python.framework import dtypes as dt
from tensorflow.python.keras.layers.preprocessing import discretization
from tensorflow.python.keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
from tensorflow.python.platform import test as tf_test
# This is required as of 3/2021 because otherwise we drop into graph mode.
v2_compat.enable_v2_behavior()
NUM_REPEATS = 10 # The number of times to run each benchmark.
BATCH_SIZES = [32, 256]
### KPL AND FC IMPLEMENTATION BENCHMARKS ###
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length embedding.

  Builds the same bucketization twice -- once as a Keras Discretization
  layer and once as a feature-column bucketized_column -- runs both on
  identical dense data, and returns (keras_avg_time, fc_avg_time).
  """
  # Data and constants.
  max_value = 25.0
  bins = np.arange(1.0, max_value)
  data = fc_bm.create_data(
      max_length, batch_size * NUM_REPEATS, 100000, dtype=float)

  # Keras implementation
  model = keras.Sequential()
  model.add(keras.Input(shape=(max_length,), name="data", dtype=dt.float32))
  model.add(discretization.Discretization(bins))

  # FC implementation
  fc = fcv2.bucketized_column(
      fcv2.numeric_column("data"), boundaries=list(bins))

  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(fcv2.FeatureTransformationCache(tensors), None)

  # Benchmark runs. Both paths see the same densified ragged data.
  keras_data = {"data": data.to_tensor(default_value=0.0)}
  k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)

  fc_data = {"data": data.to_tensor(default_value=0.0)}
  fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)

  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""

  def benchmark_layer(self):
    # Compare the Keras preprocessing layer against the feature-column
    # implementation for each configured batch size.
    for batch in BATCH_SIZES:
      name = "bucketized|dense|batch_%s" % batch
      k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
      self.report(name, k_time, f_time, NUM_REPEATS)
# Allow running the benchmark file directly.
if __name__ == "__main__":
  tf_test.main()
|
legalsylvain/OpenUpgrade | refs/heads/master | addons/mail/tests/common.py | 40 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMail(common.TransactionCase):
    """Common fixture for the mail module tests.

    setUp() replaces the outgoing SMTP gateway with mocks (recording
    build_email arguments instead of sending) and creates a standard set
    of users, partners and mail.group records; tearDown() restores the
    real gateway methods.
    """

    def _mock_smtp_gateway(self, *args, **kwargs):
        # Pretend the mail was sent; echo back its Message-Id.
        return args[2]['Message-Id']

    def _init_mock_build_email(self):
        # Reset the recorded build_email call arguments.
        self._build_email_args_list = []
        self._build_email_kwargs_list = []

    def _mock_build_email(self, *args, **kwargs):
        """ Mock build_email to be able to test its values. Store them into
        some internal variable for later processing. """
        self._build_email_args_list.append(args)
        self._build_email_kwargs_list.append(kwargs)
        return self._build_email(*args, **kwargs)

    def setUp(self):
        super(TestMail, self).setUp()
        cr, uid = self.cr, self.uid

        # Install mock SMTP gateway (originals kept for tearDown)
        self._init_mock_build_email()
        self._build_email = self.registry('ir.mail_server').build_email
        self.registry('ir.mail_server').build_email = self._mock_build_email
        self._send_email = self.registry('ir.mail_server').send_email
        self.registry('ir.mail_server').send_email = self._mock_smtp_gateway

        # Useful models
        self.ir_model = self.registry('ir.model')
        self.ir_model_data = self.registry('ir.model.data')
        self.ir_attachment = self.registry('ir.attachment')
        self.mail_alias = self.registry('mail.alias')
        self.mail_thread = self.registry('mail.thread')
        self.mail_group = self.registry('mail.group')
        self.mail_mail = self.registry('mail.mail')
        self.mail_message = self.registry('mail.message')
        self.mail_notification = self.registry('mail.notification')
        self.mail_followers = self.registry('mail.followers')
        self.mail_message_subtype = self.registry('mail.message.subtype')
        self.res_users = self.registry('res.users')
        self.res_partner = self.registry('res.partner')

        # Find Employee group
        group_employee_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
        self.group_employee_id = group_employee_ref and group_employee_ref[1] or False

        # Partner Data

        # User Data: employee, noone
        self.user_employee_id = self.res_users.create(cr, uid, {
            'name': 'Ernest Employee',
            'login': 'ernest',
            'alias_name': 'ernest',
            'email': 'e.e@example.com',
            'signature': '--\nErnest',
            'notify_email': 'always',
            'groups_id': [(6, 0, [self.group_employee_id])]
        }, {'no_reset_password': True})
        self.user_noone_id = self.res_users.create(cr, uid, {
            'name': 'Noemie NoOne',
            'login': 'noemie',
            'alias_name': 'noemie',
            'email': 'n.n@example.com',
            'signature': '--\nNoemie',
            'notify_email': 'always',
            'groups_id': [(6, 0, [])]
        }, {'no_reset_password': True})

        # Test users to use through the various tests
        self.res_users.write(cr, uid, uid, {'name': 'Administrator'})
        self.user_raoul_id = self.res_users.create(cr, uid, {
            'name': 'Raoul Grosbedon',
            'signature': 'SignRaoul',
            'email': 'raoul@raoul.fr',
            'login': 'raoul',
            'alias_name': 'raoul',
            'groups_id': [(6, 0, [self.group_employee_id])]
        })
        self.user_bert_id = self.res_users.create(cr, uid, {
            'name': 'Bert Tartignole',
            'signature': 'SignBert',
            'email': 'bert@bert.fr',
            'login': 'bert',
            'alias_name': 'bert',
            'groups_id': [(6, 0, [])]
        })
        self.user_raoul = self.res_users.browse(cr, uid, self.user_raoul_id)
        self.user_bert = self.res_users.browse(cr, uid, self.user_bert_id)
        self.user_admin = self.res_users.browse(cr, uid, uid)
        self.partner_admin_id = self.user_admin.partner_id.id
        self.partner_raoul_id = self.user_raoul.partner_id.id
        self.partner_bert_id = self.user_bert.partner_id.id

        # Test 'pigs' group to use through the various tests
        self.group_pigs_id = self.mail_group.create(
            cr, uid,
            {'name': 'Pigs', 'description': 'Fans of Pigs, unite !', 'alias_name': 'group+pigs'},
            {'mail_create_nolog': True}
        )
        self.group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
        # Test mail.group: public to provide access to everyone
        self.group_jobs_id = self.mail_group.create(cr, uid, {'name': 'Jobs', 'public': 'public'})
        # Test mail.group: private to restrict access
        self.group_priv_id = self.mail_group.create(cr, uid, {'name': 'Private', 'public': 'private'})

    def tearDown(self):
        # Remove mocks: restore the real gateway methods.
        self.registry('ir.mail_server').build_email = self._build_email
        self.registry('ir.mail_server').send_email = self._send_email
        super(TestMail, self).tearDown()
|
tokyo-jesus/oosmos | refs/heads/master | Examples/Sync/Linux/bld.py | 1 | #!/usr/bin/env python
# Build script: compiles the OOSMOS "Sync" example for Linux.

# Path of the OOSMOS repository root, relative to this example directory.
oosmos_dir = '../../..'

import sys
sys.path.append(oosmos_dir)  # make the top-level `oosmos` build module importable
import oosmos

# Shared sources pulled in from the Classes/ and Source/ trees.
tests_dir = oosmos_dir+'/Classes/Tests'
classes_dir = oosmos_dir+'/Classes'
prt_c = classes_dir+'/prt.c'
oosmos_c = oosmos_dir+'/Source/oosmos.c'
synctest_c = oosmos_dir+'/Classes/Tests/synctest.c'

# Compile the example entry point plus its dependencies into `main`.
oosmos.cLinux.Compile(oosmos_dir, 'main', ['main.c',synctest_c,prt_c,oosmos_c])
|
memsharded/conan | refs/heads/develop | conans/test/unittests/util/local_db_test.py | 1 | import os
import unittest
from conans.client.store.localdb import LocalDB
from conans.test.utils.test_files import temp_folder
class LocalStoreTest(unittest.TestCase):
    """Tests for the LocalDB credential store (get/set login round-trip)."""

    # NOTE(review): named `localdb_test` (suffix style) rather than the
    # unittest-default `test_*` prefix; collection relies on the project's
    # test runner matching `_test` names -- confirm before renaming.
    def localdb_test(self):
        tmp_dir = temp_folder()
        db_file = os.path.join(tmp_dir, "dbfile")
        localdb = LocalDB.create(db_file)

        # Test write and read login: a fresh DB has no credentials stored.
        user, token = localdb.get_login("myurl1")
        self.assertIsNone(user)
        self.assertIsNone(token)

        # Stored credentials round-trip for the same remote URL.
        localdb.set_login(("pepe", "token"), "myurl1")
        user, token = localdb.get_login("myurl1")
        self.assertEqual("pepe", user)
        self.assertEqual("token", token)
        self.assertEqual("pepe", localdb.get_username("myurl1"))
|
wa1tnr/ainsuSPI | refs/heads/master | 0-Distribution.d/circuitpython-master/tests/io/bytesio_cow.py | 40 | # Make sure that write operations on io.BytesIO don't
# Writing into an io.BytesIO must never mutate the bytes or bytearray
# object the stream was constructed from (the initial buffer is copied).
try:
    import uio as io
except ImportError:
    import io

# Exercise both an immutable and a mutable source buffer.
for source in (b"foobar", bytearray(b"foobar")):
    stream = io.BytesIO(source)
    stream.write(b"1")
    print(source)           # original object is unchanged
    print(stream.getvalue())  # stream contents reflect the write
|
rslayer/bhulan | refs/heads/master | constants.py | 1 | LAT_KEY = "lat"
LON_KEY = "lon"
PATENT_KEY = "patent"
TIMESTAMP_KEY = "timestamp"
TEMPERATURE_KEY = "temperature"
DRIVER_KEY = "driver"
COMMUNE_KEY = "commune"
CAPACITY_KEY = "capacity"
M_LAT_KEY = "mLat"
M_LON_KEY = "mLon"
VELOCITY_KEY = "velocity"
TRUCK_ID_KEY = "truckId"
TIME_KEY = "time"
TIME_START_KEY = "timeStart"
TIME_STOP_KEY = "timeStop"
RADIUS_KEY = "radius"
START_STOP_KEY = "startStop"
DATE_NUM_KEY = "dateNum"
CONSTRAINT_KEY = "constraint"
DATE_KEY = "date"
TRUCK_POINTS_KEY = "truckPoints"
TRUCKS_KEY = "trucks"
TRUCKS_DB_KEY = "trucks"
COMPUTED_DB_KEY = "computed"
TRUCK_DATE_COMBOS_KEY = "truckDateCombos"
POINTS_KEY = "points"
POINT_KEY = "point"
AVAILABILITY_KEY = "availability"
TRUCK_DATES_KEY = "truckDates"
ROUTE_CENTERS_KEY = "routeCenters"
CHILE_DATA_DB_KEY = "chileData"
CHILE_MAP_DB_KEY = "chileMap"
WATTS_DATA_DB_KEY = "wattsData"
#import_train
TRAINING_DB_KEY = "training"
TRAIN_DB_KEY = "train"
ROAD_DB_KEY = "roads"
INPUT_KEY = "input"
OUTPUT_KEY = "output"
FILE_NUM_KEY = "fileNum"
EDGE_ID_KEY = "edgeId"
CONF_KEY = "conf"
ID_KEY = "id"
FOR_NEIGHBORS_KEY = "forNeighbors"
BACK_NEIGHBORS_KEY = "backNeighbors"
GRAPH_KEY = "graph"
DIRECTION_KEY = "direction"
REVERSE_KEY = "reverse"
FORWARD_KEY = "forward"
BACKWARD_KEY = "backward"
NODES_KEY = "nodes"
EDGES_KEY = "edges"
START_NODE_KEY = "startNodeId"
END_NODE_KEY = "endNodeId"
COST_KEY = "cost"
START_LAT_KEY = "startLat"
START_LON_KEY = "startLon"
END_LAT_KEY = "endLat"
END_LON_KEY = "endLon"
COMPUTED_KEY = "computed"
START_POINT_KEY = "startPoint"
END_POINT_KEY = "endPoint"
CANDIDATE_KEY = "candidate"
INPUT_ID_KEY = 'inputId'
MINI_EDGES_KEY = "miniEdges"
MINI_NODES_KEY = "miniNodes"
NODE_EDGES_KEY = "nodeEdges"
LENGTH_KEY = "length"
CELL_ID_KEY = "cellId"
CELLS_KEY = "cells"
_ID_KEY = "_id"
YO_KEY = "_id"
MAX_DEGREE_KEY = "maxDegree"
KEY = "key"
VALUE_KEY = "value"
GRID_INDEXES_KEY = "gridIndexes"
START_NUM_KEY = "startNum"
END_NUM_KEY = "endNum"
END_KEY = "end"
START_KEY = "start"
NODE_ID_KEY = "nodeId"
NODE_DB_ID_KEY = "node_id"
EDGE_ID_KEY = "edgeId"
EDGE_DB_ID_KEY = 'edge_id'
SEGMENT_ID_KEY = "segmentId"
SEGMENT_DB_ID_KEY = 'segment_id'
START_EDGES_KEY = "startEdges"
END_EDGES_KEY = "endEdges"
BIG_DATA_DB_KEY = "bigData"
OSM_GRAPH_KEY = "osmGraph"
MIN_LAT_KEY = "minLat"
MIN_LON_KEY = "minLon"
MAX_LAT_KEY = "maxLat"
MAX_LON_KEY = "maxLon"
MAX_MINS_KEY = "maxMins"
CELL_IDS_KEY = "cellIds"
MINI_EDGE_CELL_IDS_KEY = "miniEdgeCellIds"
START_MINI_NODE_ID_KEY = "startMiniNodeId"
END_MINI_NODE_ID_KEY = "endMiniNodeId"
REMAINING_KEY = "remaining"
DB_KEY = "db"
DB_ID_KEY = "dbId"
MONGO_ID_KEY = "_id"
ITEM_KEY = "item"
DISTANCE_KEY = "distance"
PRIORITY_KEY = "priority"
MARKED_KEY = "marked"
HEAP_ID_KEY = "heapId"
VISITED_KEY = "visited"
DESCRIPTION_KEY = "description"
STATE_KEY = "state"
TEST_EDGES_KEY = "testEdges"
TEST_NODES_KEY = "testNodes"
ADDRESS_KEY = "address"
STOPS_KEY = "stops"
STOP_PROPS_KEY = "stopProps"
STOP_PROP_ID_KEY = "stopPropId"
DURATION_KEY = "duration"
TOO_BIG = 16000000
CELL_SIZE = 50
MONTH_NUM = 31 |
hakiri/core | refs/heads/master | daemon/core/misc/xmlsession.py | 7 | #
# CORE
# Copyright (c)2011-2014 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
Helpers for loading and saving XML files. savesessionxml(session, filename) is
the main public interface here.
'''
import os.path
from core.netns import nodes
from xmlparser import core_document_parser
from xmlwriter import core_document_writer
def opensessionxml(session, filename, start=False, nodecls=nodes.CoreNode):
    ''' Import a session from the EmulationScript XML format.

    session  -- CORE session object to populate
    filename -- path of the XML file to load
    start    -- when True, also record session metadata and instantiate it
    nodecls  -- node class used when creating nodes from the XML
    '''
    options = {'start': start, 'nodecls': nodecls}
    # The parser constructor does the work; its return value is unused
    # (presumably the session is populated as a side effect -- confirm in
    # xmlparser.core_document_parser).
    doc = core_document_parser(session, filename, options)
    if start:
        session.name = os.path.basename(filename)
        session.filename = filename
        session.node_count = str(session.getnodecount())
        session.instantiate()
def savesessionxml(session, filename, version):
    ''' Export a session to the EmulationScript XML format.

    session  -- CORE session object to serialize
    filename -- destination path for the XML document
    version  -- document version passed to the XML writer
    '''
    doc = core_document_writer(session, version)
    doc.writexml(filename)
|
GabMus/techpillswebsite | refs/heads/master | techpills/blog/admin.py | 1 | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Article)
|
ctippur/swaggeralliance | refs/heads/master | src/python/flask/swagger_server/lib/__init__.py | 1 | __all__ = ["aws"]
|
levilucio/SyVOLT | refs/heads/master | GM2AUTOSAR_MM/graph_MT_pre__MetaModelElement_S.py | 1 | """
__graph_MT_pre__MetaModelElement_S.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
________________________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_pre__MetaModelElement_S(graphEntity):
    """Auto-generated AToM3 graphical appearance for MT_pre__MetaModelElement_S.

    Draws a filled rectangle with an invisible connector point and the
    MT_label__ of the associated semantic object.
    """

    def __init__(self, x, y, semObject = None):
        self.semanticObject = semObject
        self.sizeX, self.sizeY = 143, 82
        graphEntity.__init__(self, x, y)
        self.ChangesAtRunTime = 0
        self.constraintList = []
        if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
        else: atribs = None
        self.graphForms = []
        self.imageDict = self.getImageDict()

    def DrawObject(self, drawing, showGG = 0):
        """Render this entity on `drawing` (a Tk canvas-like object)."""
        self.dc = drawing
        if showGG and self.semanticObject: self.drawGGLabel(drawing)
        # Invisible (no outline/fill) oval used as the link connection point.
        h = drawing.create_oval(self.translate([162.0, 61.0, 162.0, 61.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
        self.connectors.append( h )
        # Main body rectangle.
        h = drawing.create_rectangle(self.translate([21.0, 18.0, 162.0, 98.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
        self.gf6 = GraphicalForm(drawing, h, "gf6")
        self.graphForms.append(self.gf6)
        helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
        # Rule label text taken from the semantic object's MT_label__.
        h = drawing.create_text(self.translate([-3, -3]), font=helv12,
                                tags = (self.tag, self.semanticObject.getClass()),
                                fill = "black",
                                text=self.semanticObject.MT_label__.toString())
        self.attr_display["MT_label__"] = h
        self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
        self.graphForms.append(self.gf_label)

    def postCondition( self, actionID, * params):
        # No generated post-condition for this entity.
        return None

    def preCondition( self, actionID, * params):
        # No generated pre-condition for this entity.
        return None

    def getImageDict( self ):
        # No images are associated with this graphical appearance.
        imageDict = dict()
        return imageDict
# Module-level alias; presumably how the AToM3 framework discovers the
# graphical class for this entity -- confirm against the loader.
new_class = graph_MT_pre__MetaModelElement_S
|
t3dev/odoo | refs/heads/master | addons/crm/models/res_config_settings.py | 1 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
    """CRM settings exposed on the general settings form."""
    _inherit = 'res.config.settings'

    crm_alias_prefix = fields.Char('Default Alias Name for Leads')
    generate_lead_from_alias = fields.Boolean('Manual Assignation of Emails', config_parameter='crm.generate_lead_from_alias')
    group_use_lead = fields.Boolean(string="Leads", implied_group='crm.group_use_lead')
    module_crm_phone_validation = fields.Boolean("Phone Formatting")
    module_crm_iap_lead = fields.Boolean("Generate new leads based on their country, technologies, size, etc.")
    module_crm_iap_lead_website = fields.Boolean("Create Leads/Opportunities from your website's traffic")
    lead_mining_in_pipeline = fields.Boolean("Create a lead mining request directly from the opportunity pipeline.", config_parameter='crm.lead_mining_in_pipeline')

    def _find_default_lead_alias_id(self):
        """Return the default lead mail.alias record (possibly empty).

        Prefers the `crm.mail_alias_lead_info` XML-id; otherwise searches
        for any catch-all crm.lead alias attached to the crm.team model.
        """
        alias = self.env.ref('crm.mail_alias_lead_info', False)
        if not alias:
            alias = self.env['mail.alias'].search([
                ('alias_model_id.model', '=', 'crm.lead'),
                ('alias_force_thread_id', '=', False),
                ('alias_parent_model_id.model', '=', 'crm.team'),
                ('alias_parent_thread_id', '=', False),
                ('alias_defaults', '=', '{}')
            ], limit=1)
        return alias

    @api.onchange('group_use_lead')
    def _onchange_group_use_lead(self):
        """ Reset alias / leads configuration if leads are not used """
        if not self.group_use_lead:
            self.generate_lead_from_alias = False

    @api.onchange('generate_lead_from_alias')
    def _onchange_generate_lead_from_alias(self):
        # Default the alias prefix to 'info' when the feature is enabled and
        # no prefix was set; clear it when disabled.
        self.crm_alias_prefix = (self.crm_alias_prefix or 'info') if self.generate_lead_from_alias else False

    @api.model
    def get_values(self):
        # Expose the current default lead alias name on the settings form.
        res = super(ResConfigSettings, self).get_values()
        alias = self._find_default_lead_alias_id()
        res.update(
            crm_alias_prefix=alias.alias_name if alias else False,
        )
        return res

    @api.multi
    def set_values(self):
        # Update (or lazily create) the default lead alias with the chosen
        # prefix.
        super(ResConfigSettings, self).set_values()
        alias = self._find_default_lead_alias_id()
        if alias:
            alias.write({'alias_name': self.crm_alias_prefix})
        else:
            self.env['mail.alias'].with_context(
                alias_model_name='crm.lead',
                alias_parent_model_name='crm.team').create({'alias_name': self.crm_alias_prefix})
|
trishnaguha/ansible | refs/heads/devel | lib/ansible/modules/net_tools/nios/nios_nsgroup.py | 52 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_nsgroup
short_description: Configure InfoBlox DNS Nameserver Groups
extends_documentation_fragment: nios
author:
- Erich Birngruber (@ebirn)
- Sumit Jaiswal (@sjaiswal)
version_added: "2.8"
description:
- Adds and/or removes nameserver groups form Infoblox NIOS servers.
This module manages NIOS C(nsgroup) objects using the Infoblox. WAPI interface over REST.
requirements:
- infoblox_client
options:
name:
description:
- Specifies the name of the NIOS nameserver group to be managed.
required: true
grid_primary:
description:
- This host is to be used as primary server in this nameserver group. It must be a grid member.
This option is required when setting I(use_external_primaries) to C(false).
suboptions:
name:
description:
- Provide the name of the grid member to identify the host.
required: true
enable_preferred_primaries:
description:
- This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
default: false
type: bool
grid_replicate:
description:
- Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
type: bool
default: false
lead:
description:
- This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
type: bool
default: false
stealth:
description:
- Configure the external nameserver as stealth server (without NS record) in the zones.
type: bool
default: false
grid_secondaries:
description:
- Configures the list of grid member hosts that act as secondary nameservers.
This option is required when setting I(use_external_primaries) to C(true).
suboptions:
name:
description:
- Provide the name of the grid member to identify the host.
required: true
enable_preferred_primaries:
description:
- This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
default: false
type: bool
grid_replicate:
description:
- Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False)
type: bool
default: false
lead:
description:
- This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
type: bool
default: false
stealth:
description:
- Configure the external nameserver as stealth server (without NS record) in the zones.
type: bool
default: false
preferred_primaries:
description:
- Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
is_grid_default:
description:
- If set to C(True) this nsgroup will become the default nameserver group for new zones.
type: bool
required: false
default: false
use_external_primary:
description:
- This flag controls whether the group is using an external primary nameserver.
Note that modification of this field requires passing values for I(grid_secondaries) and I(external_primaries).
type: bool
required: false
default: false
external_primaries:
description:
- Configures a list of external nameservers (non-members of the grid).
This option is required when setting I(use_external_primaries) to C(true).
suboptions:
address:
description:
- Configures the IP address of the external nameserver
required: true
name:
description:
- Set a label for the external nameserver
required: true
stealth:
description:
- Configure the external nameserver as stealth server (without NS record) in the zones.
type: bool
default: false
tsig_key_name:
description:
- Sets a label for the I(tsig_key) value
tsig_key_alg:
description:
- Provides the algorithm used for the I(tsig_key) in use.
choices: ['HMAC-MD5', 'HMAC-SHA256']
default: 'HMAC-MD5'
tsig_key:
description:
- Set a DNS TSIG key for the nameserver to secure zone transfers (AFXRs).
required: false
external_secondaries:
description:
      - Allows providing a list of external secondary nameservers that are not members of the grid.
suboptions:
address:
description:
- Configures the IP address of the external nameserver
required: true
name:
description:
- Set a label for the external nameserver
required: true
stealth:
description:
- Configure the external nameserver as stealth server (without NS record) in the zones.
type: bool
default: false
tsig_key_name:
description:
- Sets a label for the I(tsig_key) value
tsig_key_alg:
description:
- Provides the algorithm used for the I(tsig_key) in use.
choices: ['HMAC-MD5', 'HMAC-SHA256']
default: 'HMAC-MD5'
tsig_key:
description:
- Set a DNS TSIG key for the nameserver to secure zone transfers (AFXRs).
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
required: false
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
required: false
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
- name: create simple infoblox nameserver group
nios_nsgroup:
name: my-simple-group
comment: "this is a simple nameserver group"
grid_primary:
- name: infoblox-test.example.com
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: create infoblox nameserver group with external primaries
nios_nsgroup:
name: my-example-group
use_external_primary: true
comment: "this is my example nameserver group"
external_primaries: "{{ ext_nameservers }}"
grid_secondaries:
- name: infoblox-test.example.com
lead: True
preferred_primaries: "{{ ext_nameservers }}"
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: delete infoblox nameserver group
nios_nsgroup:
name: my-simple-group
comment: "this is a simple nameserver group"
grid_primary:
- name: infoblox-test.example.com
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.net_tools.nios.api import NIOS_NSGROUP
# from infoblox documentation
# Fields List
# Field Type Req R/O Base Search
# comment String N N Y : = ~
# extattrs Extattr N N N ext
# external_primaries [struct] N N N N/A
# external_secondaries [struct] N N N N/A
# grid_primary [struct] N N N N/A
# grid_secondaries [struct] N N N N/A
# is_grid_default Bool N N N N/A
# is_multimaster Bool N Y N N/A
# name String Y N Y : = ~
# use_external_primary Bool N N N N/A
def main():
    '''entrypoint for module execution.'''
    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
    )
    # cleanup tsig fields
    def clean_tsig(ext):
        '''Drop empty TSIG fields from an external-server dict, in place.
        An empty tsig_key / tsig_key_name is removed, and tsig_key_alg
        (which always carries a default) is dropped when no key remains.
        '''
        if 'tsig_key' in ext and not ext['tsig_key']:
            del ext['tsig_key']
        if 'tsig_key' not in ext and 'tsig_key_name' in ext and not ext['tsig_key_name']:
            del ext['tsig_key_name']
        if 'tsig_key' not in ext and 'tsig_key_alg' in ext:
            del ext['tsig_key_alg']
    def clean_grid_member(member):
        '''Strip default-valued (False) options from a grid member dict,
        in place, so only explicitly enabled options are sent to WAPI.'''
        if member['preferred_primaries']:
            for ext in member['preferred_primaries']:
                clean_tsig(ext)
        if member['enable_preferred_primaries'] is False:
            # preferred_primaries is only meaningful when the feature flag
            # is enabled, so both keys are dropped together.
            del member['enable_preferred_primaries']
            del member['preferred_primaries']
        if member['lead'] is False:
            del member['lead']
        if member['grid_replicate'] is False:
            del member['grid_replicate']
    def ext_primaries_transform(module):
        '''Transform hook: clean TSIG fields of each external primary.'''
        if module.params['external_primaries']:
            for ext in module.params['external_primaries']:
                clean_tsig(ext)
        return module.params['external_primaries']
    def ext_secondaries_transform(module):
        '''Transform hook: clean TSIG fields of each external secondary.'''
        if module.params['external_secondaries']:
            for ext in module.params['external_secondaries']:
                clean_tsig(ext)
        return module.params['external_secondaries']
    def grid_primary_preferred_transform(module):
        '''Transform hook: clean each grid primary member.'''
        # NOTE(review): unlike the external_* transforms above, this does not
        # guard against grid_primary being None -- presumably the WAPI layer
        # only calls the transform when the option was supplied; confirm.
        for member in module.params['grid_primary']:
            clean_grid_member(member)
        return module.params['grid_primary']
    def grid_secondaries_preferred_primaries_transform(module):
        '''Transform hook: clean each grid secondary member.'''
        for member in module.params['grid_secondaries']:
            clean_grid_member(member)
        return module.params['grid_secondaries']
    # Sub-spec for an external (non-grid) name server entry.
    extserver_spec = dict(
        address=dict(required=True, ib_req=True),
        name=dict(required=True, ib_req=True),
        stealth=dict(type='bool', default=False),
        tsig_key=dict(),
        tsig_key_alg=dict(choices=['HMAC-MD5', 'HMAC-SHA256'], default='HMAC-MD5'),
        tsig_key_name=dict(required=True)
    )
    # Sub-spec for a grid member name server entry.
    memberserver_spec = dict(
        name=dict(required=True, ib_req=True),
        enable_preferred_primaries=dict(type='bool', default=False),
        grid_replicate=dict(type='bool', default=False),
        lead=dict(type='bool', default=False),
        preferred_primaries=dict(type='list', elements='dict', options=extserver_spec, default=[]),
        stealth=dict(type='bool', default=False),
    )
    # Infoblox-specific argument spec for the nsgroup object; each transform
    # normalizes the user-supplied value before it is sent to WAPI.
    ib_spec = dict(
        name=dict(required=True, ib_req=True),
        grid_primary=dict(type='list', elements='dict', options=memberserver_spec,
                          transform=grid_primary_preferred_transform),
        grid_secondaries=dict(type='list', elements='dict', options=memberserver_spec,
                              transform=grid_secondaries_preferred_primaries_transform),
        external_primaries=dict(type='list', elements='dict', options=extserver_spec, transform=ext_primaries_transform),
        external_secondaries=dict(type='list', elements='dict', options=extserver_spec,
                                  transform=ext_secondaries_transform),
        is_grid_default=dict(type='bool', default=False),
        use_external_primary=dict(type='bool', default=False),
        extattrs=dict(),
        comment=dict(),
    )
    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    wapi = WapiModule(module)
    result = wapi.run(NIOS_NSGROUP, ib_spec)
    module.exit_json(**result)
if __name__ == '__main__':
    main()
|
dimtruck/magnum | refs/heads/master | magnum/api/controllers/v1/certificate.py | 1 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from magnum.api.controllers import base
from magnum.api.controllers import link
from magnum.api.controllers.v1 import types
from magnum.api.controllers.v1 import utils as api_utils
from magnum.common import exception
from magnum.common import policy
from magnum import objects
class Certificate(base.APIBase):
    """API representation of a certificate.
    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a
    certificate.
    """
    _bay_uuid = None
    """uuid or logical name of bay"""
    _bay = None
    # Cached Bay RPC object, resolved lazily from _bay_uuid.
    def _get_bay_uuid(self):
        # Getter backing the `bay_uuid` wsme property.
        return self._bay_uuid
    def _set_bay_uuid(self, value):
        # Setter backing the `bay_uuid` wsme property: resolves the given
        # UUID or logical name to a Bay and stores the canonical UUID.
        if value and self._bay_uuid != value:
            try:
                self._bay = api_utils.get_rpc_resource('Bay', value)
                self._bay_uuid = self._bay.uuid
            except exception.BayNotFound as e:
                # Change error code because 404 (NotFound) is inappropriate
                # response for a POST request to create a Bay
                e.code = 400  # BadRequest
                raise e
        elif value == wtypes.Unset:
            self._bay_uuid = wtypes.Unset
    bay_uuid = wsme.wsproperty(wtypes.text, _get_bay_uuid,
                               _set_bay_uuid, mandatory=True)
    """The bay UUID or id"""
    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated certificate links"""
    csr = wtypes.StringType(min_length=1)
    """The Certificate Signing Request"""
    pem = wtypes.StringType()
    """The Signed Certificate"""
    def __init__(self, **kwargs):
        super(Certificate, self).__init__()
        self.fields = []
        for field in objects.Certificate.fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))
    def get_bay(self):
        # Return the Bay object for this certificate, resolving it on
        # first access via the bay_uuid property value.
        if not self._bay:
            self._bay = api_utils.get_rpc_resource('Bay', self.bay_uuid)
        return self._bay
    @staticmethod
    def _convert_with_links(certificate, url, expand=True):
        # Trim fields for the collapsed representation and attach
        # self/bookmark links that point at the owning bay.
        if not expand:
            certificate.unset_fields_except(['bay_uuid', 'csr', 'pem'])
        certificate.links = [link.Link.make_link('self', url,
                                                 'certificates',
                                                 certificate.bay_uuid),
                             link.Link.make_link('bookmark', url,
                                                 'certificates',
                                                 certificate.bay_uuid,
                                                 bookmark=True)]
        return certificate
    @classmethod
    def convert_with_links(cls, rpc_cert, expand=True):
        """Build an API Certificate (with links) from an RPC certificate."""
        cert = Certificate(**rpc_cert.as_dict())
        return cls._convert_with_links(cert,
                                       pecan.request.host_url, expand)
    @classmethod
    def sample(cls, expand=True):
        """Return a sample Certificate for API documentation purposes."""
        sample = cls(bay_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae',
                     created_at=timeutils.utcnow(),
                     csr='AAA....AAA')
        return cls._convert_with_links(sample, 'http://localhost:9511', expand)
class CertificateController(rest.RestController):
    """REST controller for Certificate."""
    def __init__(self):
        super(CertificateController, self).__init__()
    # Extra (non-CRUD) actions exposed by this controller.
    _custom_actions = {
        'detail': ['GET'],
    }
    @policy.enforce_wsgi("certificate", "get")
    @wsme_pecan.wsexpose(Certificate, types.uuid_or_name)
    def get_one(self, bay_ident):
        """Retrieve information about the given certificate.
        :param bay_ident: UUID of a bay or
               logical name of the bay.
        """
        # Resolve the bay first, then ask the conductor for its CA cert.
        rpc_bay = api_utils.get_rpc_resource('Bay', bay_ident)
        certificate = pecan.request.rpcapi.get_ca_certificate(rpc_bay)
        return Certificate.convert_with_links(certificate)
    @policy.enforce_wsgi("certificate", "create")
    @wsme_pecan.wsexpose(Certificate, body=Certificate, status_code=201)
    def post(self, certificate):
        """Create a new certificate.
        :param certificate: a certificate within the request body.
        """
        certificate_dict = certificate.as_dict()
        context = pecan.request.context
        # Stamp the requesting project/user onto the new certificate.
        certificate_dict['project_id'] = context.project_id
        certificate_dict['user_id'] = context.user_id
        cert_obj = objects.Certificate(context, **certificate_dict)
        # Ask the conductor to sign the CSR against the bay's CA.
        new_cert = pecan.request.rpcapi.sign_certificate(certificate.get_bay(),
                                                         cert_obj)
        return Certificate.convert_with_links(new_cert)
|
valkyriesavage/invenio | refs/heads/263-date_added_math | modules/bibformat/lib/bibformat_dblayer.py | 3 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Database access related functions for BibFormat engine and
administration pages.
"""
__revision__ = "$Id$"
import zlib
import time
from invenio.dbquery import run_sql
## MARC-21 tag/field access functions
def get_fieldvalues(recID, tag):
    """Return the values of the MARC-21 `tag` fields for record `recID`.

    @param recID: the record ID to look up
    @param tag: the (possibly wildcarded) MARC tag, e.g. '100__a'
    @return: list of field value strings; empty list when none found
    """
    bibXXx = "bib" + tag[0] + tag[1] + "x"
    bibrec_bibXXx = "bibrec_" + bibXXx
    # Table names cannot be bound as SQL parameters (they are derived only
    # from the first two tag digits), but recID and tag are user-influenced
    # values: bind them as parameters instead of interpolating them into
    # the SQL string, which was injection-prone.
    query = "SELECT value FROM %s AS b, %s AS bb WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id AND tag LIKE %%s" \
            % (bibXXx, bibrec_bibXXx)
    res = run_sql(query, (recID, tag))
    return [row[0] for row in res]
def localtime_to_utc(date, fmt="%Y-%m-%dT%H:%M:%SZ"):
"Convert localtime to UTC"
ldate = date.split(" ")[0]
ltime = date.split(" ")[1]
lhour = ltime.split(":")[0]
lminute = ltime.split(":")[1]
lsec = ltime.split(":")[2]
lyear = ldate.split("-")[0]
lmonth = ldate.split("-")[1]
lday = ldate.split("-")[2]
timetoconvert = time.strftime(fmt, time.gmtime(time.mktime((int(lyear), int(lmonth), int(lday), int(lhour), int(lminute), int(lsec), 0, 0, -1))))
return timetoconvert
def get_creation_date(sysno, fmt="%Y-%m-%dT%H:%M:%SZ"):
    """Return the creation date of record `sysno` as a UTC string.

    @param sysno: record ID
    @param fmt: strftime format of the returned value
    @return: creation date in UTC, or "" when unknown
    """
    out = ""
    res = run_sql("SELECT DATE_FORMAT(creation_date, '%%Y-%%m-%%d %%H:%%i:%%s') FROM bibrec WHERE id=%s", (sysno,), 1)
    # Guard against an empty result set (unknown sysno) before indexing,
    # mirroring get_modification_date(); the original raised IndexError here.
    if res and res[0][0]:
        out = localtime_to_utc(res[0][0], fmt)
    return out
def get_modification_date(sysno, fmt="%Y-%m-%dT%H:%M:%SZ"):
    """Return the last-modification date of record `sysno` in UTC, or ""."""
    rows = run_sql("SELECT DATE_FORMAT(modification_date,'%%Y-%%m-%%d %%H:%%i:%%s') FROM bibrec WHERE id=%s", (sysno,), 1)
    # Unknown record or NULL date -> empty string.
    if not (rows and rows[0][0]):
        return ""
    return localtime_to_utc(rows[0][0], fmt)
## XML Marc related functions
def get_tag_from_name(name):
    """Return the MARC code registered under `name`, or None when absent."""
    rows = run_sql("SELECT value FROM tag WHERE name LIKE %s", (name,))
    if not rows:
        return None
    return rows[0][0]
def get_tags_from_name(name):
    """Return all MARC codes registered under `name`, ordered by value.

    @param name: logical tag name to look up
    @return: list of MARC code strings, or None when the name is unknown
    """
    res = run_sql("SELECT value FROM tag WHERE name LIKE %s ORDER BY value", (name,))
    if len(res) > 0:
        # Collect the first column of every row. The original returned
        # list(res[0]) -- i.e. only the first matching code -- despite the
        # plural "codes ... ordered by value" contract.
        return [row[0] for row in res]
    else:
        return None
def tag_exists_for_name(name):
    """Return True when a tag value exists for `name` in the 'tag' table."""
    return len(run_sql("SELECT value FROM tag WHERE name LIKE %s", (name,))) > 0
def get_name_from_tag(tag):
    """Return the name registered for MARC code `tag`, or None when absent."""
    rows = run_sql("SELECT name FROM tag WHERE value LIKE %s", (tag,))
    if not rows:
        return None
    return rows[0][0]
def name_exists_for_tag(tag):
    """Return True when a name exists for `tag` in the 'tag' table."""
    return len(run_sql("SELECT name FROM tag WHERE value LIKE %s", (tag,))) > 0
def get_all_name_tag_mappings():
    """Return the name<->tag mappings from the 'tag' table as a dict.

    Keys are names; when two rows share a name the last one wins (format
    templates cannot distinguish duplicates anyway).
    @return: dict mapping name -> MARC tag value
    """
    rows = run_sql("SELECT value, name FROM tag")
    return dict((name, value) for (value, name) in rows)
## Output formats related functions
def get_output_format_id(code):
    """Return the database id of the output format whose code is `code`.

    Codes longer than 6 characters are truncated to their first 6 before
    the lower-cased lookup in the 'format' table.
    @return: the id, or None when not found
    """
    f_code = code
    if len(code) > 6:
        f_code = code[:6]
    rows = run_sql("SELECT id FROM format WHERE code=%s", (f_code.lower(),))
    if not rows:
        return None
    return rows[0][0]
def add_output_format(code, name="", description="", content_type="text/html", visibility=1):
    """Register a new output format in the 'format' table.

    Does nothing when a format with the given code already exists.
    @param code: the code of the new format (stored lower-cased)
    @param name: a name for the new format
    @param description: a description for the new format
    @param content_type: the content_type (if applicable) of the new output format
    @param visibility: 1 when the format is visible, 0 otherwise
    """
    if get_output_format_id(code) is not None:
        return
    run_sql("INSERT INTO format SET code=%s, description=%s, content_type=%s, visibility=%s",
            (code.lower(), description, content_type, visibility))
    set_output_format_name(code, name)
def remove_output_format(code):
    """Delete the output format `code` and all its localized names.

    Does nothing when the code does not exist in the database.
    @param code: the code of the output format to remove
    """
    output_format_id = get_output_format_id(code)
    if output_format_id is None:
        return
    # Bind the id as a query parameter instead of %-interpolating it into
    # the SQL string, which was injection-prone and unquoted-value fragile.
    run_sql("DELETE FROM formatname WHERE id_format=%s", (output_format_id,))
    run_sql("DELETE FROM format WHERE id=%s", (output_format_id,))
def get_output_format_description(code):
    """Return the description of output format `code`, or "" when unset.

    @param code: the code of the output format to get the description from
    @return: output format description string
    """
    rows = run_sql("SELECT description FROM format WHERE code=%s", (code,))
    if len(rows) > 0 and rows[0][0] is not None:
        return rows[0][0]
    return ""
def set_output_format_description(code, description):
    """Set the description of output format `code`, creating it if needed.

    @param code: the code of the output format to update
    @param description: the new description
    """
    if get_output_format_id(code) is None:
        add_output_format(code, "", description)
    run_sql("UPDATE format SET description=%s WHERE code=%s",
            (description, code.lower()))
def get_output_format_visibility(code):
    """Return the visibility flag of output format `code`.

    @return: 1 when visible, 0 when hidden or when the code is unknown
    """
    rows = run_sql("SELECT visibility FROM format WHERE code=%s", (code,))
    if rows:
        visibility = rows[0][0]
        # Only trust sane 0/1 values coming back from the database.
        if visibility is not None and int(visibility) in (0, 1):
            return int(visibility)
    return 0
def set_output_format_visibility(code, visibility):
    """Set the visibility flag of output format `code`, creating it if needed.

    @param code: the code of the output format to update
    @param visibility: the new visibility (0: not visible, 1: visible)
    """
    if get_output_format_id(code) is None:
        add_output_format(code, "", "", "", visibility)
    run_sql("UPDATE format SET visibility=%s WHERE code=%s",
            (visibility, code.lower()))
def get_existing_content_types():
    """Return all distinct MIME content-types used by existing output formats.

    The returned list always contains at least 'text/html'.
    @return: a list of content-type strings
    """
    rows = run_sql("SELECT DISTINCT content_type FROM format GROUP BY content_type")
    if rows is None:
        return ['text/html']
    types = [row[0] for row in rows if len(row) > 0]
    if 'text/html' not in types:
        types.append('text/html')
    return types
def get_output_format_content_type(code):
    """Return the MIME content-type of output format `code`, or "" when unset.

    @param code: the code of the output format to get the content-type from
    @return: output format content_type string
    """
    rows = run_sql("SELECT content_type FROM format WHERE code=%s", (code,))
    if len(rows) > 0 and rows[0][0] is not None:
        return rows[0][0]
    return ""
def set_output_format_content_type(code, content_type):
    """Set the MIME content-type of output format `code`, creating it if needed.

    @param code: the code of the output format to update
    @param content_type: the content type for the format
    """
    if get_output_format_id(code) is None:
        # add one if not exist (should not happen)
        add_output_format(code, "", "", content_type)
    run_sql("UPDATE format SET content_type=%s WHERE code=%s",
            (content_type, code.lower()))
def get_output_format_names(code):
    """Return the localized names of the output format designated by `code`.

    The returned dict always has keys 'ln' (long names) and 'sn' (short
    names), each mapping language codes to names, plus 'generic' holding
    the generic name stored in the 'format' table. Only languages present
    in the database appear in 'ln'/'sn'.
    E.g.: {'ln': {'en': "a long name", 'fr': "un long nom"},
           'sn': {'en': "a name", 'fr': "un nom"},
           'generic': "a name"}

    @param code: the code of the output format to get the names from
    @return: a dict containing output format names
    """
    out = {'sn': {}, 'ln': {}, 'generic': ''}
    output_format_id = get_output_format_id(code)
    if output_format_id is None:
        return out
    res = run_sql("SELECT name FROM format WHERE code=%s", (code,))
    if len(res) > 0:
        out['generic'] = res[0][0]
    # Bind the id as a parameter instead of %-interpolating it into the SQL
    # string as the original did.
    res = run_sql("SELECT type, ln, value FROM formatname WHERE id_format=%s",
                  (output_format_id,))
    for row in res:
        if row[0] in ('sn', 'ln'):
            out[row[0]][row[1]] = row[2]
    return out
def set_output_format_name(code, name, lang="generic", type='ln'):
    """Set the name of the output format given by `code`.

    Does nothing when `type` is neither 'ln' nor 'sn'. Names longer than
    256 characters are truncated. When `code` does not correspond to an
    existing output format and a generic long name is being set, the
    format is created first. Localized name variants live in 'formatname'.

    @param code: the code of an output format
    @param name: the name to give to the output format
    @param lang: the language in which the name is given
    @param type: either 'ln' (for long name) or 'sn' (for short name)
    """
    if len(name) > 256:
        name = name[:256]
    if type.lower() not in ("sn", "ln"):
        return
    output_format_id = get_output_format_id(code)
    if output_format_id is None and lang == "generic" and type.lower() == "ln":
        # Create output format inside table if it did not exist
        # (happens when the output format was added not through the web UI),
        # then reload the id.
        add_output_format(code, name)
        output_format_id = get_output_format_id(code)
    if lang == "generic" and type.lower() == "ln":
        # The generic long name lives directly in the 'format' table.
        run_sql("UPDATE format SET name=%s WHERE code=%s", (name, code.lower()))
    else:
        # Localized variants go to 'formatname'. Skip when the format could
        # not be resolved instead of inserting a row with NULL id_format,
        # which the original code would have attempted.
        if output_format_id is None:
            return
        run_sql("REPLACE INTO formatname SET id_format=%s, ln=%s, type=%s, value=%s",
                (output_format_id, lang, type.lower(), name))
def change_output_format_code(old_code, new_code):
    """Rename the code of an output format from `old_code` to `new_code`.

    Does nothing when `old_code` is unknown.
    @param old_code: the code of the output format to change
    @param new_code: the new code (stored lower-cased)
    """
    output_format_id = get_output_format_id(old_code)
    if output_format_id is None:
        return
    run_sql("UPDATE format SET code=%s WHERE id=%s",
            (new_code.lower(), output_format_id))
def get_preformatted_record(recID, of, decompress=zlib.decompress):
    """Return the cached formatted record `recID` in output format `of`.

    @param recID: the id of the record to fetch
    @param of: the output format code
    @param decompress: callable used to decompress the cached value
    @return: formatted record as a string, or None when no cache exists
    """
    rows = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s",
                   (recID, of))
    if not rows:
        return None
    # Cache hit: decompress and coerce to a string.
    return "%s" % decompress(rows[0][0])
def get_preformatted_record_date(recID, of):
    """Return the last cache-update date for record `recID` in format `of`.

    @param recID: the id of the record to fetch
    @param of: the output format code
    @return: the date of the last update of the cache as a string, or None
    """
    # Bind recID/of as query parameters instead of %-interpolating them
    # into the SQL string as the original did (injection-prone).
    res = run_sql("SELECT last_updated FROM bibfmt WHERE id_bibrec=%s AND format=%s",
                  (recID, of))
    if res:
        return "%s" % res[0][0]
    return None
## def keep_formats_in_db(output_formats):
## """
## Remove from db formats that are not in the list
## TOBE USED ONLY ONCE OLD BIBFORMAT IS REMOVED (or old behaviours will be erased...)
## """
## query = "SELECT code FROM format"
## res = run_sql(query)
## for row in res:
## if not row[0] in output_formats:
## query = "DELETE FROM format WHERE code='%s'"%row[0]
## def add_formats_in_db(output_formats):
## """
## Add given formats in db (if not already there)
## """
## for output_format in output_format:
## if get_format_from_db(output_format) is None:
## #Add new
## query = "UPDATE TABLE format "
## else:
## #Update
## query = "UPDATE TABLE format "
## query = "UPDATE TABLE format "
## res = run_sql(query)
## for row in res:
## if not row[0] in output_formats:
## query = "DELETE FROM format WHERE code='%s'"%row[0]
|
lsinfo/odoo | refs/heads/8.0 | addons/stock/tests/test_stock_flow.py | 219 | # -*- coding: utf-8 -*-
from openerp.addons.stock.tests.common import TestStockCommon
from openerp.tools import mute_logger, float_round
class TestStockFlow(TestStockCommon):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_picking_create_and_transfer_quantity(self):
""" Basic stock operation on incoming and outgoing shipment. """
LotObj = self.env['stock.production.lot']
# ----------------------------------------------------------------------
# Create incoming shipment of product A, B, C, D
# ----------------------------------------------------------------------
# Product A ( 1 Unit ) , Product C ( 10 Unit )
# Product B ( 1 Unit ) , Product D ( 10 Unit )
# Product D ( 5 Unit )
# ----------------------------------------------------------------------
picking_in = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in})
self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 1,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.productB.name,
'product_id': self.productB.id,
'product_uom_qty': 1,
'product_uom': self.productB.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.productC.name,
'product_id': self.productC.id,
'product_uom_qty': 10,
'product_uom': self.productC.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.productD.name,
'product_id': self.productD.id,
'product_uom_qty': 10,
'product_uom': self.productD.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.productD.name,
'product_id': self.productD.id,
'product_uom_qty': 5,
'product_uom': self.productD.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
# Confirm incoming shipment.
picking_in.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# ----------------------------------------------------------------------
# Replace pack operation of incoming shipments.
# ----------------------------------------------------------------------
picking_in.do_prepare_partial()
self.StockPackObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', picking_in.id)]).write({
'product_qty': 4.0})
self.StockPackObj.search([('product_id', '=', self.productB.id), ('picking_id', '=', picking_in.id)]).write({
'product_qty': 5.0})
self.StockPackObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', picking_in.id)]).write({
'product_qty': 5.0})
self.StockPackObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', picking_in.id)]).write({
'product_qty': 5.0})
lot2_productC = LotObj.create({'name': 'C Lot 2', 'product_id': self.productC.id})
self.StockPackObj.create({
'product_id': self.productC.id,
'product_qty': 2,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': picking_in.id,
'lot_id': lot2_productC.id})
self.StockPackObj.create({
'product_id': self.productD.id,
'product_qty': 2,
'product_uom_id': self.productD.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': picking_in.id})
# Check incoming shipment total quantity of pack operation
packs = self.StockPackObj.search([('picking_id', '=', picking_in.id)])
total_qty = [pack.product_qty for pack in packs]
self.assertEqual(sum(total_qty), 23, 'Wrong quantity in pack operation (%s found instead of 23)' % (sum(total_qty)))
# Transfer Incoming Shipment.
picking_in.do_transfer()
# ----------------------------------------------------------------------
# Check state, quantity and total moves of incoming shipment.
# ----------------------------------------------------------------------
# Check total no of move lines of incoming shipment.
self.assertEqual(len(picking_in.move_lines), 6, 'Wrong number of move lines.')
# Check incoming shipment state.
self.assertEqual(picking_in.state, 'done', 'Incoming shipment state should be done.')
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
# Check product A done quantity must be 3 and 1
moves = self.MoveObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', picking_in.id)])
a_done_qty = [move.product_uom_qty for move in moves]
self.assertEqual(set(a_done_qty), set([1.0, 3.0]), 'Wrong move quantity for product A.')
# Check product B done quantity must be 4 and 1
moves = self.MoveObj.search([('product_id', '=', self.productB.id), ('picking_id', '=', picking_in.id)])
b_done_qty = [move.product_uom_qty for move in moves]
self.assertEqual(set(b_done_qty), set([4.0, 1.0]), 'Wrong move quantity for product B.')
# Check product C done quantity must be 7
c_done_qty = self.MoveObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', picking_in.id)], limit=1).product_uom_qty
self.assertEqual(c_done_qty, 7.0, 'Wrong move quantity of product C (%s found instead of 7)' % (c_done_qty))
# Check product D done quantity must be 7
d_done_qty = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', picking_in.id)], limit=1).product_uom_qty
self.assertEqual(d_done_qty, 7.0, 'Wrong move quantity of product D (%s found instead of 7)' % (d_done_qty))
# ----------------------------------------------------------------------
# Check Back order of Incoming shipment.
# ----------------------------------------------------------------------
# Check back order created or not.
back_order_in = self.PickingObj.search([('backorder_id', '=', picking_in.id)])
self.assertEqual(len(back_order_in), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(back_order_in.move_lines), 3, 'Wrong number of move lines.')
# Check back order should be created with 3 quantity of product C.
moves = self.MoveObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', back_order_in.id)])
product_c_qty = [move.product_uom_qty for move in moves]
self.assertEqual(sum(product_c_qty), 3.0, 'Wrong move quantity of product C (%s found instead of 3)' % (product_c_qty))
# Check back order should be created with 8 quantity of product D.
moves = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', back_order_in.id)])
product_d_qty = [move.product_uom_qty for move in moves]
self.assertEqual(sum(product_d_qty), 8.0, 'Wrong move quantity of product D (%s found instead of 8)' % (product_d_qty))
# ======================================================================
# Create Outgoing shipment with ...
# product A ( 10 Unit ) , product B ( 5 Unit )
# product C ( 3 unit ) , product D ( 10 Unit )
# ======================================================================
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out})
self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 10,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.productB.name,
'product_id': self.productB.id,
'product_uom_qty': 5,
'product_uom': self.productB.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.productC.name,
'product_id': self.productC.id,
'product_uom_qty': 3,
'product_uom': self.productC.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.productD.name,
'product_id': self.productD.id,
'product_uom_qty': 10,
'product_uom': self.productD.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
# Confirm outgoing shipment.
picking_out.action_confirm()
for move in picking_out.move_lines:
self.assertEqual(move.state, 'confirmed', 'Wrong state of move line.')
# Product assign to outgoing shipments
picking_out.action_assign()
for move in picking_out.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# Check availability for product A
aval_a_qty = self.MoveObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(aval_a_qty, 4.0, 'Wrong move quantity availability of product A (%s found instead of 4)' % (aval_a_qty))
# Check availability for product B
aval_b_qty = self.MoveObj.search([('product_id', '=', self.productB.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(aval_b_qty, 5.0, 'Wrong move quantity availability of product B (%s found instead of 5)' % (aval_b_qty))
# Check availability for product C
aval_c_qty = self.MoveObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(aval_c_qty, 3.0, 'Wrong move quantity availability of product C (%s found instead of 3)' % (aval_c_qty))
# Check availability for product D
aval_d_qty = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(aval_d_qty, 7.0, 'Wrong move quantity availability of product D (%s found instead of 7)' % (aval_d_qty))
# ----------------------------------------------------------------------
# Replace pack operation of outgoing shipment.
# ----------------------------------------------------------------------
picking_out.do_prepare_partial()
self.StockPackObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', picking_out.id)]).write({'product_qty': 2.0})
self.StockPackObj.search([('product_id', '=', self.productB.id), ('picking_id', '=', picking_out.id)]).write({'product_qty': 3.0})
self.StockPackObj.create({
'product_id': self.productB.id,
'product_qty': 2,
'product_uom_id': self.productB.uom_id.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location,
'picking_id': picking_out.id})
self.StockPackObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', picking_out.id)]).write({
'product_qty': 2.0, 'lot_id': lot2_productC.id})
self.StockPackObj.create({
'product_id': self.productC.id,
'product_qty': 3,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location,
'picking_id': picking_out.id})
self.StockPackObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', picking_out.id)]).write({'product_qty': 6.0})
# Transfer picking.
picking_out.do_transfer()
# ----------------------------------------------------------------------
# Check state, quantity and total moves of outgoing shipment.
# ----------------------------------------------------------------------
# check outgoing shipment status.
self.assertEqual(picking_out.state, 'done', 'Wrong state of outgoing shipment.')
        # Check outgoing shipment total moves and its state.
self.assertEqual(len(picking_out.move_lines), 5, 'Wrong number of move lines')
for move in picking_out.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
back_order_out = self.PickingObj.search([('backorder_id', '=', picking_out.id)])
#------------------
# Check back order.
# -----------------
self.assertEqual(len(back_order_out), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(back_order_out.move_lines), 2, 'Wrong number of move lines')
# Check back order should be created with 8 quantity of product A.
product_a_qty = self.MoveObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', back_order_out.id)], limit=1).product_uom_qty
self.assertEqual(product_a_qty, 8.0, 'Wrong move quantity of product A (%s found instead of 8)' % (product_a_qty))
# Check back order should be created with 4 quantity of product D.
product_d_qty = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', back_order_out.id)], limit=1).product_uom_qty
self.assertEqual(product_d_qty, 4.0, 'Wrong move quantity of product D (%s found instead of 4)' % (product_d_qty))
#-----------------------------------------------------------------------
# Check stock location quant quantity and quantity available
# of product A, B, C, D
#-----------------------------------------------------------------------
# Check quants and available quantity for product A
quants = self.StockQuantObj.search([('product_id', '=', self.productA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 2.0, 'Expecting 2.0 Unit , got %.4f Unit on location stock!' % (sum(total_qty)))
self.assertEqual(self.productA.qty_available, 2.0, 'Wrong quantity available (%s found instead of 2.0)' % (self.productA.qty_available))
# Check quants and available quantity for product B
quants = self.StockQuantObj.search([('product_id', '=', self.productB.id), ('location_id', '=', self.stock_location)])
self.assertFalse(quants, 'No quant should found as outgoing shipment took everything out of stock.')
self.assertEqual(self.productB.qty_available, 0.0, 'Product B should have zero quantity available.')
# Check quants and available quantity for product C
quants = self.StockQuantObj.search([('product_id', '=', self.productC.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 2.0, 'Expecting 2.0 Unit, got %.4f Unit on location stock!' % (sum(total_qty)))
self.assertEqual(self.productC.qty_available, 2.0, 'Wrong quantity available (%s found instead of 2.0)' % (self.productC.qty_available))
# Check quants and available quantity for product D
quant = self.StockQuantObj.search([('product_id', '=', self.productD.id), ('location_id', '=', self.stock_location)], limit=1)
self.assertEqual(quant.qty, 1.0, 'Expecting 1.0 Unit , got %.4f Unit on location stock!' % (quant.qty))
self.assertEqual(self.productD.qty_available, 1.0, 'Wrong quantity available (%s found instead of 1.0)' % (self.productD.qty_available))
#-----------------------------------------------------------------------
# Back Order of Incoming shipment
#-----------------------------------------------------------------------
lot3_productC = LotObj.create({'name': 'Lot 3', 'product_id': self.productC.id})
lot4_productC = LotObj.create({'name': 'Lot 4', 'product_id': self.productC.id})
lot5_productC = LotObj.create({'name': 'Lot 5', 'product_id': self.productC.id})
lot6_productC = LotObj.create({'name': 'Lot 6', 'product_id': self.productC.id})
lot1_productD = LotObj.create({'name': 'Lot 1', 'product_id': self.productD.id})
lot2_productD = LotObj.create({'name': 'Lot 2', 'product_id': self.productD.id})
# Confirm back order of incoming shipment.
back_order_in.action_confirm()
self.assertEqual(back_order_in.state, 'assigned', 'Wrong state of incoming shipment back order.')
for move in back_order_in.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# ----------------------------------------------------------------------
# Replace pack operation (Back order of Incoming shipment)
# ----------------------------------------------------------------------
back_order_in.do_prepare_partial()
packD = self.StockPackObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', back_order_in.id)])
self.assertEqual(len(packD), 1, 'Wrong number of pack operation.')
packD.write({'product_qty': 4, 'lot_id': lot1_productD.id})
self.StockPackObj.create({
'product_id': self.productD.id,
'product_qty': 4,
'product_uom_id': self.productD.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id,
'lot_id': lot2_productD.id})
self.StockPackObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', back_order_in.id)], limit=1).write({'product_qty': 1, 'lot_id': lot3_productC.id})
self.StockPackObj.create({
'product_id': self.productC.id,
'product_qty': 1,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id,
'lot_id': lot4_productC.id})
self.StockPackObj.create({
'product_id': self.productC.id,
'product_qty': 2,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id,
'lot_id': lot5_productC.id})
self.StockPackObj.create({
'product_id': self.productC.id,
'product_qty': 2,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id,
'lot_id': lot6_productC.id})
self.StockPackObj.create({
'product_id': self.productA.id,
'product_qty': 10,
'product_uom_id': self.productA.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id})
back_order_in.do_transfer()
# ----------------------------------------------------------------------
# Check state, quantity and total moves (Back order of Incoming shipment).
# ----------------------------------------------------------------------
# Check total no of move lines.
self.assertEqual(len(back_order_in.move_lines), 6, 'Wrong number of move lines')
# Check incoming shipment state must be 'Done'.
self.assertEqual(back_order_in.state, 'done', 'Wrong state of picking.')
# Check incoming shipment move lines state must be 'Done'.
for move in back_order_in.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move lines.')
# Check product A done quantity must be 10
movesA = self.MoveObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', back_order_in.id)])
self.assertEqual(movesA.product_uom_qty, 10, "Wrong move quantity of product A (%s found instead of 10)" % (movesA.product_uom_qty))
# Check product C done quantity must be 3.0, 1.0, 2.0
movesC = self.MoveObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', back_order_in.id)])
c_done_qty = [move.product_uom_qty for move in movesC]
self.assertEqual(set(c_done_qty), set([3.0, 1.0, 2.0]), 'Wrong quantity of moves product C.')
# Check product D done quantity must be 5.0 and 3.0
movesD = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', back_order_in.id)])
d_done_qty = [move.product_uom_qty for move in movesD]
self.assertEqual(set(d_done_qty), set([3.0, 5.0]), 'Wrong quantity of moves product D.')
# Check no back order is created.
self.assertFalse(self.PickingObj.search([('backorder_id', '=', back_order_in.id)]), "Should not create any back order.")
#-----------------------------------------------------------------------
# Check stock location quant quantity and quantity available
# of product A, B, C, D
#-----------------------------------------------------------------------
# Check quants and available quantity for product A.
quants = self.StockQuantObj.search([('product_id', '=', self.productA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 12.0, 'Wrong total stock location quantity (%s found instead of 12)' % (sum(total_qty)))
self.assertEqual(self.productA.qty_available, 12.0, 'Wrong quantity available (%s found instead of 12)' % (self.productA.qty_available))
# Check quants and available quantity for product B.
quants = self.StockQuantObj.search([('product_id', '=', self.productB.id), ('location_id', '=', self.stock_location)])
self.assertFalse(quants, 'No quant should found as outgoing shipment took everything out of stock')
self.assertEqual(self.productB.qty_available, 0.0, 'Total quantity in stock should be 0 as the backorder took everything out of stock')
# Check quants and available quantity for product C.
quants = self.StockQuantObj.search([('product_id', '=', self.productC.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 8.0, 'Wrong total stock location quantity (%s found instead of 8)' % (sum(total_qty)))
self.assertEqual(self.productC.qty_available, 8.0, 'Wrong quantity available (%s found instead of 8)' % (self.productC.qty_available))
# Check quants and available quantity for product D.
quants = self.StockQuantObj.search([('product_id', '=', self.productD.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 9.0, 'Wrong total stock location quantity (%s found instead of 9)' % (sum(total_qty)))
self.assertEqual(self.productD.qty_available, 9.0, 'Wrong quantity available (%s found instead of 9)' % (self.productD.qty_available))
#-----------------------------------------------------------------------
# Back order of Outgoing shipment
# ----------------------------------------------------------------------
back_order_out.do_prepare_partial()
back_order_out.do_transfer()
# Check stock location quants and available quantity for product A.
quants = self.StockQuantObj.search([('product_id', '=', self.productA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertGreaterEqual(float_round(sum(total_qty), precision_rounding=0.0001), 1, 'Total stock location quantity for product A should not be nagative.')
def test_10_pickings_transfer_with_different_uom(self):
""" Picking transfer with diffrent unit of meassure. """
# ----------------------------------------------------------------------
# Create incoming shipment of products DozA, SDozA, SDozARound, kgB, gB
# ----------------------------------------------------------------------
# DozA ( 10 Dozen ) , SDozA ( 10.5 SuperDozen )
# SDozARound ( 10.5 10.5 SuperDozenRound ) , kgB ( 0.020 kg )
# gB ( 525.3 g )
# ----------------------------------------------------------------------
picking_in_A = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in})
self.MoveObj.create({
'name': self.DozA.name,
'product_id': self.DozA.id,
'product_uom_qty': 10,
'product_uom': self.DozA.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.SDozA.name,
'product_id': self.SDozA.id,
'product_uom_qty': 10.5,
'product_uom': self.SDozA.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.SDozARound.name,
'product_id': self.SDozARound.id,
'product_uom_qty': 10.5,
'product_uom': self.SDozARound.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 0.020,
'product_uom': self.kgB.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.gB.name,
'product_id': self.gB.id,
'product_uom_qty': 525.3,
'product_uom': self.gB.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment move lines state.
for move in picking_in_A.move_lines:
self.assertEqual(move.state, 'draft', 'Move state must be draft.')
# Confirm incoming shipment.
picking_in_A.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in_A.move_lines:
self.assertEqual(move.state, 'assigned', 'Move state must be draft.')
picking_in_A.do_prepare_partial()
# ----------------------------------------------------
# Check pack operation quantity of incoming shipments.
# ----------------------------------------------------
PackSdozAround = self.StockPackObj.search([('product_id', '=', self.SDozARound.id), ('picking_id', '=', picking_in_A.id)], limit=1)
self.assertEqual(PackSdozAround.product_qty, 11, 'Wrong quantity in pack operation (%s found instead of 11)' % (PackSdozAround.product_qty))
picking_in_A.do_transfer()
#-----------------------------------------------------------------------
# Check stock location quant quantity and quantity available
#-----------------------------------------------------------------------
# Check quants and available quantity for product DozA
quants = self.StockQuantObj.search([('product_id', '=', self.DozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 10, 'Expecting 10 Dozen , got %.4f Dozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.DozA.qty_available, 10, 'Wrong quantity available (%s found instead of 10)' % (self.DozA.qty_available))
# Check quants and available quantity for product SDozA
quants = self.StockQuantObj.search([('product_id', '=', self.SDozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 10.5, 'Expecting 10.5 SDozen , got %.4f SDozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozA.qty_available, 10.5, 'Wrong quantity available (%s found instead of 10.5)' % (self.SDozA.qty_available))
# Check quants and available quantity for product SDozARound
quants = self.StockQuantObj.search([('product_id', '=', self.SDozARound.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 11, 'Expecting 11 SDozenRound , got %.4f SDozenRound on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozARound.qty_available, 11, 'Wrong quantity available (%s found instead of 11)' % (self.SDozARound.qty_available))
# Check quants and available quantity for product gB
quants = self.StockQuantObj.search([('product_id', '=', self.gB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 525.3, 'Expecting 525.3 gram , got %.4f gram on location stock!' % (sum(total_qty)))
self.assertEqual(self.gB.qty_available, 525.3, 'Wrong quantity available (%s found instead of 525.3' % (self.gB.qty_available))
# Check quants and available quantity for product kgB
quants = self.StockQuantObj.search([('product_id', '=', self.kgB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 0.020, 'Expecting 0.020 kg , got %.4f kg on location stock!' % (sum(total_qty)))
self.assertEqual(self.kgB.qty_available, 0.020, 'Wrong quantity available (%s found instead of 0.020)' % (self.kgB.qty_available))
# ----------------------------------------------------------------------
# Create Incoming Shipment B
# ----------------------------------------------------------------------
picking_in_B = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in})
self.MoveObj.create({
'name': self.DozA.name,
'product_id': self.DozA.id,
'product_uom_qty': 120,
'product_uom': self.uom_unit.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.SDozA.name,
'product_id': self.SDozA.id,
'product_uom_qty': 1512,
'product_uom': self.uom_unit.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.SDozARound.name,
'product_id': self.SDozARound.id,
'product_uom_qty': 1584,
'product_uom': self.uom_unit.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 20.0,
'product_uom': self.uom_gm.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.gB.name,
'product_id': self.gB.id,
'product_uom_qty': 0.525,
'product_uom': self.uom_kg.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment move lines state.
for move in picking_in_B.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
# Confirm incoming shipment.
picking_in_B.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in_B.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
picking_in_B.do_prepare_partial()
# ----------------------------------------------------------------------
        # Check product quantity and unit of measure of pack operation.
# ----------------------------------------------------------------------
# Check pack operation quantity and unit of measure for product DozA.
PackdozA = self.StockPackObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(PackdozA.product_qty, 120, 'Wrong quantity in pack operation (%s found instead of 120)' % (PackdozA.product_qty))
self.assertEqual(PackdozA.product_uom_id.id, self.uom_unit.id, 'Wrong uom in pack operation for product DozA.')
# Check pack operation quantity and unit of measure for product SDozA.
PackSdozA = self.StockPackObj.search([('product_id', '=', self.SDozA.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(PackSdozA.product_qty, 1512, 'Wrong quantity in pack operation (%s found instead of 1512)' % (PackSdozA.product_qty))
self.assertEqual(PackSdozA.product_uom_id.id, self.uom_unit.id, 'Wrong uom in pack operation for product SDozA.')
# Check pack operation quantity and unit of measure for product SDozARound.
PackSdozAround = self.StockPackObj.search([('product_id', '=', self.SDozARound.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(PackSdozAround.product_qty, 1584, 'Wrong quantity in pack operation (%s found instead of 1584)' % (PackSdozAround.product_qty))
self.assertEqual(PackSdozAround.product_uom_id.id, self.uom_unit.id, 'Wrong uom in pack operation for product SDozARound.')
# Check pack operation quantity and unit of measure for product gB.
packgB = self.StockPackObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(packgB.product_qty, 525, 'Wrong quantity in pack operation (%s found instead of 525)' % (packgB.product_qty))
self.assertEqual(packgB.product_uom_id.id, self.uom_gm.id, 'Wrong uom in pack operation for product gB.')
# Check pack operation quantity and unit of measure for product kgB.
packkgB = self.StockPackObj.search([('product_id', '=', self.kgB.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(packkgB.product_qty, 20.0, 'Wrong quantity in pack operation (%s found instead of 20)' % (packkgB.product_qty))
self.assertEqual(packkgB.product_uom_id.id, self.uom_gm.id, 'Wrong uom in pack operation for product kgB')
# ----------------------------------------------------------------------
# Replace pack operation of incoming shipment.
# ----------------------------------------------------------------------
self.StockPackObj.search([('product_id', '=', self.kgB.id), ('picking_id', '=', picking_in_B.id)]).write({
'product_qty': 0.020, 'product_uom_id': self.uom_kg.id})
self.StockPackObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_in_B.id)]).write({
'product_qty': 525.3, 'product_uom_id': self.uom_gm.id})
self.StockPackObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', picking_in_B.id)]).write({
'product_qty': 4, 'product_uom_id': self.uom_dozen.id})
self.StockPackObj.create({
'product_id': self.DozA.id,
'product_qty': 48,
'product_uom_id': self.uom_unit.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': picking_in_B.id})
# Transfer product.
# -----------------
picking_in_B.do_transfer()
#-----------------------------------------------------------------------
# Check incoming shipment
#-----------------------------------------------------------------------
# Check incoming shipment state.
self.assertEqual(picking_in_B.state, 'done', 'Incoming shipment state should be done.')
# Check incoming shipment move lines state.
for move in picking_in_B.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
# Check total done move lines for incoming shipment.
self.assertEqual(len(picking_in_B.move_lines), 6, 'Wrong number of move lines')
# Check product DozA done quantity.
moves_DozA = self.MoveObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(moves_DozA.product_uom_qty, 96, 'Wrong move quantity (%s found instead of 96)' % (moves_DozA.product_uom_qty))
self.assertEqual(moves_DozA.product_uom.id, self.uom_unit.id, 'Wrong uom in move for product DozA.')
# Check product SDozA done quantity.
moves_SDozA = self.MoveObj.search([('product_id', '=', self.SDozA.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(moves_SDozA.product_uom_qty, 1512, 'Wrong move quantity (%s found instead of 1512)' % (moves_SDozA.product_uom_qty))
self.assertEqual(moves_SDozA.product_uom.id, self.uom_unit.id, 'Wrong uom in move for product SDozA.')
# Check product SDozARound done quantity.
moves_SDozARound = self.MoveObj.search([('product_id', '=', self.SDozARound.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(moves_SDozARound.product_uom_qty, 1584, 'Wrong move quantity (%s found instead of 1584)' % (moves_SDozARound.product_uom_qty))
self.assertEqual(moves_SDozARound.product_uom.id, self.uom_unit.id, 'Wrong uom in move for product SDozARound.')
# Check product kgB done quantity.
moves_kgB = self.MoveObj.search([('product_id', '=', self.kgB.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(moves_kgB.product_uom_qty, 20, 'Wrong quantity in move (%s found instead of 20)' % (moves_kgB.product_uom_qty))
self.assertEqual(moves_kgB.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product kgB.')
# Check two moves created for product gB with quantity (0.525 kg and 0.3 g)
moves_gB_kg = self.MoveObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_in_B.id), ('product_uom', '=', self.uom_kg.id)], limit=1)
self.assertEqual(moves_gB_kg.product_uom_qty, 0.525, 'Wrong move quantity (%s found instead of 0.525)' % (moves_gB_kg.product_uom_qty))
self.assertEqual(moves_gB_kg.product_uom.id, self.uom_kg.id, 'Wrong uom in move for product gB.')
moves_gB_g = self.MoveObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_in_B.id), ('product_uom', '=', self.uom_gm.id)], limit=1)
self.assertEqual(moves_gB_g.product_uom_qty, 0.3, 'Wrong move quantity (%s found instead of 0.3)' % (moves_gB_g.product_uom_qty))
self.assertEqual(moves_gB_g.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product gB.')
# ----------------------------------------------------------------------
# Check Back order of Incoming shipment.
# ----------------------------------------------------------------------
# Check back order created or not.
bo_in_B = self.PickingObj.search([('backorder_id', '=', picking_in_B.id)])
self.assertEqual(len(bo_in_B), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_in_B.move_lines), 1, 'Wrong number of move lines')
# Check back order created with correct quantity and uom or not.
moves_DozA = self.MoveObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', bo_in_B.id)], limit=1)
self.assertEqual(moves_DozA.product_uom_qty, 24.0, 'Wrong move quantity (%s found instead of 0.525)' % (moves_DozA.product_uom_qty))
self.assertEqual(moves_DozA.product_uom.id, self.uom_unit.id, 'Wrong uom in move for product DozA.')
# ----------------------------------------------------------------------
# Check product stock location quantity and quantity available.
# ----------------------------------------------------------------------
# Check quants and available quantity for product DozA
quants = self.StockQuantObj.search([('product_id', '=', self.DozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 18, 'Expecting 18 Dozen , got %.4f Dozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.DozA.qty_available, 18, 'Wrong quantity available (%s found instead of 18)' % (self.DozA.qty_available))
# Check quants and available quantity for product SDozA
quants = self.StockQuantObj.search([('product_id', '=', self.SDozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 21, 'Expecting 18 SDozen , got %.4f SDozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozA.qty_available, 21, 'Wrong quantity available (%s found instead of 21)' % (self.SDozA.qty_available))
# Check quants and available quantity for product SDozARound
quants = self.StockQuantObj.search([('product_id', '=', self.SDozARound.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 22, 'Expecting 22 SDozenRound , got %.4f SDozenRound on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozARound.qty_available, 22, 'Wrong quantity available (%s found instead of 22)' % (self.SDozARound.qty_available))
# Check quants and available quantity for product gB.
quants = self.StockQuantObj.search([('product_id', '=', self.gB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 1050.6, 'Expecting 1050.6 Gram , got %.4f Gram on location stock!' % (sum(total_qty)))
self.assertEqual(self.gB.qty_available, 1050.6, 'Wrong quantity available (%s found instead of 1050.6)' % (self.gB.qty_available))
# Check quants and available quantity for product kgB.
quants = self.StockQuantObj.search([('product_id', '=', self.kgB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 0.040, 'Expecting 0.040 kg , got %.4f kg on location stock!' % (sum(total_qty)))
self.assertEqual(self.kgB.qty_available, 0.040, 'Wrong quantity available (%s found instead of 0.040)' % (self.kgB.qty_available))
# ----------------------------------------------------------------------
# Create outgoing shipment.
# ----------------------------------------------------------------------
before_out_quantity = self.kgB.qty_available
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 0.966,
'product_uom': self.uom_gm.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 0.034,
'product_uom': self.uom_gm.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.action_confirm()
picking_out.action_assign()
picking_out.do_prepare_partial()
picking_out.do_transfer()
# Check quantity difference after stock transfer.
quantity_diff = before_out_quantity - self.kgB.qty_available
self.assertEqual(float_round(quantity_diff, precision_rounding=0.0001), 0.001, 'Wrong quantity diffrence.')
self.assertEqual(self.kgB.qty_available, 0.039, 'Wrong quantity available (%s found instead of 0.039)' % (self.kgB.qty_available))
# ======================================================================
# Outgoing shipments.
# ======================================================================
# Create Outgoing shipment with ...
# product DozA ( 54 Unit ) , SDozA ( 288 Unit )
# product SDozRound ( 360 unit ) , product gB ( 0.503 kg )
# product kgB ( 19 g )
# ======================================================================
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out})
self.MoveObj.create({
'name': self.DozA.name,
'product_id': self.DozA.id,
'product_uom_qty': 54,
'product_uom': self.uom_unit.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.SDozA.name,
'product_id': self.SDozA.id,
'product_uom_qty': 288,
'product_uom': self.uom_unit.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.SDozARound.name,
'product_id': self.SDozARound.id,
'product_uom_qty': 360,
'product_uom': self.uom_unit.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.gB.name,
'product_id': self.gB.id,
'product_uom_qty': 0.503,
'product_uom': self.uom_kg.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 20,
'product_uom': self.uom_gm.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
# Confirm outgoing shipment.
picking_out.action_confirm()
for move in picking_out.move_lines:
self.assertEqual(move.state, 'confirmed', 'Wrong state of move line.')
# Assing product to outgoing shipments
picking_out.action_assign()
for move in picking_out.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# Check product A available quantity
DozA_qty = self.MoveObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(DozA_qty, 4.5, 'Wrong move quantity availability (%s found instead of 4.5)' % (DozA_qty))
# Check product B available quantity
SDozA_qty = self.MoveObj.search([('product_id', '=', self.SDozA.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(SDozA_qty, 2, 'Wrong move quantity availability (%s found instead of 2)' % (SDozA_qty))
# Check product C available quantity
SDozARound_qty = self.MoveObj.search([('product_id', '=', self.SDozARound.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(SDozARound_qty, 3, 'Wrong move quantity availability (%s found instead of 3)' % (SDozARound_qty))
# Check product D available quantity
gB_qty = self.MoveObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(gB_qty, 503, 'Wrong move quantity availability (%s found instead of 503)' % (gB_qty))
# Check product D available quantity
kgB_qty = self.MoveObj.search([('product_id', '=', self.kgB.id), ('picking_id', '=', picking_out.id)], limit=1).availability
self.assertEqual(kgB_qty, 0.020, 'Wrong move quantity availability (%s found instead of 0.020)' % (kgB_qty))
picking_out.action_confirm()
picking_out.action_assign()
picking_out.do_prepare_partial()
picking_out.do_transfer()
# ----------------------------------------------------------------------
# Check product stock location quantity and quantity available.
# ----------------------------------------------------------------------
# Check quants and available quantity for product DozA
quants = self.StockQuantObj.search([('product_id', '=', self.DozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 13.5, 'Expecting 13.5 Dozen , got %.4f Dozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.DozA.qty_available, 13.5, 'Wrong quantity available (%s found instead of 13.5)' % (self.DozA.qty_available))
# Check quants and available quantity for product SDozA
quants = self.StockQuantObj.search([('product_id', '=', self.SDozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 19, 'Expecting 19 SDozen , got %.4f SDozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozA.qty_available, 19, 'Wrong quantity available (%s found instead of 19)' % (self.SDozA.qty_available))
# Check quants and available quantity for product SDozARound
quants = self.StockQuantObj.search([('product_id', '=', self.SDozARound.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 19, 'Expecting 19 SDozRound , got %.4f SDozRound on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozARound.qty_available, 19, 'Wrong quantity available (%s found instead of 19)' % (self.SDozARound.qty_available))
# Check quants and available quantity for product gB.
quants = self.StockQuantObj.search([('product_id', '=', self.gB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(float_round(sum(total_qty), precision_rounding=0.0001), 547.6, 'Expecting 547.6 g , got %.4f g on location stock!' % (sum(total_qty)))
self.assertEqual(self.gB.qty_available, 547.6, 'Wrong quantity available (%s found instead of 547.6)' % (self.gB.qty_available))
# Check quants and available quantity for product kgB.
quants = self.StockQuantObj.search([('product_id', '=', self.kgB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 0.019, 'Expecting 0.019 kg , got %.4f kg on location stock!' % (sum(total_qty)))
self.assertEqual(self.kgB.qty_available, 0.019, 'Wrong quantity available (%s found instead of 0.019)' % (self.kgB.qty_available))
# ----------------------------------------------------------------------
# Receipt back order of incoming shipment.
# ----------------------------------------------------------------------
bo_in_B.do_prepare_partial()
bo_in_B.do_transfer()
# Check quants and available quantity for product kgB.
quants = self.StockQuantObj.search([('product_id', '=', self.DozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 15.5, 'Expecting 15.5 Dozen , got %.4f Dozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.DozA.qty_available, 15.5, 'Wrong quantity available (%s found instead of 15.5)' % (self.DozA.qty_available))
# -----------------------------------------
# Create product in kg and receive in ton.
# -----------------------------------------
productKG = self.ProductObj.create({'name': 'Product KG', 'uom_id': self.uom_kg.id, 'uom_po_id': self.uom_kg.id})
picking_in = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in})
self.MoveObj.create({
'name': productKG.name,
'product_id': productKG.id,
'product_uom_qty': 1.0,
'product_uom': self.uom_tone.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment state.
self.assertEqual(picking_in.state, 'draft', 'Incoming shipment state should be draft.')
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
# Confirm incoming shipment.
picking_in.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
picking_in.do_prepare_partial()
# Check pack operation quantity.
packKG = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', picking_in.id)], limit=1)
self.assertEqual(packKG.product_qty, 1000, 'Wrong product quantity in pack operation (%s found instead of 1000)' % (packKG.product_qty))
self.assertEqual(packKG.product_uom_id.id, self.uom_kg.id, 'Wrong product uom in pack operation.')
# Transfer Incoming shipment.
picking_in.do_transfer()
#-----------------------------------------------------------------------
# Check incoming shipment after transfer.
#-----------------------------------------------------------------------
# Check incoming shipment state.
self.assertEqual(picking_in.state, 'done', 'Incoming shipment state should be done.')
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move lines.')
# Check total done move lines for incoming shipment.
self.assertEqual(len(picking_in.move_lines), 1, 'Wrong number of move lines')
# Check product DozA done quantity.
move = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', picking_in.id)], limit=1)
self.assertEqual(move.product_uom_qty, 1, 'Wrong product quantity in done move.')
self.assertEqual(move.product_uom.id, self.uom_tone.id, 'Wrong unit of measure in done move.')
self.assertEqual(productKG.qty_available, 1000, 'Wrong quantity available of product (%s found instead of 1000)' % (productKG.qty_available))
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out})
self.MoveObj.create({
'name': productKG.name,
'product_id': productKG.id,
'product_uom_qty': 2.5,
'product_uom': self.uom_gm.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.action_confirm()
picking_out.action_assign()
picking_out.do_prepare_partial()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', picking_out.id)], limit=1)
pack_opt.write({'product_qty': 0.5})
picking_out.do_transfer()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
# Check total quantity stock location.
self.assertEqual(sum(total_qty), 999.9995, 'Expecting 999.9995 kg , got %.4f kg on location stock!' % (sum(total_qty)))
# Check Back order created or not.
#---------------------------------
bo_out_1 = self.PickingObj.search([('backorder_id', '=', picking_out.id)])
self.assertEqual(len(bo_out_1), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_out_1.move_lines), 1, 'Wrong number of move lines')
moves_KG = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_1.id)], limit=1)
# Check back order created with correct quantity and uom or not.
self.assertEqual(moves_KG.product_uom_qty, 2.0, 'Wrong move quantity (%s found instead of 2.0)' % (moves_KG.product_uom_qty))
self.assertEqual(moves_KG.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product KG.')
bo_out_1.action_assign()
bo_out_1.do_prepare_partial()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_1.id)], limit=1)
pack_opt.write({'product_qty': 0.5})
bo_out_1.do_transfer()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
# Check total quantity stock location.
self.assertEqual(sum(total_qty), 999.9990, 'Expecting 999.9990 kg , got %.4f kg on location stock!' % (sum(total_qty)))
# Check Back order created or not.
#---------------------------------
bo_out_2 = self.PickingObj.search([('backorder_id', '=', bo_out_1.id)])
self.assertEqual(len(bo_out_2), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_out_2.move_lines), 1, 'Wrong number of move lines')
# Check back order created with correct move quantity and uom or not.
moves_KG = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_2.id)], limit=1)
self.assertEqual(moves_KG.product_uom_qty, 1.5, 'Wrong move quantity (%s found instead of 1.5)' % (moves_KG.product_uom_qty))
self.assertEqual(moves_KG.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product KG.')
bo_out_2.action_assign()
bo_out_2.do_prepare_partial()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_2.id)], limit=1)
pack_opt.write({'product_qty': 0.5})
bo_out_2.do_transfer()
# Check total quantity stock location of product KG.
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 999.9985, 'Expecting 999.9985 kg , got %.4f kg on location stock!' % (sum(total_qty)))
# Check Back order created or not.
#---------------------------------
bo_out_3 = self.PickingObj.search([('backorder_id', '=', bo_out_2.id)])
self.assertEqual(len(bo_out_3), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_out_3.move_lines), 1, 'Wrong number of move lines')
# Check back order created with correct quantity and uom or not.
moves_KG = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_3.id)], limit=1)
self.assertEqual(moves_KG.product_uom_qty, 1, 'Wrong move quantity (%s found instead of 1.0)' % (moves_KG.product_uom_qty))
self.assertEqual(moves_KG.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product KG.')
bo_out_3.action_assign()
bo_out_3.do_prepare_partial()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_3.id)], limit=1)
pack_opt.write({'product_qty': 0.5})
bo_out_3.do_transfer()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 999.9980, 'Expecting 999.9980 kg , got %.4f kg on location stock!' % (sum(total_qty)))
# Check Back order created or not.
#---------------------------------
bo_out_4 = self.PickingObj.search([('backorder_id', '=', bo_out_3.id)])
self.assertEqual(len(bo_out_4), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_out_4.move_lines), 1, 'Wrong number of move lines')
# Check back order created with correct quantity and uom or not.
moves_KG = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_4.id)], limit=1)
self.assertEqual(moves_KG.product_uom_qty, 0.5, 'Wrong move quantity (%s found instead of 0.5)' % (moves_KG.product_uom_qty))
self.assertEqual(moves_KG.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product KG.')
bo_out_4.action_assign()
bo_out_4.do_prepare_partial()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_4.id)], limit=1)
pack_opt.write({'product_qty': 0.5})
bo_out_4.do_transfer()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 999.9975, 'Expecting 999.9975 kg , got %.4f kg on location stock!' % (sum(total_qty)))
def test_20_create_inventory_with_different_uom(self):
"""Create inventory with different unit of measure."""
# ------------------------------------------------
# Test inventory with product A(Unit).
# ------------------------------------------------
inventory = self.InvObj.create({'name': 'Test',
'product_id': self.UnitA.id,
'filter': 'product'})
inventory.prepare_inventory()
self.assertFalse(inventory.line_ids, "Inventory line should not created.")
inventory_line = self.InvLineObj.create({
'inventory_id': inventory.id,
'product_id': self.UnitA.id,
'product_uom_id': self.uom_dozen.id,
'product_qty': 10,
'location_id': self.stock_location})
inventory.action_done()
# Check quantity available of product UnitA.
quants = self.StockQuantObj.search([('product_id', '=', self.UnitA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 120, 'Expecting 120 Units , got %.4f Units on location stock!' % (sum(total_qty)))
self.assertEqual(self.UnitA.qty_available, 120, 'Expecting 120 Units , got %.4f Units of quantity available!' % (self.UnitA.qty_available))
# Create Inventory again for product UnitA.
inventory = self.InvObj.create({'name': 'Test',
'product_id': self.UnitA.id,
'filter': 'product'})
inventory.prepare_inventory()
self.assertEqual(len(inventory.line_ids), 1, "One inventory line should be created.")
inventory_line = self.InvLineObj.search([('product_id', '=', self.UnitA.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(inventory_line.product_qty, 120, "Wrong product quantity in inventory line.")
# Modify the inventory line and set the quantity to 144 product on this new inventory.
inventory_line.write({'product_qty': 144})
inventory.action_done()
move = self.MoveObj.search([('product_id', '=', self.UnitA.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(move.product_uom_qty, 24, "Wrong move quantity of product UnitA.")
# Check quantity available of product UnitA.
quants = self.StockQuantObj.search([('product_id', '=', self.UnitA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 144, 'Expecting 144 Units , got %.4f Units on location stock!' % (sum(total_qty)))
self.assertEqual(self.UnitA.qty_available, 144, 'Expecting 144 Units , got %.4f Units of quantity available!' % (self.UnitA.qty_available))
# ------------------------------------------------
# Test inventory with product KG.
# ------------------------------------------------
productKG = self.ProductObj.create({'name': 'Product KG', 'uom_id': self.uom_kg.id, 'uom_po_id': self.uom_kg.id})
inventory = self.InvObj.create({'name': 'Inventory Product KG',
'product_id': productKG.id,
'filter': 'product'})
inventory.prepare_inventory()
self.assertFalse(inventory.line_ids, "Inventory line should not created.")
inventory_line = self.InvLineObj.create({
'inventory_id': inventory.id,
'product_id': productKG.id,
'product_uom_id': self.uom_tone.id,
'product_qty': 5,
'location_id': self.stock_location})
inventory.action_done()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 5000, 'Expecting 5000 kg , got %.4f kg on location stock!' % (sum(total_qty)))
self.assertEqual(productKG.qty_available, 5000, 'Expecting 5000 kg , got %.4f kg of quantity available!' % (productKG.qty_available))
# Create Inventory again.
inventory = self.InvObj.create({'name': 'Test',
'product_id': productKG.id,
'filter': 'product'})
inventory.prepare_inventory()
self.assertEqual(len(inventory.line_ids), 1, "One inventory line should be created.")
inventory_line = self.InvLineObj.search([('product_id', '=', productKG.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(inventory_line.product_qty, 5000, "Wrong product quantity in inventory line.")
# Modify the inventory line and set the quantity to 4000 product on this new inventory.
inventory_line.write({'product_qty': 4000})
inventory.action_done()
# Check inventory move quantity of product KG.
move = self.MoveObj.search([('product_id', '=', productKG.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(move.product_uom_qty, 1000, "Wrong move quantity of product KG.")
# Check quantity available of product KG.
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.qty for quant in quants]
self.assertEqual(sum(total_qty), 4000, 'Expecting 4000 kg , got %.4f on location stock!' % (sum(total_qty)))
self.assertEqual(productKG.qty_available, 4000, 'Expecting 4000 kg , got %.4f of quantity available!' % (productKG.qty_available))
#--------------------------------------------------------
# TEST PARTIAL INVENTORY WITH PACKS and LOTS
#---------------------------------------------------------
packproduct = self.ProductObj.create({'name': 'Pack Product', 'uom_id': self.uom_unit.id, 'uom_po_id': self.uom_unit.id})
lotproduct = self.ProductObj.create({'name': 'Lot Product', 'uom_id': self.uom_unit.id, 'uom_po_id': self.uom_unit.id})
inventory = self.InvObj.create({'name': 'Test Partial and Pack',
'filter': 'partial',
'location_id': self.stock_location})
inventory.prepare_inventory()
pack_obj = self.env['stock.quant.package']
lot_obj = self.env['stock.production.lot']
pack1 = pack_obj.create({'name': 'PACK00TEST1'})
pack2 = pack_obj.create({'name': 'PACK00TEST2'})
lot1 = lot_obj.create({'name': 'Lot001', 'product_id': lotproduct.id})
move = self.MoveObj.search([('product_id', '=', productKG.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(len(move), 0, "Partial filter should not create a lines upon prepare")
line_vals = []
line_vals += [{'location_id': self.stock_location, 'product_id': packproduct.id, 'product_qty': 10, 'product_uom_id': packproduct.uom_id.id}]
line_vals += [{'location_id': self.stock_location, 'product_id': packproduct.id, 'product_qty': 20, 'product_uom_id': packproduct.uom_id.id, 'package_id': pack1.id}]
line_vals += [{'location_id': self.stock_location, 'product_id': lotproduct.id, 'product_qty': 30, 'product_uom_id': lotproduct.uom_id.id, 'prod_lot_id': lot1.id}]
line_vals += [{'location_id': self.stock_location, 'product_id': lotproduct.id, 'product_qty': 25, 'product_uom_id': lotproduct.uom_id.id, 'prod_lot_id': False}]
inventory.write({'line_ids': [(0, 0, x) for x in line_vals]})
inventory.action_done()
self.assertEqual(packproduct.qty_available, 30, "Wrong qty available for packproduct")
self.assertEqual(lotproduct.qty_available, 55, "Wrong qty available for lotproduct")
quants = self.StockQuantObj.search([('product_id', '=', packproduct.id), ('location_id', '=', self.stock_location), ('package_id', '=', pack1.id)])
total_qty = sum([quant.qty for quant in quants])
self.assertEqual(total_qty, 20, 'Expecting 20 units on package 1 of packproduct, but we got %.4f on location stock!' % (total_qty))
#Create an inventory that will put the lots without lot to 0 and check that taking without pack will not take it from the pack
inventory2 = self.InvObj.create({'name': 'Test Partial Lot and Pack2',
'filter': 'partial',
'location_id': self.stock_location})
inventory2.prepare_inventory()
line_vals = []
line_vals += [{'location_id': self.stock_location, 'product_id': packproduct.id, 'product_qty': 20, 'product_uom_id': packproduct.uom_id.id}]
line_vals += [{'location_id': self.stock_location, 'product_id': lotproduct.id, 'product_qty': 0, 'product_uom_id': lotproduct.uom_id.id, 'prod_lot_id': False}]
line_vals += [{'location_id': self.stock_location, 'product_id': lotproduct.id, 'product_qty': 10, 'product_uom_id': lotproduct.uom_id.id, 'prod_lot_id': lot1.id}]
inventory2.write({'line_ids': [(0, 0, x) for x in line_vals]})
inventory2.action_done()
self.assertEqual(packproduct.qty_available, 40, "Wrong qty available for packproduct")
self.assertEqual(lotproduct.qty_available, 10, "Wrong qty available for lotproduct")
quants = self.StockQuantObj.search([('product_id', '=', lotproduct.id), ('location_id', '=', self.stock_location), ('lot_id', '=', lot1.id)])
total_qty = sum([quant.qty for quant in quants])
self.assertEqual(total_qty, 10, 'Expecting 0 units lot of lotproduct, but we got %.4f on location stock!' % (total_qty))
quants = self.StockQuantObj.search([('product_id', '=', lotproduct.id), ('location_id', '=', self.stock_location), ('lot_id', '=', False)])
total_qty = sum([quant.qty for quant in quants])
self.assertEqual(total_qty, 0, 'Expecting 0 units lot of lotproduct, but we got %.4f on location stock!' % (total_qty)) |
DirtyUnicorns/android_kernel_lge_msm8974 | refs/heads/n7x-caf | scripts/gcc-wrapper.py | 181 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Whitelisted gcc warnings, keyed as "<basename>:<line>" (matches group 2 of
# warning_re below).  A warning at any other location fails the build.
allowed_warnings = set([
    "return_address.c:62",
    "hci_conn.c:407",
    "cpufreq_interactive.c:804",
    "cpufreq_interactive.c:847",
    "ene_ub6250.c:2118",
])

# Path of the output object file (from the -o argument), so it can be removed
# when a forbidden warning is found; set by run_gcc().
ofile = None

# Matches gcc diagnostics like "path/file.c:123:45: warning:"; group 2 is the
# "file.c:123" key compared against allowed_warnings.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc.  The messages we care about have a filename, and a warning.

    Exits the process with status 1 (after deleting the partially-built
    object file, if known) when the warning is not whitelisted.
    NOTE: Python 2 source (print statements).
    """
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        sys.exit(1)
def run_gcc():
    """Run the real compiler (argv[1:]), scanning stderr for warnings.

    Each stderr line is echoed and passed to interpret_warning(), which may
    terminate the process.  Returns the compiler's exit status, or the errno
    when the compiler could not be started.
    NOTE: Python 2 source (print statements, `for line in proc.stderr`).
    """
    args = sys.argv[1:]
    # Look for -o so interpret_warning() can delete the object on failure.
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass

    # NOTE(review): 'compiler' is assigned but never used below -- the real
    # compiler invoked is args[0]; confirm before removing.
    compiler = sys.argv[0]

    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        # Could not exec the compiler at all: report and return the errno.
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)
    return result

if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
|
QGuLL/samba | refs/heads/master | third_party/waf/wafadmin/Configure.py | 32 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
Configuration system
A configuration instance is created when "waf configure" is called, it is used to:
* create data dictionaries (Environment instances)
* store the list of modules to import
The old model (copied from Scons) was to store logic (mapping file extensions to functions)
along with the data. In Waf a way was found to separate that logic by adding an indirection
layer (storing the names in the Environment instances)
In the new model, the logic is more object-oriented, and the user scripts provide the
logic. The data files (Environments) must contain configuration data only (flags, ..).
Note: the c/c++ related code is in the module config_c
"""
import os, shlex, sys, time
try: import cPickle
except ImportError: import pickle as cPickle
import Environment, Utils, Options, Logs
from Logs import warn
from Constants import *
try:
from urllib import request
except:
from urllib import urlopen
else:
urlopen = request.urlopen
# Header written at the top of config.log.  It is expanded with vars() in
# ConfigurationContext.post_init(), so the %(...)s keys must match the local
# variable names there (app, now, wafver, abi, pyver, systype, args).
conf_template = '''# project %(app)s configured on %(now)s by
# waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s)
# using %(args)s
#
'''

class ConfigurationError(Utils.WscriptError):
    """Error raised when the configuration phase cannot proceed."""
    pass

# When True, wscripts recursed during configuration are hashed and recorded
# (see post_recurse) so the project can be reconfigured automatically.
autoconfig = False
"reconfigure the project automatically"
def find_file(filename, path_list):
    """Locate a file in a list of directories.

    @param filename: name of the file to search for
    @param path_list: list (or space-separated string) of directories
    @return: the first directory containing filename, or '' when none does
    """
    for folder in Utils.to_list(path_list):
        candidate = os.path.join(folder, filename)
        if os.path.exists(candidate):
            return folder
    return ''
def find_program_impl(env, filename, path_list=[], var=None, environ=None):
    """find a program in folders path_lst, and sets env[var]

    @param env: environment
    @param filename: name of the program to search for
    @param path_list: list of directories to search for filename
    @param var: environment value to be checked for in env or os.environ
    @return: either the value that is referenced with [var] in env or os.environ
    or the first occurrence filename or '' if filename could not be found
    """
    if not environ:
        environ = os.environ

    # Accept a space-separated string as well as a list.
    # (path_list's mutable default is never mutated: split() rebinds it.)
    try: path_list = path_list.split()
    except AttributeError: pass

    if var:
        # Cached value in env wins outright.
        if env[var]: return env[var]
        # NOTE(review): the value copied from os.environ is NOT returned here;
        # the PATH search below still runs and may overwrite env[var] with the
        # located executable -- confirm this precedence is intended.
        if var in environ: env[var] = environ[var]

    if not path_list: path_list = environ.get('PATH', '').split(os.pathsep)

    # On win32, also try the executable extensions.
    ext = (Options.platform == 'win32') and '.exe,.com,.bat,.cmd' or ''
    for y in [filename+x for x in ext.split(',')]:
        for directory in path_list:
            x = os.path.join(directory, y)
            if os.path.isfile(x):
                if var: env[var] = x
                return x
    return ''
class ConfigurationContext(Utils.Context):
    """Context used during "waf configure": holds the environments
    (all_envs), the list of loaded tools and the configuration log."""
    # NOTE(review): class-level (shared, mutable) registries; they are
    # populated outside this excerpt -- confirm intended to be per-class,
    # not per-instance.
    tests = {}
    error_handlers = []
    def __init__(self, env=None, blddir='', srcdir=''):
        """Create the configuration context.

        @param env: unused here (the default environment is created through
            setenv(DEFAULT)); kept for signature compatibility
        @param blddir: build directory; when non-empty, post_init() opens
            the configuration log there
        @param srcdir: source directory
        """
        self.env = None
        self.envname = ''

        # Snapshot of the process environment (mutable copy).
        self.environ = dict(os.environ)

        self.line_just = 40

        self.blddir = blddir
        self.srcdir = srcdir
        self.all_envs = {}

        # curdir: necessary for recursion
        self.cwd = self.curdir = os.getcwd()

        self.tools = [] # tools loaded in the configuration, and that will be loaded when building

        # Creates and activates the default environment (needs all_envs set).
        self.setenv(DEFAULT)

        self.lastprog = ''

        self.hash = 0
        self.files = []

        self.tool_cache = []

        if self.blddir:
            self.post_init()
    def post_init(self):
        """Open config.log in the build directory and write its header.

        Called from __init__ once self.blddir is known.  Any pre-existing
        log is removed first; failure to open the new one is fatal.
        """
        self.cachedir = os.path.join(self.blddir, CACHE_DIR)

        path = os.path.join(self.blddir, WAF_CONFIG_LOG)
        try: os.unlink(path)
        except (OSError, IOError): pass

        try:
            self.log = open(path, 'w')
        except (OSError, IOError):
            self.fatal('could not open %r for writing' % path)

        app = Utils.g_module.APPNAME
        if app:
            ver = getattr(Utils.g_module, 'VERSION', '')
            if ver:
                app = "%s (%s)" % (app, ver)

        now = time.ctime()
        pyver = sys.hexversion
        systype = sys.platform
        args = " ".join(sys.argv)
        wafver = WAFVERSION
        abi = ABI

        # conf_template is expanded with vars(): the local variable names
        # above (app, now, pyver, systype, args, wafver, abi) are part of the
        # template's contract -- do not rename them.
        self.log.write(conf_template % vars())
def __del__(self):
"""cleanup function: close config.log"""
# may be ran by the gc, not always after initialization
if hasattr(self, 'log') and self.log:
self.log.close()
    def fatal(self, msg):
        """Abort the configuration by raising a ConfigurationError."""
        raise ConfigurationError(msg)
    def check_tool(self, input, tooldir=None, funs=None):
        """Load one or several waf tools during configuration.

        @param input: tool name or list/space-separated string of tool names
        @param tooldir: optional folder(s) to search for the tool modules
        @param funs: optional functions to run instead of the tool's detect()
        If loading fails and --download is enabled, the tool is fetched from
        the remote repository and the import is retried.
        NOTE: Python 2 source ("except Exception, e" syntax).
        """
        tools = Utils.to_list(input)
        if tooldir: tooldir = Utils.to_list(tooldir)
        for tool in tools:
            # Normalize historic tool names (c++ -> cxx, etc.).
            tool = tool.replace('++', 'xx')
            if tool == 'java': tool = 'javaw'
            if tool.lower() == 'unittest': tool = 'unittestw'
            # avoid loading the same tool more than once with the same functions
            # used by composite projects
            mag = (tool, id(self.env), funs)
            if mag in self.tool_cache:
                continue
            self.tool_cache.append(mag)

            module = None
            try:
                module = Utils.load_tool(tool, tooldir)
            except Exception, e:
                ex = e
                if Options.options.download:
                    _3rdparty = os.path.normpath(Options.tooldir[0] + os.sep + '..' + os.sep + '3rdparty')

                    # try to download the tool from the repository then
                    # the default is set to false
                    for x in Utils.to_list(Options.remote_repo):
                        for sub in ['branches/waf-%s/wafadmin/3rdparty' % WAFVERSION, 'trunk/wafadmin/3rdparty']:
                            url = '/'.join((x, sub, tool + '.py'))
                            try:
                                web = urlopen(url)
                                if web.getcode() != 200:
                                    continue
                            except Exception, e:
                                # on python3 urlopen throws an exception
                                continue
                            else:
                                # Save the downloaded module under 3rdparty,
                                # closing the file even on write failure.
                                loc = None
                                try:
                                    loc = open(_3rdparty + os.sep + tool + '.py', 'wb')
                                    loc.write(web.read())
                                    web.close()
                                finally:
                                    if loc:
                                        loc.close()
                                Logs.warn('downloaded %s from %s' % (tool, url))
                                try:
                                    module = Utils.load_tool(tool, tooldir)
                                except:
                                    # Unusable download: remove it and try the
                                    # next mirror/branch.
                                    Logs.warn('module %s from %s is unusable' % (tool, url))
                                    try:
                                        os.unlink(_3rdparty + os.sep + tool + '.py')
                                    except:
                                        pass
                                    continue
                                else:
                                    break

                    if not module:
                        # NOTE(review): 'e' may have been rebound by the inner
                        # "except Exception, e" during the download attempts,
                        # so the message can show the download error rather
                        # than the original import error -- confirm.
                        Logs.error('Could not load the tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e))
                        raise ex
                else:
                    Logs.error('Could not load the tool %r in %r (try the --download option?):\n%s' % (tool, sys.path, e))
                    raise ex

            if funs is not None:
                self.eval_rules(funs)
            else:
                # Run the tool's detect() -- either a plain function or a
                # list of rule names evaluated through eval_rules().
                func = getattr(module, 'detect', None)
                if func:
                    if type(func) is type(find_file): func(self)
                    else: self.eval_rules(func)

            # Remember the tool so the build phase can reload it.
            self.tools.append({'tool':tool, 'tooldir':tooldir, 'funs':funs})
	def sub_config(self, k):
		"executes the configure function of a wscript module"
		# delegates to the generic recursion machinery
		self.recurse(k, name='configure')
	def pre_recurse(self, name_or_mod, path, nexdir):
		"""Hook run before recursing into a wscript: expose this context as both 'conf' and 'ctx'."""
		return {'conf': self, 'ctx': self}
	def post_recurse(self, name_or_mod, path, nexdir):
		"""Hook run after recursing into a wscript.

		When autoconfig is enabled, fold the module's hash into the running
		configuration hash and remember the file, so that a later change to
		it can trigger an automatic re-configuration.
		"""
		if not autoconfig:
			return
		self.hash = hash((self.hash, getattr(name_or_mod, 'waf_hash_val', name_or_mod)))
		self.files.append(path)
	def store(self, file=''):
		"save the config results into the cache file"
		# NOTE: the parameter 'file' shadows the Python 2 builtin; kept for API compatibility.
		if not os.path.isdir(self.cachedir):
			os.makedirs(self.cachedir)
		if not file:
			file = open(os.path.join(self.cachedir, 'build.config.py'), 'w')
		file.write('version = 0x%x\n' % HEXVERSION)
		file.write('tools = %r\n' % self.tools)
		file.close()
		if not self.all_envs:
			self.fatal('nothing to store in the configuration context!')
		# one cache file per environment/variant
		for key in self.all_envs:
			tmpenv = self.all_envs[key]
			tmpenv.store(os.path.join(self.cachedir, key + CACHE_SUFFIX))
	def set_env_name(self, name, env):
		"add a new environment called name"
		# returns the env so calls can be chained/assigned
		self.all_envs[name] = env
		return env
	def retrieve(self, name, fromenv=None):
		"retrieve an environment called name"
		# creates and registers a fresh Environment (with PREFIX set from the
		# command line) when no environment of that name exists yet
		try:
			env = self.all_envs[name]
		except KeyError:
			env = Environment.Environment()
			env['PREFIX'] = os.path.abspath(os.path.expanduser(Options.options.prefix))
			self.all_envs[name] = env
		else:
			if fromenv: warn("The environment %s may have been configured already" % name)
		return env
	def setenv(self, name):
		"enable the environment called name"
		# creates the environment on demand via retrieve()
		self.env = self.retrieve(name)
		self.envname = name
	def add_os_flags(self, var, dest=None):
		"""Append the OS environment variable *var* (split into a list) to env[dest or var]."""
		# do not use 'get' to make certain the variable is not defined
		try: self.env.append_value(dest or var, Utils.to_list(self.environ[var]))
		except KeyError: pass
	def check_message_1(self, sr):
		"""Print the left-hand side of a configuration check message and log a separator."""
		# line_just grows monotonically so all messages stay aligned
		self.line_just = max(self.line_just, len(sr))
		for x in ('\n', self.line_just * '-', '\n', sr, '\n'):
			self.log.write(x)
		Utils.pprint('NORMAL', "%s :" % sr.ljust(self.line_just), sep='')
	def check_message_2(self, sr, color='GREEN'):
		"""Print the right-hand side (result) of a configuration check message."""
		self.log.write(sr)
		self.log.write('\n')
		Utils.pprint(color, sr)
	def check_message(self, th, msg, state, option=''):
		"""Print a full 'Checking for <th> <msg> : ok/not found' line, colored by *state*."""
		sr = 'Checking for %s %s' % (th, msg)
		self.check_message_1(sr)
		p = self.check_message_2
		if state: p('ok ' + str(option))
		else: p('not found', 'YELLOW')
	# FIXME remove in waf 1.6
	# the parameter 'option' is not used (kept for compatibility)
	def check_message_custom(self, th, msg, custom, option='', color='PINK'):
		"""Like check_message, but with an arbitrary *custom* result string and color."""
		sr = 'Checking for %s %s' % (th, msg)
		self.check_message_1(sr)
		self.check_message_2(custom, color)
	def msg(self, msg, result, color=None):
		"""Prints a configuration message 'Checking for xxx: ok'"""
		self.start_msg('Checking for ' + msg)
		if not isinstance(color, str):
			# py2 'and/or' conditional: green on truthy result, yellow otherwise
			color = result and 'GREEN' or 'YELLOW'
		self.end_msg(result, color)
def start_msg(self, msg):
try:
if self.in_msg:
return
except:
self.in_msg = 0
self.in_msg += 1
self.line_just = max(self.line_just, len(msg))
for x in ('\n', self.line_just * '-', '\n', msg, '\n'):
self.log.write(x)
Utils.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
	def end_msg(self, result, color):
		"""Print the right-hand side of the message opened by start_msg.

		True prints 'ok', False prints 'not found' in yellow, and any other
		result is converted with str(). Nested calls are suppressed.
		"""
		self.in_msg -= 1
		if self.in_msg:
			return
		if not color:
			color = 'GREEN'
		if result == True:
			msg = 'ok'
		elif result == False:
			msg = 'not found'
			color = 'YELLOW'
		else:
			msg = str(result)
		self.log.write(msg)
		self.log.write('\n')
		Utils.pprint(color, msg)
def find_program(self, filename, path_list=[], var=None, mandatory=False):
"wrapper that adds a configuration message"
ret = None
if var:
if self.env[var]:
ret = self.env[var]
elif var in os.environ:
ret = os.environ[var]
if not isinstance(filename, list): filename = [filename]
if not ret:
for x in filename:
ret = find_program_impl(self.env, x, path_list, var, environ=self.environ)
if ret: break
self.check_message_1('Checking for program %s' % ' or '.join(filename))
self.log.write(' find program=%r paths=%r var=%r\n -> %r\n' % (filename, path_list, var, ret))
if ret:
Utils.pprint('GREEN', str(ret))
else:
Utils.pprint('YELLOW', 'not found')
if mandatory:
self.fatal('The program %r is required' % filename)
if var:
self.env[var] = ret
return ret
def cmd_to_list(self, cmd):
"commands may be written in pseudo shell like 'ccache g++'"
if isinstance(cmd, str) and cmd.find(' '):
try:
os.stat(cmd)
except OSError:
return shlex.split(cmd)
else:
return [cmd]
return cmd
	def __getattr__(self, name):
		"""Fallback attribute lookup: 'require_xyz' maps to 'check_xyz' or 'find_xyz'
		wrapped so that a falsy result aborts the configuration."""
		r = self.__class__.__dict__.get(name, None)
		if r: return r
		if name and name.startswith('require_'):
			for k in ['check_', 'find_']:
				n = name.replace('require_', k)
				ret = self.__class__.__dict__.get(n, None)
				if ret:
					def run(*k, **kw):
						r = ret(self, *k, **kw)
						if not r:
							self.fatal('requirement failure')
						return r
					return run
		self.fatal('No such method %r' % name)
	def eval_rules(self, rules):
		"""Execute the configuration methods named in *rules* (string or list).

		On failure, err_handler decides whether to BREAK, CONTINUE or
		(by default) abort the configuration.
		"""
		self.rules = Utils.to_list(rules)
		for x in self.rules:
			f = getattr(self, x)
			if not f: self.fatal("No such method '%s'." % x)
			try:
				f()
			except Exception, e:
				ret = self.err_handler(x, e)
				if ret == BREAK:
					break
				elif ret == CONTINUE:
					continue
				else:
					self.fatal(e)
	def err_handler(self, fun, error):
		"""Hook for eval_rules failures; override to return BREAK or CONTINUE (default: abort)."""
		pass
def conf(f):
	"decorator: attach new configuration functions"
	# the function becomes a method of ConfigurationContext under its own name
	setattr(ConfigurationContext, f.__name__, f)
	return f
def conftest(f):
	"decorator: attach new configuration tests (registered as strings)"
	# registered by name so eval_rules can look the test up later
	ConfigurationContext.tests[f.__name__] = f
	return conf(f)
|
junalmeida/Sick-Beard | refs/heads/master | lib/subliminal/converters/addic7ed.py | 32 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from babelfish import LanguageReverseConverter, language_converters
class Addic7edConverter(LanguageReverseConverter):
    """Converter between babelfish language tuples and Addic7ed display names.

    Anything not covered by the explicit tables falls back to the
    standard 'name' converter.
    """
    def __init__(self):
        self.name_converter = language_converters['name']
        # Addic7ed display name -> (alpha3[, country[, script]]) tuple
        self.from_addic7ed = {'Català': ('cat',), 'Chinese (Simplified)': ('zho',), 'Chinese (Traditional)': ('zho',),
                              'Euskera': ('eus',), 'Galego': ('glg',), 'Greek': ('ell',), 'Malay': ('msa',),
                              'Portuguese (Brazilian)': ('por', 'BR'), 'Serbian (Cyrillic)': ('srp', None, 'Cyrl'),
                              'Serbian (Latin)': ('srp',), 'Spanish (Latin America)': ('spa',),
                              'Spanish (Spain)': ('spa',)}
        # language tuple -> Addic7ed display name
        self.to_addic7ed = {('cat',): 'Català', ('zho',): 'Chinese (Simplified)', ('eus',): 'Euskera',
                            ('glg',): 'Galego', ('ell',): 'Greek', ('msa',): 'Malay',
                            ('por', 'BR'): 'Portuguese (Brazilian)', ('srp', None, 'Cyrl'): 'Serbian (Cyrillic)'}
        self.codes = self.name_converter.codes | set(self.from_addic7ed.keys())
    def convert(self, alpha3, country=None, script=None):
        """Return the Addic7ed name, trying the most specific key first."""
        for key in ((alpha3, country, script), (alpha3, country), (alpha3,)):
            if key in self.to_addic7ed:
                return self.to_addic7ed[key]
        return self.name_converter.convert(alpha3, country, script)
    def reverse(self, addic7ed):
        """Return the language tuple for an Addic7ed display name."""
        try:
            return self.from_addic7ed[addic7ed]
        except KeyError:
            return self.name_converter.reverse(addic7ed)
|
noironetworks/nova | refs/heads/master | nova/api/openstack/versioned_method.py | 97 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class VersionedMethod(object):
    """Record describing one versioned implementation of an API method."""

    def __init__(self, name, start_version, end_version, func):
        """Versioning information for a single method

        @name: Name of the method
        @start_version: Minimum acceptable version
        @end_version: Maximum acceptable_version
        @func: Method to call

        Minimum and maximums are inclusive
        """
        self.name = name
        self.start_version = start_version
        self.end_version = end_version
        self.func = func

    def __str__(self):
        return "Version Method %s: min: %s, max: %s" % (
            self.name, self.start_version, self.end_version)
|
kobotoolbox/dkobo | refs/heads/master | fabfile.py | 1 | import os
import sys
import json
import re
import requests
from fabric.api import local, hosts, cd, env, prefix, run, sudo, settings, hide
def kobo_workon(venv_name):
    """Fabric context manager that activates the named kobo virtualenv on the remote host."""
    return prefix('kobo_workon %s' % venv_name)
# Deployment targets; entries from the JSON file below override/extend these.
DEPLOYMENTS = {}
IMPORTED_DEPLOYMENTS = {}
# Path to the deployments file, overridable through the environment.
deployments_file = os.environ.get('DEPLOYMENTS_JSON', 'deployments.json')
if os.path.exists(deployments_file):
    with open(deployments_file, 'r') as f:
        IMPORTED_DEPLOYMENTS = json.load(f)
def exit_with_error(message):
    """Print *message* and terminate the process with a non-zero exit code."""
    print message
    sys.exit(1)
def check_key_filename(deployment_configs):
    """Ensure the configured SSH key file exists, expanding '~' if needed; exits on failure."""
    if 'key_filename' in deployment_configs and \
            not os.path.exists(deployment_configs['key_filename']):
        # Maybe the path contains a ~; try expanding that before failing
        deployment_configs['key_filename'] = os.path.expanduser(
            deployment_configs['key_filename']
        )
        if not os.path.exists(deployment_configs['key_filename']):
            exit_with_error("Cannot find required permissions file: %s" %
                            deployment_configs['key_filename'])
def setup_env(deployment_name):
    """Populate the Fabric env from the named deployment configuration.

    Settings are layered: static DEPLOYMENTS, then the JSON file's 'shared'
    section, then the deployment-specific JSON section.  Derived remote
    paths (virtualenv, uwsgi pidfile, project and requirements paths) are
    computed afterwards.
    """
    deployment = DEPLOYMENTS.get(deployment_name, {})
    if 'shared' in IMPORTED_DEPLOYMENTS:
        deployment.update(IMPORTED_DEPLOYMENTS['shared'])
    if deployment_name in IMPORTED_DEPLOYMENTS:
        deployment.update(IMPORTED_DEPLOYMENTS[deployment_name])
    env.update(deployment)
    check_key_filename(deployment)
    env.virtualenv = os.path.join('/home', 'ubuntu', '.virtualenvs',
                                  env.kf_virtualenv_name, 'bin', 'activate')
    env.uwsgi_pidfile = os.path.join('/home', 'ubuntu', 'pids',
                                     'kobo-uwsgi-master.pid')
    env.kf_path = os.path.join(env.home, env.kf_path)
    env.pip_requirements_file = os.path.join(env.kf_path,
                                             'requirements.txt')
def deploy_ref(deployment_name, ref):
    """Deploy git *ref* to the named deployment: check out the code, sync
    dependencies, rebuild front-end assets, migrate the database and
    reload uwsgi.  Refuses to move the server to an older commit."""
    setup_env(deployment_name)
    with cd(env.kf_path):
        run("git fetch origin")
        # Make sure we're not moving to an older codebase
        git_output = run('git rev-list {}..HEAD --count 2>&1'.format(ref))
        if int(git_output) > 0:
            raise Exception("The server's HEAD is already in front of the "
                            "commit to be deployed.")
        # We want to check out a specific commit, but this does leave the HEAD
        # detached. Perhaps consider using `git reset`.
        run('git checkout {}'.format(ref))
        # Report if the working directory is unclean.
        git_output = run('git status --porcelain')
        if len(git_output):
            run('git status')
            print('WARNING: The working directory is unclean. See above.')
        # drop stale bytecode and now-empty directories
        run('find . -name "*.pyc" -exec rm -rf {} \;')
        run('find . -type d -empty -delete')
    with kobo_workon(env.kf_virtualenv_name):
        run("pip install --upgrade 'pip>=7.0' pip-tools")
        run("pip-sync '%s'" % env.pip_requirements_file)
        with cd(env.kf_path):
            run("npm install")
            run("bower install")
            run("grunt build_all")
            run("python manage.py syncdb")
            run("python manage.py migrate")
            # Figure out whether django-compressor is enabled before trying to
            # run it. Trying to run it blindly when it's disabled will halt
            # Fabric with a non-zero exit code
            with settings(
                    hide('warnings', 'running', 'stdout'), warn_only=True):
                result = run(
                    'echo "import sys; from django.conf import settings; '
                    'sys.exit(100 if settings.COMPRESS_ENABLED else 99)" | '
                    'python'
                )
            if result.return_code == 100:
                run("python manage.py compress")
            elif result.return_code == 99:
                print 'COMPRESS_ENABLED is False. Skipping compress.'
            else:
                raise Exception(
                    'Failed to read COMPRESS_ENABLED from Django settings.')
            run("python manage.py collectstatic --noinput")
    run("sudo service uwsgi reload")
def deploy(deployment_name, branch='master'):
    """Deploy the tip of origin/<branch> to the named deployment."""
    deploy_ref(deployment_name, 'origin/{}'.format(branch))
def repopulate_summary_field(deployment_name):
    """Run the populate_summary_field management command on the remote deployment."""
    setup_env(deployment_name)
    with cd(env.kf_path):
        with kobo_workon(env.kf_virtualenv_name):
            run("python manage.py populate_summary_field")
def deploy_passing(deployment_name, branch='master'):
    ''' Deploy the latest code on the given branch that's
    been marked passing by Travis CI. '''
    print 'Asking Travis CI for the hash of the latest passing commit...'
    desired_commit = get_last_successfully_built_commit(branch)
    print 'Found passing commit {} for branch {}!'.format(desired_commit,
                                                          branch)
    deploy_ref(deployment_name, desired_commit)
def get_last_successfully_built_commit(branch):
    ''' Returns the hash of the latest successfully built commit
    on the given branch according to Travis CI. '''
    API_ENDPOINT='https://api.travis-ci.org/'
    REPO_SLUG='kobotoolbox/dkobo'
    COMMON_HEADERS={'accept': 'application/vnd.travis-ci.2+json'}
    ''' Travis only lets us specify `number`, `after_number`, and `event_type`.
    It'd be great to filter by state and branch, but it seems we can't
    (http://docs.travis-ci.com/api/?http#builds). '''
    request = requests.get(
        '{}repos/{}/builds'.format(API_ENDPOINT, REPO_SLUG),
        headers=COMMON_HEADERS
    )
    if request.status_code != 200:
        raise Exception('Travis returned unexpected code {}.'.format(
            request.status_code
        ))
    response = json.loads(request.text)
    builds = response['builds']
    # index the commit records by id for O(1) lookup per build
    commits = {commit['id']: commit for commit in response['commits']}
    for build in builds:
        if build['state'] != 'passed' or build['pull_request']:
            # No interest in non-passing builds or PRs
            continue
        commit = commits[build['commit_id']]
        if commit['branch'] == branch:
            # Assumes the builds are in descending chronological order
            # sanity-check the SHA before handing it to git
            if re.match('^[0-9a-f]+$', commit['sha']) is None:
                raise Exception('Travis returned the invalid SHA {}.'.format(
                    commit['sha']))
            return commit['sha']
    raise Exception("Couldn't find a passing build for the branch {}. "
                    "This could be due to pagination, in which case this code "
                    "must be made more robust!".format(branch))
|
kennedyshead/home-assistant | refs/heads/dev | tests/components/sigfox/test_sensor.py | 14 | """Tests for the sigfox sensor."""
import re
import requests_mock
from homeassistant.components.sigfox.sensor import (
API_URL,
CONF_API_LOGIN,
CONF_API_PASSWORD,
)
from homeassistant.setup import async_setup_component
# Dummy credentials used by the fixtures below.
TEST_API_LOGIN = "foo"
TEST_API_PASSWORD = "ebcd1234"
# Minimal valid platform configuration for the sigfox sensor.
VALID_CONFIG = {
    "sensor": {
        "platform": "sigfox",
        CONF_API_LOGIN: TEST_API_LOGIN,
        CONF_API_PASSWORD: TEST_API_PASSWORD,
    }
}
# One-message API response; "data" is the hex encoding of "payload".
VALID_MESSAGE = """
{"data":[{
"time":1521879720,
"data":"7061796c6f6164",
"rinfos":[{"lat":"0.0","lng":"0.0"}],
"snr":"50.0"}]}
"""
async def test_invalid_credentials(hass):
    """Test for invalid credentials."""
    with requests_mock.Mocker() as mock_req:
        # the API rejects the login: setup succeeds but no entity is created
        url = re.compile(API_URL + "devicetypes")
        mock_req.get(url, text="{}", status_code=401)
        assert await async_setup_component(hass, "sensor", VALID_CONFIG)
        await hass.async_block_till_done()
    assert len(hass.states.async_entity_ids()) == 0
async def test_valid_credentials(hass):
    """Test for valid credentials."""
    with requests_mock.Mocker() as mock_req:
        # mock the three API calls made during discovery:
        # device types -> devices of that type -> messages of that device
        url1 = re.compile(API_URL + "devicetypes")
        mock_req.get(url1, text='{"data":[{"id":"fake_type"}]}', status_code=200)
        url2 = re.compile(API_URL + "devicetypes/fake_type/devices")
        mock_req.get(url2, text='{"data":[{"id":"fake_id"}]}')
        url3 = re.compile(API_URL + "devices/fake_id/messages*")
        mock_req.get(url3, text=VALID_MESSAGE)
        assert await async_setup_component(hass, "sensor", VALID_CONFIG)
        await hass.async_block_till_done()
    assert len(hass.states.async_entity_ids()) == 1
    state = hass.states.get("sensor.sigfox_fake_id")
    # hex payload decoded to ascii; snr exposed as an attribute
    assert state.state == "payload"
    assert state.attributes.get("snr") == "50.0"
|
devcline/mtasa-blue | refs/heads/master | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-framework-headers.py | 344 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that mac_framework_headers works properly.
"""
import TestGyp
import sys
# Mac-only test: mac_framework_headers has no meaning on other platforms.
if sys.platform == 'darwin':
  # TODO(thakis): Make this work with ninja, make. http://crbug.com/129013
  test = TestGyp.TestGyp(formats=['xcode'])
  CHDIR = 'framework-headers'
  test.run_gyp('test.gyp', chdir=CHDIR)
  # Test that headers are installed for frameworks
  test.build('test.gyp', 'test_framework_headers_framework', chdir=CHDIR)
  test.built_file_must_exist(
      'TestFramework.framework/Versions/A/TestFramework', chdir=CHDIR)
  test.built_file_must_exist(
      'TestFramework.framework/Versions/A/Headers/myframework.h', chdir=CHDIR)
  # Test that headers are installed for static libraries.
  test.build('test.gyp', 'test_framework_headers_static', chdir=CHDIR)
  test.built_file_must_exist('libTestLibrary.a', chdir=CHDIR)
  test.built_file_must_exist('include/myframework.h', chdir=CHDIR)
  test.pass_test()
|
thomasdouenne/openfisca-france-indirect-taxation | refs/heads/master | openfisca_france_indirect_taxation/tests/test_depenses_caburants_ht.py | 4 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 18:40:27 2016
@author: thomas.douenne
"""
from __future__ import division
from openfisca_france_indirect_taxation.examples.utils_example import simulate
# Variables requested from the simulation (one column each in the result).
simulated_variables = [
    'pondmen',
    'decuc',
    'depenses_diesel_ht',
    'depenses_diesel_htva',
    'diesel_ticpe',
    'depenses_diesel',
    'depenses_diesel_recalculees'
    ]
# Run the checks for each simulated year.
for year in [2000]:
    # Build a dataframe of the simulated variables for the year
    # (originally described as a Stata-style "collapse" by decile).
    df = simulate(simulated_variables = simulated_variables, year = year)
    # Pre-tax diesel spending should equal spending net of VAT minus the TICPE excise.
    # NOTE(review): .any() passes if a single row matches; .all() was probably intended -- confirm.
    df['check_diesel_ht'] = df['depenses_diesel_ht'] - (df['depenses_diesel_htva'] - df['diesel_ticpe'])
    assert (df['check_diesel_ht'] == 0).any()
    # Recomputed diesel spending should match the original column.
    df['check_diesel_recalcule'] = df['depenses_diesel'] - df['depenses_diesel_recalculees']
    assert (df['check_diesel_recalcule'] == 0).any()
|
Komzpa/GroundHog | refs/heads/master | groundhog/utils/utils.py | 16 | """
Utility functions
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import random
import string
import copy as pycopy
import theano
import theano.tensor as TT
def print_time(secs):
    """Format a duration in seconds as a short human-readable string."""
    if secs < 120.:
        value, unit = secs, 'sec'
    elif secs <= 60 * 60:
        value, unit = secs / 60., 'min'
    else:
        value, unit = secs / 3600., 'h '
    return '%6.3f %s' % (value, unit)
def print_mem(context=None):
    """Print GPU memory usage (used/free/total in Mb) when CUDA is enabled; no-op otherwise."""
    if theano.sandbox.cuda.cuda_enabled:
        rvals = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
        # Avaliable memory in Mb
        available = float(rvals[0]) / 1024. / 1024.
        # Total memory in Mb
        total = float(rvals[1]) / 1024. / 1024.
        if context == None:
            print ('Used %.3f Mb Free  %.3f Mb, total %.3f Mb' %
                   (total - available, available, total))
        else:
            info = str(context)
            print (('GPU status : Used %.3f Mb Free %.3f Mb,'
                    'total %.3f Mb [context %s]') %
                   (total - available, available, total, info))
def const(value):
    """Return a theano constant of *value* cast to the configured floatX dtype."""
    return TT.constant(numpy.asarray(value, dtype=theano.config.floatX))
def as_floatX(variable):
    """
    This code is taken from pylearn2:
    Casts a given variable into dtype config.floatX
    numpy ndarrays will remain numpy ndarrays
    python floats will become 0-D ndarrays
    all other types will be treated as theano tensors
    """
    if isinstance(variable, float):
        return numpy.cast[theano.config.floatX](variable)
    if isinstance(variable, numpy.ndarray):
        return numpy.cast[theano.config.floatX](variable)
    # anything else goes through theano's symbolic cast
    return theano.tensor.cast(variable, theano.config.floatX)
def copy(x):
    """Shallow-copy a layer-like object, duplicating its bookkeeping lists
    so they can be modified without affecting the original."""
    new_x = pycopy.copy(x)
    # rebuild each list so the copy owns its own containers
    new_x.params = [x for x in new_x.params]
    new_x.params_grad_scale = [x for x in new_x.params_grad_scale ]
    new_x.noise_params = [x for x in new_x.noise_params ]
    new_x.noise_params_shape_fn = [x for x in new_x.noise_params_shape_fn]
    new_x.updates = [x for x in new_x.updates ]
    new_x.additional_gradients = [x for x in new_x.additional_gradients ]
    new_x.inputs = [x for x in new_x.inputs ]
    new_x.schedules = [x for x in new_x.schedules ]
    new_x.properties = [x for x in new_x.properties ]
    return new_x
def softmax(x):
    """Symbolic softmax: row-wise for 2-D input, over all elements otherwise.

    NOTE(review): no max-subtraction is applied, so large inputs can
    overflow in exp -- confirm callers pre-scale if needed.
    """
    if x.ndim == 2:
        e = TT.exp(x)
        return e / TT.sum(e, axis=1).dimshuffle(0, 'x')
    else:
        e = TT.exp(x)
        return e/ TT.sum(e)
def sample_zeros(sizeX, sizeY, sparsity, scale, rng):
    """Zero-initializer with the same signature as the sample_weights_* functions (sparsity/scale/rng ignored)."""
    return numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.

    Each row gets *sparsity* nonzero entries drawn uniformly and rescaled
    to norm *scale*; the whole matrix is then divided by its largest
    singular value (and multiplied by *scale*).
    """
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        vals_norm = numpy.sqrt((new_vals**2).sum())
        new_vals = scale*new_vals/vals_norm
        values[dx, perm[:sparsity]] = new_vals
    _,v,_ = numpy.linalg.svd(values)
    values = scale * values/v[0]
    return values.astype(theano.config.floatX)
def sample_weights_classic(sizeX, sizeY, sparsity, scale, rng):
    """Sparse gaussian initialization: each row gets *sparsity* nonzero
    entries drawn from N(0, scale); sparsity < 0 means fully dense."""
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    if sparsity < 0:
        sparsity = sizeY
    else:
        sparsity = numpy.minimum(sizeY, sparsity)
    sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    return values.astype(theano.config.floatX)
def sample_weights_orth(sizeX, sizeY, sparsity, scale, rng):
    """Orthogonal initialization for square matrices: draw a sparse
    gaussian matrix and keep the left singular vectors scaled by *scale*."""
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    assert sizeX == sizeY, 'for orthogonal init, sizeX == sizeY'
    if sparsity < 0:
        sparsity = sizeY
    else:
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    u,s,v = numpy.linalg.svd(values)
    values = u * scale
    return values.astype(theano.config.floatX)
def init_bias(size, scale, rng):
    """Constant bias vector of length *size* filled with *scale* (rng unused)."""
    return numpy.ones((size,), dtype=theano.config.floatX)*scale
def id_generator(size=5, chars=string.ascii_uppercase + string.digits):
    """Random identifier of length *size* drawn from *chars* (uppercase letters and digits by default)."""
    return ''.join(random.choice(chars) for i in xrange(size))
def constant_shape(shape):
    """Return a function that ignores its arguments and always yields *shape*."""
    return lambda *args, **kwargs : shape
def binVec2Int(binVec):
    """Interpret a binary vector as an integer (first element = most significant bit)."""
    add = lambda x,y: x+y
    return reduce(add,
                  [int(x) * 2 ** y
                   for x, y in zip(
                       list(binVec),range(len(binVec) - 1, -1,
                                          -1))])
def Int2binVec(val, nbits=10):
    """Return *val* as a length-*nbits* float vector of bits, least
    significant bit first (note: the reverse order of binVec2Int)."""
    strVal = '{0:b}'.format(val)
    value = numpy.zeros((nbits,), dtype=theano.config.floatX)
    if theano.config.floatX == 'float32':
        value[:len(strVal)] = [numpy.float32(x) for x in strVal[::-1]]
    else:
        value[:len(strVal)] = [numpy.float64(x) for x in strVal[::-1]]
    return value
def dot(inp, matrix):
    """
    Decide the right type of dot product depending on the input
    arguments
    """
    if 'int' in inp.dtype and inp.ndim==2:
        # integer indices: embedding-style row lookup (flattened)
        return matrix[inp.flatten()]
    elif 'int' in inp.dtype:
        return matrix[inp]
    elif 'float' in inp.dtype and inp.ndim == 3:
        # collapse the first two axes to perform a 2-D matmul
        # (note: the result is left 2-D, not reshaped back to 3-D)
        shape0 = inp.shape[0]
        shape1 = inp.shape[1]
        shape2 = inp.shape[2]
        return TT.dot(inp.reshape((shape0*shape1, shape2)), matrix)
    else:
        return TT.dot(inp, matrix)
def dbg_hook(hook, x):
    """Attach a debug Print op calling *hook* to a theano variable (or to an op's .out)."""
    if not isinstance(x, TT.TensorVariable):
        x.out = theano.printing.Print(global_fn=hook)(x.out)
        return x
    else:
        return theano.printing.Print(global_fn=hook)(x)
|
zidootech/zidoo-kodi-14.2 | refs/heads/master | lib/gtest/test/gtest_xml_outfiles_test.py | 2526 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""
  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single file
    # for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()
  def tearDown(self):
    self.DeleteFilesAndDir()
  def DeleteFilesAndDir(self):
    # Best-effort cleanup: each removal is independent so a missing file
    # does not prevent removing the rest.
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass
  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
  def _TestOutFile(self, test_name, expected_xml):
    # Runs the given gtest binary with directory-style --gtest_output and
    # compares the produced XML (normalized) against the golden string.
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    # TODO(wan@google.com): libtool causes the built test binary to be
    #   named lt-gtest_xml_outfiles_test_ instead of
    #   gtest_xml_outfiles_test_.  To account for this possibillity, we
    #   allow both names in the following code.  We should remove this
    #   hack when Chandler Carruth's libtool replacement tool is ready.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)
    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
  # presumably disabled so golden output stays free of stack traces -- confirm
  os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
  gtest_test_utils.Main()
|
yiliaofan/faker | refs/heads/master | faker/providers/profile/__init__.py | 12 | # coding=utf-8
from .. import BaseProvider
import itertools
class Provider(BaseProvider):
    """
    This provider is a collection of functions to generate personal profiles and identities.
    """

    def simple_profile(self):
        """
        Generates a basic profile with personal informations
        """
        return {
            "username": self.generator.user_name(),
            "name": self.generator.name(),
            "sex": self.random_element(["M", "F"]),
            "address": self.generator.address(),
            "mail": self.generator.free_email(),
            #"password":self.generator.password()
            "birthdate": self.generator.date(),
        }

    def profile(self, fields=None):
        """
        Generates a complete profile.
        If "fields" is not empty, only the fields in the list will be returned
        """
        if fields is None:
            fields = []

        d = {
            "job": self.generator.job(),
            "company": self.generator.company(),
            "ssn": self.generator.ssn(),
            "residence": self.generator.address(),
            "current_location": (self.generator.latitude(), self.generator.longitude()),
            # NOTE(review): "0" (zero) is used where blood types are conventionally
            # written with the letter "O" -- confirm upstream intent before changing.
            "blood_group": "".join(self.random_element(list(itertools.product(["A", "B", "AB", "0"], ["+", "-"])))),
            "website": [self.generator.url() for i in range(1, self.random_int(2, 5))]
        }
        # merge in the basic personal fields
        d = dict(d, **self.generator.simple_profile())
        #field selection: keep only the requested keys, if any were given
        if len(fields) > 0:
            d = dict((k, v) for (k, v) in d.items() if k in fields)
        return d
|
TigorC/zulip | refs/heads/master | zerver/management/commands/process_queue.py | 32 | from __future__ import absolute_import
from types import FrameType
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from django.core.management import CommandError
from django.conf import settings
from django.utils import autoreload
from zerver.worker.queue_processors import get_worker, get_active_worker_queues
import sys
import signal
import logging
import threading
class Command(BaseCommand):
    """Management command that runs one queue worker, or all of them as threads."""
    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('--queue_name', metavar='<queue name>', type=str,
                            help="queue to process")
        parser.add_argument('--worker_num', metavar='<worker number>', type=int, nargs='?', default=0,
                            help="worker label")
        parser.add_argument('--all', dest="all", action="store_true", default=False,
                            help="run all queues")
    help = "Runs a queue processing worker"
    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        logging.basicConfig()
        logger = logging.getLogger('process_queue')
        if not settings.USING_RABBITMQ:
            # Make the warning silent when running the tests
            if settings.TEST_SUITE:
                logger.info("Not using RabbitMQ queue workers in the test suite.")
            else:
                logger.error("Cannot run a queue processor when USING_RABBITMQ is False!")
            sys.exit(1)
        def run_threaded_workers(logger):
            # type: (logging.Logger) -> None
            # launches one daemonless thread per active queue
            cnt = 0
            for queue_name in get_active_worker_queues():
                if not settings.DEVELOPMENT:
                    logger.info('launching queue worker thread ' + queue_name)
                cnt += 1
                td = Threaded_worker(queue_name)
                td.start()
            logger.info('%d queue worker threads were launched' % (cnt,))
        if options['all']:
            # autoreload restarts the workers when source files change
            autoreload.main(run_threaded_workers, (logger,))
        else:
            queue_name = options['queue_name']
            worker_num = options['worker_num']
            logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name))
            worker = get_worker(queue_name)
            worker.setup()
            def signal_handler(signal, frame):
                # type: (int, FrameType) -> None
                # graceful shutdown on SIGTERM/SIGINT
                logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name))
                worker.stop()
                sys.exit(0)
            signal.signal(signal.SIGTERM, signal_handler)
            signal.signal(signal.SIGINT, signal_handler)
            worker.start()
class Threaded_worker(threading.Thread):
    """Thread wrapper that runs one queue worker for the given queue."""
    def __init__(self, queue_name):
        # type: (str) -> None
        threading.Thread.__init__(self)
        self.worker = get_worker(queue_name)
    def run(self):
        # type: () -> None
        self.worker.setup()
        logging.debug('starting consuming ' + self.worker.queue_name)
        self.worker.start()
|
kernevil/samba | refs/heads/master | third_party/pep8/testsuite/E71.py | 40 | #: E711
if res == None:
pass
#: E711
if res != None:
pass
#: E711
if None == res:
pass
#: E711
if None != res:
pass
#
#: E712
if res == True:
pass
#: E712
if res != False:
pass
#: E712
if True != res:
pass
#: E712
if False == res:
pass
#
#: E713
if not X in Y:
pass
#: E713
if not X.B in Y:
pass
#: E713
if not X in Y and Z == "zero":
pass
#: E713
if X == "zero" or not Y in Z:
pass
#
#: E714
if not X is Y:
pass
#: E714
if not X.B is Y:
pass
#
#: Okay
if x not in y:
pass
if not (X in Y or X is Z):
pass
if not (X in Y):
pass
if x is not y:
pass
if TrueElement.get_element(True) == TrueElement.get_element(False):
pass
if (True) == TrueElement or x == TrueElement:
pass
assert (not foo) in bar
assert {'x': not foo} in bar
assert [42, not foo] in bar
#:
|
40023256/2015cd_midterm- | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/logging/config.py | 739 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
    """
    Read the logging configuration from a ConfigParser-format file.
    This can be called several times from an application, allowing an end user
    the ability to select from various pre-canned configurations (if the
    developer provides a mechanism to present the choices and load the chosen
    configuration).

    *fname* may be a path or a file-like object (anything with a ``readline``
    attribute is read directly). *defaults* is passed to ``ConfigParser``.
    *disable_existing_loggers* controls whether loggers present before this
    call but absent from the new configuration are disabled.
    """
    import configparser
    cp = configparser.ConfigParser(defaults)
    if hasattr(fname, 'readline'):
        cp.read_file(fname)
    else:
        cp.read(fname)
    formatters = _create_formatters(cp)
    # critical section: replace installed handlers/loggers atomically with
    # respect to the rest of the logging machinery
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _create_formatters(cp):
    """Create and return a dict mapping formatter name -> Formatter instance,
    built from the ``[formatters]`` section of config parser *cp*."""
    flist = cp["formatters"]["keys"]
    if not len(flist):
        return {}
    flist = flist.split(",")
    flist = _strip_spaces(flist)
    formatters = {}
    for form in flist:
        sectname = "formatter_%s" % form
        # raw=True: format strings contain '%' placeholders which must not
        # be interpolated by ConfigParser
        fs = cp.get(sectname, "format", raw=True, fallback=None)
        dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
        c = logging.Formatter
        class_name = cp[sectname].get("class")
        if class_name:
            # a custom formatter class may be named by dotted path
            c = _resolve(class_name)
        f = c(fs, dfs)
        formatters[form] = f
    return formatters
def _install_handlers(cp, formatters):
    """Instantiate and return the handlers named in ``[handlers]``, wiring up
    formatters and deferred MemoryHandler targets."""
    hlist = cp["handlers"]["keys"]
    if not len(hlist):
        return {}
    hlist = hlist.split(",")
    hlist = _strip_spaces(hlist)
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        section = cp["handler_%s" % hand]
        klass = section["class"]
        fmt = section.get("formatter", "")
        try:
            # SECURITY NOTE: both the class name and the args tuple are
            # eval()'d with the logging module namespace as globals.  Config
            # files are trusted code here -- never feed untrusted files to
            # fileConfig().
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            klass = _resolve(klass)
        args = section["args"]
        args = eval(args, vars(logging))
        h = klass(*args)
        if "level" in section:
            level = section["level"]
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        if issubclass(klass, logging.handlers.MemoryHandler):
            target = section.get("target", "")
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
    """Create and install the loggers listed in ``[loggers]``.

    The root logger is configured first from ``[logger_root]``; every other
    logger gets its level, propagation flag and handlers from its own
    ``[logger_<name>]`` section.  Pre-existing loggers that are not named in
    the new configuration are handed to _handle_existing_loggers().
    """
    # configure the root first
    llist = cp["loggers"]["keys"]
    llist = llist.split(",")
    llist = list(map(lambda x: x.strip(), llist))
    llist.remove("root")
    section = cp["logger_root"]
    root = logging.root
    log = root
    if "level" in section:
        level = section["level"]
        log.setLevel(logging._levelNames[level])
    # iterate over a copy: removeHandler mutates root.handlers
    for h in root.handlers[:]:
        root.removeHandler(h)
    hlist = section["handlers"]
    if len(hlist):
        hlist = hlist.split(",")
        hlist = _strip_spaces(hlist)
        for hand in hlist:
            log.addHandler(handlers[hand])
    #and now the others...
    #we don't want to lose the existing loggers,
    #since other threads may have pointers to them.
    #existing is set to contain all existing loggers,
    #and as we go through the new configuration we
    #remove any which are configured. At the end,
    #what's left in existing is the set of loggers
    #which were in the previous configuration but
    #which are not in the new configuration.
    existing = list(root.manager.loggerDict.keys())
    #The list needs to be sorted so that we can
    #avoid disabling child loggers of explicitly
    #named loggers. With a sorted list it is easier
    #to find the child loggers.
    existing.sort()
    #We'll keep the list of existing loggers
    #which are children of named loggers here...
    child_loggers = []
    #now set up the new ones...
    for log in llist:
        section = cp["logger_%s" % log]
        qn = section["qualname"]
        propagate = section.getint("propagate", fallback=1)
        logger = logging.getLogger(qn)
        if qn in existing:
            # collect descendants of qn: in the sorted list they appear
            # directly after qn, sharing the "qn." prefix
            i = existing.index(qn) + 1 # start with the entry after qn
            prefixed = qn + "."
            pflen = len(prefixed)
            num_existing = len(existing)
            while i < num_existing:
                if existing[i][:pflen] == prefixed:
                    child_loggers.append(existing[i])
                i += 1
            existing.remove(qn)
        if "level" in section:
            level = section["level"]
            logger.setLevel(logging._levelNames[level])
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        logger.propagate = propagate
        logger.disabled = 0
        hlist = section["handlers"]
        if len(hlist):
            hlist = hlist.split(",")
            hlist = _strip_spaces(hlist)
            for hand in hlist:
                logger.addHandler(handlers[hand])
    #Disable any old loggers. There's no point deleting
    #them as other threads may continue to hold references
    #and by disabling them, you stop them doing any logging.
    #However, don't disable children of named loggers, as that's
    #probably not what was intended by the user.
    _handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
    """Return True if *s* is a plain ASCII identifier; raise ValueError otherwise."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Accessors run each value through ``self.configurator.convert`` so that
    nested dicts/lists/tuples become Converting* wrappers and ``ext://`` /
    ``cfg://`` strings are resolved.  The ``configurator`` attribute is
    assigned externally (by BaseConfigurator / convert), not in __init__.
    """
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                # record where the wrapper lives, for cfg:// style traversal
                result.parent = self
                result.key = key
        return result
    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, key, default=None):
        # pop must not write the converted value back into the dict
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper; see ConvertingDict for the conversion
    contract.  ``configurator`` is assigned externally."""
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, idx=-1):
        # no ``key`` recorded here: the popped value no longer has a stable index
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper; tuples are immutable, so the converted
    value is never written back -- it is reconverted on each access."""
    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """
    # "prefix://suffix" strings trigger a converter from value_converters
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = staticmethod(__import__)
    def __init__(self, config):
        self.config = ConvertingDict(config)
        self.config.configurator = self
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # frag is a not-yet-imported submodule; import and retry
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # re-raise as ValueError, preserving cause and traceback
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol.

        Parses paths such as ``handlers.console`` or ``loggers[0].level``
        against self.config, one ``.word`` or ``[index]`` step at a time.
        """
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                # container is keyed by string after all
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, str): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value
    def configure_custom(self, config):
        """Configure an object with a user-supplied factory.

        The factory is given under the '()' key; extra attributes to set on
        the result may be given under the '.' key.
        """
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result
    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.
    """
    def configure(self):
        """Do the configuration.

        In incremental mode only handler/logger levels and logger propagation
        are touched; otherwise the whole logging configuration is replaced
        (formatters, then filters, then handlers, then loggers, then root --
        in dependency order).
        """
        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                for name in handlers:
                    if name not in logging._handlers:
                        raise ValueError('No handler found with '
                                         'name %r' % name)
                    else:
                        try:
                            handler = logging._handlers[name]
                            handler_config = handlers[name]
                            level = handler_config.get('level', None)
                            if level:
                                handler.setLevel(logging._checkLevel(level))
                        except Exception as e:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except Exception as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except Exception as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                logging._handlers.clear()
                del logging._handlerList[:]
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except Exception as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except Exception as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))
                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                deferred = []
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except Exception as e:
                        # MemoryHandler targets may not exist yet; retry those
                        # after every other handler has been built
                        if 'target not configured yet' in str(e):
                            deferred.append(name)
                        else:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))
                # Now do any that were deferred
                for name in deferred:
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except Exception as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict.keys())
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        # descendants of name follow it in the sorted list,
                        # sharing the "name." prefix
                        i = existing.index(name) + 1 # look after name
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        while i < num_existing:
                            if existing[i][:pflen] == prefixed:
                                child_loggers.append(existing[i])
                            i += 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except Exception as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                _handle_existing_loggers(existing, child_loggers,
                                         disable_existing)
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except Exception as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError as te:
                if "'format'" not in str(te):
                    raise
                #Name of parameter changed from fmt to format.
                #Retry with old name.
                #This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            style = config.get('style', '%')
            result = logging.Formatter(fmt, dfmt, style)
        return result
    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result
    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except Exception as e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))
    def configure_handler(self, config):
        """Configure a handler from a dictionary.

        Raises TypeError('target not configured yet') when a MemoryHandler's
        target handler has not been built; configure() catches that message
        and defers the handler to a second pass.
        """
        config_copy = dict(config)  # for restoring in case of error
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except Exception as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            if not callable(c):
                c = self.resolve(c)
            factory = c
        else:
            cname = config.pop('class')
            klass = self.resolve(cname)
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    th = self.config['handlers'][config['target']]
                    if not isinstance(th, logging.Handler):
                        config.update(config_copy)  # restore for deferred cfg
                        raise TypeError('target not configured yet')
                    config['target'] = th
                except Exception as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(logging._checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result
    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except Exception as e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))
    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(logging._checkLevel(level))
        if not incremental:
            #Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)
    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate
    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
# Hook: applications may substitute their own configurator class here.
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary."""
    dictConfigClass(config).configure()
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.
    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread: #pragma: no cover
        raise NotImplementedError("listen() needs threading to work")
    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.
        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.
            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    # keep reading until the announced length has arrived
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    chunk = chunk.decode("utf-8")
                    # Try the payload as JSON (dictConfig) first; on any
                    # failure fall back to ConfigParser format (fileConfig).
                    try:
                        import json
                        d =json.loads(chunk)
                        assert isinstance(d, dict)
                        dictConfig(d)
                    except:
                        #Apply new configuration.
                        file = io.StringIO(chunk)
                        try:
                            fileConfig(file)
                        except (KeyboardInterrupt, SystemExit): #pragma: no cover
                            raise
                        except:
                            traceback.print_exc()
                    if self.server.ready:
                        self.server.ready.set()
            except socket.error as e:
                if not isinstance(e.args, tuple):
                    raise
                else:
                    # a peer reset is expected when the sender disconnects
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise
    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """
        allow_reuse_address = 1
        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None, ready=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
            self.ready = ready
        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                # poll with a timeout so the abort flag is re-checked
                # periodically even when no request arrives
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
            self.socket.close()
    class Server(threading.Thread):
        # Thread that owns the receiver; publishes it via the module-global
        # _listener so stopListening() can signal it.
        def __init__(self, rcvr, hdlr, port):
            super(Server, self).__init__()
            self.rcvr = rcvr
            self.hdlr = hdlr
            self.port = port
            self.ready = threading.Event()
        def run(self):
            server = self.rcvr(port=self.port, handler=self.hdlr,
                               ready=self.ready)
            if self.port == 0:
                # ephemeral port: report the one actually bound
                self.port = server.server_address[1]
            self.ready.set()
            global _listener
            logging._acquireLock()
            _listener = server
            logging._releaseLock()
            server.serve_until_stopped()
    return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener
    logging._acquireLock()
    try:
        if _listener:
            # serve_until_stopped() polls this flag and shuts down cleanly
            _listener.abort = 1
            _listener = None
    finally:
        logging._releaseLock()
|
tealover/nova | refs/heads/master | nova/volume/__init__.py | 64 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_config.cfg
from oslo_utils import importutils
# Config option selecting which volume API implementation nova instantiates
# (see API() below); defaults to the Cinder-backed implementation.
_volume_opts = [
    oslo_config.cfg.StrOpt('volume_api_class',
                           default='nova.volume.cinder.API',
                           help='The full class name of the '
                                'volume API class to use'),
]
oslo_config.cfg.CONF.register_opts(_volume_opts)
def API():
    """Instantiate and return the configured volume API implementation."""
    class_path = oslo_config.cfg.CONF.volume_api_class
    return importutils.import_class(class_path)()
|
w1r0x/ansible | refs/heads/devel | contrib/inventory/openvz.py | 86 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# openvz.py
#
# Copyright 2014 jordonr <jordon@beamsyn.net>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Inspired by libvirt_lxc.py inventory script
# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
#
# Groups are determined by the description field of openvz guests
# multiple groups can be separated by commas: webserver,dbserver
from subprocess import Popen,PIPE
import sys
import json
#List openvz hosts
vzhosts = ['vzhost1','vzhost2','vzhost3']
#Add openvz hosts to the inventory and Add "_meta" trick
inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
#default group, when description not defined
default_group = ['vzguest']
def get_guests():
    """Populate and return the module-level *inventory* dict by querying
    every host in *vzhosts* over ssh with ``vzlist -j``.

    Guests are grouped by the comma-separated groups in their description
    field (or *default_group* when no description is set); per-guest details
    go under ``_meta.hostvars`` keyed by hostname.
    """
    #Loop through vzhosts
    for h in vzhosts:
        #SSH to vzhost and get the list of guests in json
        # NOTE(review): the ssh exit status is never checked and the process
        # is not wait()ed -- a failing host yields empty output and a
        # json.loads error; confirm whether that is acceptable.
        pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True)
        #Load Json info of guests
        json_data = json.loads(pipe.stdout.read())
        #loop through guests
        for j in json_data:
            #Add information to host vars
            inventory['_meta']['hostvars'][j['hostname']] = {'ctid': j['ctid'], 'veid': j['veid'], 'vpsid': j['vpsid'], 'private_path': j['private'], 'root_path': j['root'], 'ip': j['ip']}
            #determine group from guest description
            if j['description'] is not None:
                groups = j['description'].split(",")
            else:
                groups = default_group
            #add guest to inventory
            for g in groups:
                if g not in inventory:
                    inventory[g] = {'hosts': []}
                inventory[g]['hosts'].append(j['hostname'])
    return inventory
# Ansible dynamic-inventory protocol: --list dumps the whole inventory;
# --host <name> may return {} because per-host vars are already supplied
# via the top-level '_meta' key in the --list output.
if len(sys.argv) == 2 and sys.argv[1] == '--list':
    inv_json = get_guests()
    print(json.dumps(inv_json, sort_keys=True))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
    print(json.dumps({}))
else:
    print("Need an argument, either --list or --host <host>")
|
antonve/s4-project-mooc | refs/heads/master | common/djangoapps/util/tests/test_json_request.py | 191 | """
Test for JsonResponse and JsonResponseBadRequest util classes.
"""
from django.http import HttpResponse, HttpResponseBadRequest
from util.json_request import JsonResponse, JsonResponseBadRequest
import json
import unittest
import mock
class JsonResponseTestCase(unittest.TestCase):
    """
    A set of tests to make sure that JsonResponse Class works correctly.
    """
    def test_empty(self):
        # No payload at all -> 204 No Content with an empty body.
        resp = JsonResponse()
        self.assertIsInstance(resp, HttpResponse)
        self.assertEqual(resp.content, "")
        self.assertEqual(resp.status_code, 204)
        self.assertEqual(resp["content-type"], "application/json")
    def test_empty_string(self):
        # An empty string is treated like "no content", not serialized as '""'.
        resp = JsonResponse("")
        self.assertIsInstance(resp, HttpResponse)
        self.assertEqual(resp.content, "")
        self.assertEqual(resp.status_code, 204)
        self.assertEqual(resp["content-type"], "application/json")
    def test_string(self):
        # Non-empty strings are JSON-encoded (quoted).
        resp = JsonResponse("foo")
        self.assertEqual(resp.content, '"foo"')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp["content-type"], "application/json")
    def test_dict(self):
        obj = {"foo": "bar"}
        resp = JsonResponse(obj)
        compare = json.loads(resp.content)
        self.assertEqual(obj, compare)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp["content-type"], "application/json")
    def test_set_status_kwarg(self):
        obj = {"error": "resource not found"}
        resp = JsonResponse(obj, status=404)
        compare = json.loads(resp.content)
        self.assertEqual(obj, compare)
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(resp["content-type"], "application/json")
    def test_set_status_arg(self):
        # status may also be passed positionally.
        obj = {"error": "resource not found"}
        resp = JsonResponse(obj, 404)
        compare = json.loads(resp.content)
        self.assertEqual(obj, compare)
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(resp["content-type"], "application/json")
    def test_encoder(self):
        # A custom encoder object must be forwarded to json.dumps as cls=...
        obj = [1, 2, 3]
        encoder = object()
        with mock.patch.object(json, "dumps", return_value="[1,2,3]") as dumps:
            resp = JsonResponse(obj, encoder=encoder)
        self.assertEqual(resp.status_code, 200)
        compare = json.loads(resp.content)
        self.assertEqual(obj, compare)
        kwargs = dumps.call_args[1]
        self.assertIs(kwargs["cls"], encoder)
class JsonResponseBadRequestTestCase(unittest.TestCase):
    """
    A set of tests to make sure that the JsonResponseBadRequest wrapper class
    works as intended.
    """
    def test_empty(self):
        """No payload yields an empty 400 JSON response."""
        resp = JsonResponseBadRequest()
        self.assertIsInstance(resp, HttpResponseBadRequest)
        self.assertEqual(resp.content, "")
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(resp["content-type"], "application/json")
    def test_empty_string(self):
        """An empty string payload behaves exactly like no payload."""
        resp = JsonResponseBadRequest("")
        # FIX: assert the specific subclass, consistent with test_empty.
        # The previous check against plain HttpResponse was weaker and
        # would not catch a regression in the wrapper's base class.
        self.assertIsInstance(resp, HttpResponseBadRequest)
        self.assertEqual(resp.content, "")
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(resp["content-type"], "application/json")
    def test_dict(self):
        """A dict payload round-trips through the JSON body with status 400."""
        obj = {"foo": "bar"}
        resp = JsonResponseBadRequest(obj)
        compare = json.loads(resp.content)
        self.assertEqual(obj, compare)
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(resp["content-type"], "application/json")
    def test_set_status_kwarg(self):
        """The `status` keyword argument overrides the default 400."""
        obj = {"error": "resource not found"}
        resp = JsonResponseBadRequest(obj, status=404)
        compare = json.loads(resp.content)
        self.assertEqual(obj, compare)
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(resp["content-type"], "application/json")
    def test_set_status_arg(self):
        """The status positional argument overrides the default 400."""
        obj = {"error": "resource not found"}
        resp = JsonResponseBadRequest(obj, 404)
        compare = json.loads(resp.content)
        self.assertEqual(obj, compare)
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(resp["content-type"], "application/json")
    def test_encoder(self):
        """A custom encoder object is forwarded to json.dumps as `cls`."""
        obj = [1, 2, 3]
        encoder = object()
        with mock.patch.object(json, "dumps", return_value="[1,2,3]") as dumps:
            resp = JsonResponseBadRequest(obj, encoder=encoder)
        self.assertEqual(resp.status_code, 400)
        compare = json.loads(resp.content)
        self.assertEqual(obj, compare)
        kwargs = dumps.call_args[1]
        self.assertIs(kwargs["cls"], encoder)
|
Neamar/django | refs/heads/master | django/core/management/templates.py | 274 | import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
from os import path
import django
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import handle_extensions
from django.template import Context, Engine
from django.utils import archive, six
from django.utils.six.moves.urllib.request import urlretrieve
from django.utils.version import get_docs_version
# Pre-compiled patterns for matching a Windows drive prefix in a filesystem
# path ("c:") and in a URL-style path ("c:" or "c|").
# NOTE(review): neither pattern is referenced by the visible code in this
# file -- presumably used elsewhere or left over; confirm before removing.
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
    """
    Copies either a Django application layout template or a Django project
    layout template into the specified directory.
    :param style: A color style object (see django.core.management.color).
    :param app_or_project: The string 'app' or 'project'.
    :param name: The name of the application or project.
    :param directory: The directory to which the template should be copied.
    :param options: The additional variables passed to project or app templates
    """
    requires_system_checks = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False
    # The supported URL schemes
    url_schemes = ['http', 'https', 'ftp']
    # Can't perform any active locale changes during this command, because
    # setting might not be available at all.
    leave_locale_alone = True
    def add_arguments(self, parser):
        """Register the positional name/directory and template options."""
        parser.add_argument('name', help='Name of the application or project.')
        parser.add_argument('directory', nargs='?', help='Optional destination directory')
        parser.add_argument('--template',
                            help='The path or URL to load the template from.')
        parser.add_argument('--extension', '-e', dest='extensions',
                            action='append', default=['py'],
                            help='The file extension(s) to render (default: "py"). '
                                 'Separate multiple extensions with commas, or use '
                                 '-e multiple times.')
        parser.add_argument('--name', '-n', dest='files',
                            action='append', default=[],
                            help='The file name(s) to render. '
                                 'Separate multiple extensions with commas, or use '
                                 '-n multiple times.')
    def handle(self, app_or_project, name, target=None, **options):
        """Render the app/project template tree into the target directory."""
        self.app_or_project = app_or_project
        self.paths_to_remove = []
        self.verbosity = options['verbosity']
        self.validate_name(name, app_or_project)
        # if some directory is given, make sure it's nicely expanded
        if target is None:
            # No explicit target: create ./<name> and fail if it exists.
            top_dir = path.join(os.getcwd(), name)
            try:
                os.makedirs(top_dir)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    message = "'%s' already exists" % top_dir
                else:
                    message = e
                raise CommandError(message)
        else:
            # Explicit target: it must already exist.
            top_dir = os.path.abspath(path.expanduser(target))
            if not os.path.exists(top_dir):
                raise CommandError("Destination directory '%s' does not "
                                   "exist, please create it first." % top_dir)
        extensions = tuple(handle_extensions(options['extensions']))
        extra_files = []
        for file in options['files']:
            extra_files.extend(map(lambda x: x.strip(), file.split(',')))
        if self.verbosity >= 2:
            self.stdout.write("Rendering %s template files with "
                              "extensions: %s\n" %
                              (app_or_project, ', '.join(extensions)))
            self.stdout.write("Rendering %s template files with "
                              "filenames: %s\n" %
                              (app_or_project, ', '.join(extra_files)))
        # Context variable names are parameterized on app/project so the
        # same template machinery serves both startapp and startproject.
        base_name = '%s_name' % app_or_project
        base_subdir = '%s_template' % app_or_project
        base_directory = '%s_directory' % app_or_project
        camel_case_name = 'camel_case_%s_name' % app_or_project
        camel_case_value = ''.join(x for x in name.title() if x != '_')
        context = Context(dict(options, **{
            base_name: name,
            base_directory: top_dir,
            camel_case_name: camel_case_value,
            'docs_version': get_docs_version(),
            'django_version': django.__version__,
            'unicode_literals': '' if six.PY3 else 'from __future__ import unicode_literals\n\n',
        }), autoescape=False)
        # Setup a stub settings environment for template rendering
        from django.conf import settings
        if not settings.configured:
            settings.configure()
        template_dir = self.handle_template(options['template'],
                                            base_subdir)
        prefix_length = len(template_dir) + 1
        for root, dirs, files in os.walk(template_dir):
            # Map the template-relative directory onto the destination,
            # substituting the placeholder name with the real one.
            path_rest = root[prefix_length:]
            relative_dir = path_rest.replace(base_name, name)
            if relative_dir:
                target_dir = path.join(top_dir, relative_dir)
                if not path.exists(target_dir):
                    os.mkdir(target_dir)
            # Prune hidden and __pycache__ directories in-place so os.walk
            # does not descend into them (hence the dirs[:] copy).
            for dirname in dirs[:]:
                if dirname.startswith('.') or dirname == '__pycache__':
                    dirs.remove(dirname)
            for filename in files:
                if filename.endswith(('.pyo', '.pyc', '.py.class')):
                    # Ignore some files as they cause various breakages.
                    continue
                old_path = path.join(root, filename)
                new_path = path.join(top_dir, relative_dir,
                                     filename.replace(base_name, name))
                if path.exists(new_path):
                    raise CommandError("%s already exists, overlaying a "
                                       "project or app into an existing "
                                       "directory won't replace conflicting "
                                       "files" % new_path)
                # Only render the Python files, as we don't want to
                # accidentally render Django templates files
                with open(old_path, 'rb') as template_file:
                    content = template_file.read()
                if filename.endswith(extensions) or filename in extra_files:
                    content = content.decode('utf-8')
                    template = Engine().from_string(content)
                    content = template.render(context)
                    content = content.encode('utf-8')
                with open(new_path, 'wb') as new_file:
                    new_file.write(content)
                if self.verbosity >= 2:
                    self.stdout.write("Creating %s\n" % new_path)
                try:
                    shutil.copymode(old_path, new_path)
                    self.make_writeable(new_path)
                except OSError:
                    # Best-effort: some filesystems don't support chmod.
                    self.stderr.write(
                        "Notice: Couldn't set permission bits on %s. You're "
                        "probably using an uncommon filesystem setup. No "
                        "problem." % new_path, self.style.NOTICE)
        # Remove any temporary download/extract directories created above.
        if self.paths_to_remove:
            if self.verbosity >= 2:
                self.stdout.write("Cleaning up temporary files.\n")
            for path_to_remove in self.paths_to_remove:
                if path.isfile(path_to_remove):
                    os.remove(path_to_remove)
                else:
                    shutil.rmtree(path_to_remove)
    def handle_template(self, template, subdir):
        """
        Determines where the app or project templates are.
        Use django.__path__[0] as the default because we don't
        know into which directory Django has been installed.
        """
        if template is None:
            return path.join(django.__path__[0], 'conf', subdir)
        else:
            if template.startswith('file://'):
                template = template[7:]
            expanded_template = path.expanduser(template)
            expanded_template = path.normpath(expanded_template)
            if path.isdir(expanded_template):
                return expanded_template
            if self.is_url(template):
                # downloads the file and returns the path
                absolute_path = self.download(template)
            else:
                absolute_path = path.abspath(expanded_template)
            if path.exists(absolute_path):
                # Assume an archive (zip/tar) and extract it.
                return self.extract(absolute_path)
        raise CommandError("couldn't handle %s template %s." %
                           (self.app_or_project, template))
    def validate_name(self, name, app_or_project):
        """Raise CommandError unless name is a valid Python identifier."""
        if name is None:
            raise CommandError("you must provide %s %s name" % (
                "an" if app_or_project == "app" else "a", app_or_project))
        # If it's not a valid directory name.
        if not re.search(r'^[_a-zA-Z]\w*$', name):
            # Provide a smart error message, depending on the error.
            if not re.search(r'^[_a-zA-Z]', name):
                message = 'make sure the name begins with a letter or underscore'
            else:
                message = 'use only numbers, letters and underscores'
            raise CommandError("%r is not a valid %s name. Please %s." %
                               (name, app_or_project, message))
    def download(self, url):
        """
        Downloads the given URL and returns the file name.
        """
        def cleanup_url(url):
            # Split off the last path component; keep a trailing-slash form
            # of the URL for display purposes only.
            tmp = url.rstrip('/')
            filename = tmp.split('/')[-1]
            if url.endswith('/'):
                display_url = tmp + '/'
            else:
                display_url = url
            return filename, display_url
        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
        self.paths_to_remove.append(tempdir)
        filename, display_url = cleanup_url(url)
        if self.verbosity >= 2:
            self.stdout.write("Downloading %s\n" % display_url)
        try:
            the_path, info = urlretrieve(url, path.join(tempdir, filename))
        except IOError as e:
            raise CommandError("couldn't download URL %s to %s: %s" %
                               (url, filename, e))
        used_name = the_path.split('/')[-1]
        # Trying to get better name from response headers
        content_disposition = info.get('content-disposition')
        if content_disposition:
            _, params = cgi.parse_header(content_disposition)
            guessed_filename = params.get('filename') or used_name
        else:
            guessed_filename = used_name
        # Falling back to content type guessing
        ext = self.splitext(guessed_filename)[1]
        content_type = info.get('content-type')
        if not ext and content_type:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                guessed_filename += ext
        # Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
        if used_name != guessed_filename:
            guessed_path = path.join(tempdir, guessed_filename)
            shutil.move(the_path, guessed_path)
            return guessed_path
        # Giving up
        return the_path
    def splitext(self, the_path):
        """
        Like os.path.splitext, but takes off .tar, too
        """
        base, ext = posixpath.splitext(the_path)
        if base.lower().endswith('.tar'):
            # Fold ".tar" into the extension so "x.tar.gz" -> ("x", ".tar.gz").
            ext = base[-4:] + ext
            base = base[:-4]
        return base, ext
    def extract(self, filename):
        """
        Extracts the given file to a temporarily and returns
        the path of the directory with the extracted content.
        """
        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
        self.paths_to_remove.append(tempdir)
        if self.verbosity >= 2:
            self.stdout.write("Extracting %s\n" % filename)
        try:
            archive.extract(filename, tempdir)
            return tempdir
        except (archive.ArchiveException, IOError) as e:
            raise CommandError("couldn't extract file %s to %s: %s" %
                               (filename, tempdir, e))
    def is_url(self, template):
        """
        Returns True if the name looks like a URL
        """
        if ':' not in template:
            return False
        scheme = template.split(':', 1)[0].lower()
        return scheme in self.url_schemes
    def make_writeable(self, filename):
        """
        Make sure that the file is writeable.
        Useful if our source is read-only.
        """
        if sys.platform.startswith('java'):
            # On Jython there is no os.access()
            return
        if not os.access(filename, os.W_OK):
            st = os.stat(filename)
            new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
            os.chmod(filename, new_permissions)
|
gurneyalex/sale-workflow | refs/heads/8.0 | __unported__/sale_delivery_term/__init__.py | 74 | # -*- coding: utf-8 -*-
#
#
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import sale
|
pombredanne/abby | refs/heads/master | src/oscar/models.py | 1 | '''
Models definitions
'''
from __future__ import with_statement
import os
#import sys
import datetime
from google.appengine.ext import db
#import logging
class Post(db.Model):
    """
    Abstract class over post provided in the `posts` directory
    @todo: handle tzinfo http://tinyurl.com/dleqec
    @todo: releted_objects realy^realy wanna have it - reviews, docs, pics + external
    @todo: load fields from html
        - title
        - subtitle
        - abstract_html
        - body_html
        - tags
        - category
    """
    title = db.StringProperty(required=True)
    subtitle = db.StringProperty()
    slug = db.StringProperty(required=True)
    abstract_html = db.StringProperty()
    body_html = db.TextProperty(verbose_name="HTML post content")
    date_created = db.DateTimeProperty(verbose_name='date created')
    date_published = db.DateTimeProperty(auto_now=True,verbose_name='date published')
    date_updated = db.DateTimeProperty(verbose_name='date updated')
    # enable_comments = db.BooleanProperty(default=False)
    # last_comment_date = db.DateTimeProperty(verbose_name='date of last comment', null=True)
    # comment_count = db.IntegerProperty(default=0)
    category = db.CategoryProperty()
    tags = db.ListProperty(db.Category)
    rateing = db.RatingProperty()
    # releted_objects

    def get_absolute_url(self):
        """Return the canonical URL path for this post."""
        return "/posts/%s/" % self.slug

    @classmethod
    def _get_file_meta(cls, file=None, file_path=None):
        """
        Read all the useful meta-data about the file given by `file` object or `file_path`

        At least one of `file` / `file_path` must be provided; passing
        neither raises NameError (callers always supply one).
        """
        if not file and file_path:
            file = open(file_path, 'r')
        file_body = file.read()
        meta = {
            'title': file.name,  # TODO: read title from html
            'subtitle': 'dupa',  # TODO: placeholder -- read from html
            'slug': os.path.splitext(os.path.basename(file.name))[0],
            'abstract_html': 'Abstract',  # TODO: placeholder
            'body_html': file_body,
            'tags': [db.Category('one tag'), db.Category('second tag')],  # TODO: placeholder
        }
        if file_path:
            # Filesystem timestamps are only available when a real path is known.
            meta.update({
                'date_created': datetime.datetime.fromtimestamp(os.path.getctime(file_path)),
                'date_updated': datetime.datetime.fromtimestamp(os.path.getmtime(file_path)),
            })
        return meta

    @classmethod
    def load_from_file(cls, file=None, file_path=None):
        """
        Stores file with its metadata to DB

        The datastore key name is the file path, so re-importing the same
        file replaces the existing entity.
        """
        if not file:
            file = open(file_path, 'r')
        if not file_path:
            file_path = file.name
        with file:
            file_meta = cls._get_file_meta(file, file_path=file_path)
            cls_properties = dict([[p, file_meta.get(p, None)] for p in cls.properties()])
            cls(key_name=file_path, **cls_properties).put()

    @classmethod
    def sync_with_path(cls, path, keep_deleted=True, with_file_ext=''):
        """
        Recursively read `path` and call `cls.load_from_file` for each of the files found
        Files that were earlier imported get replaced with newer ones.
        New ones get added. Removed files are controlled by the `keep_deleted` parameter.
        """
        files_updated = []
        if os.path.isfile(path):
            # Single-file sync: no deletion pass is needed.
            if path.endswith(with_file_ext):
                cls.load_from_file(file_path=path)
                files_updated.append(path)
            return
        for root, dirs, files in os.walk(path):
            for file_name in files:
                if file_name.endswith(with_file_ext):
                    file_path = os.path.join(root, file_name)
                    cls.load_from_file(file_path=file_path)
                    files_updated.append(file_path)
        if not keep_deleted:
            to_delete = [post for post in cls.all() if post.key().id_or_name() not in files_updated]
            # FIX: use an explicit loop instead of map() for the side effect.
            # Under Python 3, map() is lazy, so the previous
            # map(lambda p: p.delete(), to_delete) would delete nothing.
            for post in to_delete:
                post.delete()
bluven/eonboard | refs/heads/master | eoncloud_web/biz/floating/admin.py | 4 | #coding=utf-8
from django.contrib import admin
from biz.floating.models import Floating
class FloatingAdmin(admin.ModelAdmin):
    """Django admin configuration for the Floating (floating IP) model."""
    # Columns shown in the admin change-list for floating IPs.
    list_display = ("id", "ip", "status", "instance","user")
# Make the Floating model manageable through the Django admin site.
admin.site.register(Floating, FloatingAdmin)
|
googleapis/google-auth-library-python | refs/heads/master | google/auth/external_account.py | 1 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External Account Credentials.
This module provides credentials that exchange workload identity pool external
credentials for Google access tokens. This facilitates accessing Google Cloud
Platform resources from on-prem and non-Google Cloud platforms (e.g. AWS,
Microsoft Azure, OIDC identity providers), using native credentials retrieved
from the current environment without the need to copy, save and manage
long-lived service account credentials.
Specifically, this is intended to use access tokens acquired using the GCP STS
token exchange endpoint following the `OAuth 2.0 Token Exchange`_ spec.
.. _OAuth 2.0 Token Exchange: https://tools.ietf.org/html/rfc8693
"""
import abc
import copy
import datetime
import json
import re
import six
from google.auth import _helpers
from google.auth import credentials
from google.auth import exceptions
from google.auth import impersonated_credentials
from google.oauth2 import sts
from google.oauth2 import utils
# External account JSON type identifier.
_EXTERNAL_ACCOUNT_JSON_TYPE = "external_account"
# The token exchange grant_type used for exchanging credentials.
_STS_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
# The token exchange requested_token_type. This is always an access_token.
_STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
# Cloud resource manager URL used to retrieve project information.
_CLOUD_RESOURCE_MANAGER = "https://cloudresourcemanager.googleapis.com/v1/projects/"
@six.add_metaclass(abc.ABCMeta)
class Credentials(credentials.Scoped, credentials.CredentialsWithQuotaProject):
    """Base class for all external account credentials.
    This is used to instantiate Credentials for exchanging external account
    credentials for Google access token and authorizing requests to Google APIs.
    The base class implements the common logic for exchanging external account
    credentials for Google access tokens.
    """
    def __init__(
        self,
        audience,
        subject_token_type,
        token_url,
        credential_source,
        service_account_impersonation_url=None,
        client_id=None,
        client_secret=None,
        quota_project_id=None,
        scopes=None,
        default_scopes=None,
    ):
        """Instantiates an external account credentials object.
        Args:
            audience (str): The STS audience field.
            subject_token_type (str): The subject token type.
            token_url (str): The STS endpoint URL.
            credential_source (Mapping): The credential source dictionary.
            service_account_impersonation_url (Optional[str]): The optional service account
                impersonation generateAccessToken URL.
            client_id (Optional[str]): The optional client ID.
            client_secret (Optional[str]): The optional client secret.
            quota_project_id (Optional[str]): The optional quota project ID.
            scopes (Optional[Sequence[str]]): Optional scopes to request during the
                authorization grant.
            default_scopes (Optional[Sequence[str]]): Default scopes passed by a
                Google client library. Use 'scopes' for user-defined scopes.
        Raises:
            google.auth.exceptions.RefreshError: If the generateAccessToken
                endpoint returned an error.
        """
        super(Credentials, self).__init__()
        self._audience = audience
        self._subject_token_type = subject_token_type
        self._token_url = token_url
        self._credential_source = credential_source
        self._service_account_impersonation_url = service_account_impersonation_url
        self._client_id = client_id
        self._client_secret = client_secret
        self._quota_project_id = quota_project_id
        self._scopes = scopes
        self._default_scopes = default_scopes
        # HTTP basic client authentication against the STS endpoint is only
        # configured when a client ID was supplied.
        if self._client_id:
            self._client_auth = utils.ClientAuthentication(
                utils.ClientAuthType.basic, self._client_id, self._client_secret
            )
        else:
            self._client_auth = None
        self._sts_client = sts.Client(self._token_url, self._client_auth)
        if self._service_account_impersonation_url:
            self._impersonated_credentials = self._initialize_impersonated_credentials()
        else:
            self._impersonated_credentials = None
        # Project ID is resolved lazily by get_project_id() and cached here.
        self._project_id = None
    @property
    def info(self):
        """Generates the dictionary representation of the current credentials.
        Returns:
            Mapping: The dictionary representation of the credentials. This is the
                reverse of "from_info" defined on the subclasses of this class. It is
                useful for serializing the current credentials so it can deserialized
                later.
        """
        config_info = {
            "type": _EXTERNAL_ACCOUNT_JSON_TYPE,
            "audience": self._audience,
            "subject_token_type": self._subject_token_type,
            "token_url": self._token_url,
            "service_account_impersonation_url": self._service_account_impersonation_url,
            "credential_source": copy.deepcopy(self._credential_source),
            "quota_project_id": self._quota_project_id,
            "client_id": self._client_id,
            "client_secret": self._client_secret,
        }
        # Drop unset (None) fields so the output mirrors the input config.
        return {key: value for key, value in config_info.items() if value is not None}
    @property
    def service_account_email(self):
        """Returns the service account email if service account impersonation is used.
        Returns:
            Optional[str]: The service account email if impersonation is used. Otherwise
                None is returned.
        """
        if self._service_account_impersonation_url:
            # Parse email from URL. The formal looks as follows:
            # https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/name@project-id.iam.gserviceaccount.com:generateAccessToken
            url = self._service_account_impersonation_url
            start_index = url.rfind("/")
            end_index = url.find(":generateAccessToken")
            if start_index != -1 and end_index != -1 and start_index < end_index:
                start_index = start_index + 1
                return url[start_index:end_index]
        return None
    @property
    def is_user(self):
        """Returns whether the credentials represent a user (True) or workload (False).
        Workloads behave similarly to service accounts. Currently workloads will use
        service account impersonation but will eventually not require impersonation.
        As a result, this property is more reliable than the service account email
        property in determining if the credentials represent a user or workload.
        Returns:
            bool: True if the credentials represent a user. False if they represent a
                workload.
        """
        # If service account impersonation is used, the credentials will always represent a
        # service account.
        if self._service_account_impersonation_url:
            return False
        # Workforce pools representing users have the following audience format:
        # //iam.googleapis.com/locations/$location/workforcePools/$poolId/providers/$providerId
        p = re.compile(r"//iam\.googleapis\.com/locations/[^/]+/workforcePools/")
        if p.match(self._audience):
            return True
        return False
    @property
    def requires_scopes(self):
        """Checks if the credentials requires scopes.
        Returns:
            bool: True if there are no scopes set otherwise False.
        """
        return not self._scopes and not self._default_scopes
    @property
    def project_number(self):
        """Optional[str]: The project number corresponding to the workload identity pool."""
        # STS audience pattern:
        # //iam.googleapis.com/projects/$PROJECT_NUMBER/locations/...
        components = self._audience.split("/")
        try:
            project_index = components.index("projects")
            if project_index + 1 < len(components):
                return components[project_index + 1] or None
        except ValueError:
            # "projects" segment absent: not a workload identity pool audience.
            return None
    @_helpers.copy_docstring(credentials.Scoped)
    def with_scopes(self, scopes, default_scopes=None):
        # Return a copy of this credential with the given scopes applied;
        # all other configuration is carried over unchanged.
        return self.__class__(
            audience=self._audience,
            subject_token_type=self._subject_token_type,
            token_url=self._token_url,
            credential_source=self._credential_source,
            service_account_impersonation_url=self._service_account_impersonation_url,
            client_id=self._client_id,
            client_secret=self._client_secret,
            quota_project_id=self._quota_project_id,
            scopes=scopes,
            default_scopes=default_scopes,
        )
    @abc.abstractmethod
    def retrieve_subject_token(self, request):
        """Retrieves the subject token using the credential_source object.
        Args:
            request (google.auth.transport.Request): A callable used to make
                HTTP requests.
        Returns:
            str: The retrieved subject token.
        """
        # pylint: disable=missing-raises-doc
        # (pylint doesn't recognize that this is abstract)
        raise NotImplementedError("retrieve_subject_token must be implemented")
    def get_project_id(self, request):
        """Retrieves the project ID corresponding to the workload identity pool.
        When not determinable, None is returned.
        This is introduced to support the current pattern of using the Auth library:
        credentials, project_id = google.auth.default()
        The resource may not have permission (resourcemanager.projects.get) to
        call this API or the required scopes may not be selected:
        https://cloud.google.com/resource-manager/reference/rest/v1/projects/get#authorization-scopes
        Args:
            request (google.auth.transport.Request): A callable used to make
                HTTP requests.
        Returns:
            Optional[str]: The project ID corresponding to the workload identity pool
                if determinable.
        """
        if self._project_id:
            # If already retrieved, return the cached project ID value.
            return self._project_id
        scopes = self._scopes if self._scopes is not None else self._default_scopes
        # Scopes are required in order to retrieve a valid access token.
        if self.project_number and scopes:
            headers = {}
            url = _CLOUD_RESOURCE_MANAGER + self.project_number
            # Attach the authorization header (refreshing the token if needed).
            self.before_request(request, "GET", url, headers)
            response = request(url=url, method="GET", headers=headers)
            response_body = (
                response.data.decode("utf-8")
                if hasattr(response.data, "decode")
                else response.data
            )
            response_data = json.loads(response_body)
            if response.status == 200:
                # Cache result as this field is immutable.
                self._project_id = response_data.get("projectId")
                return self._project_id
        return None
    @_helpers.copy_docstring(credentials.Credentials)
    def refresh(self, request):
        scopes = self._scopes if self._scopes is not None else self._default_scopes
        if self._impersonated_credentials:
            # Impersonation path: delegate the refresh and mirror its token.
            self._impersonated_credentials.refresh(request)
            self.token = self._impersonated_credentials.token
            self.expiry = self._impersonated_credentials.expiry
        else:
            # Direct path: exchange the external subject token via STS.
            now = _helpers.utcnow()
            response_data = self._sts_client.exchange_token(
                request=request,
                grant_type=_STS_GRANT_TYPE,
                subject_token=self.retrieve_subject_token(request),
                subject_token_type=self._subject_token_type,
                audience=self._audience,
                scopes=scopes,
                requested_token_type=_STS_REQUESTED_TOKEN_TYPE,
            )
            self.token = response_data.get("access_token")
            lifetime = datetime.timedelta(seconds=response_data.get("expires_in"))
            self.expiry = now + lifetime
    @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
    def with_quota_project(self, quota_project_id):
        # Return copy of instance with the provided quota project ID.
        return self.__class__(
            audience=self._audience,
            subject_token_type=self._subject_token_type,
            token_url=self._token_url,
            credential_source=self._credential_source,
            service_account_impersonation_url=self._service_account_impersonation_url,
            client_id=self._client_id,
            client_secret=self._client_secret,
            quota_project_id=quota_project_id,
            scopes=self._scopes,
            default_scopes=self._default_scopes,
        )
    def _initialize_impersonated_credentials(self):
        """Generates an impersonated credentials.
        For more details, see `projects.serviceAccounts.generateAccessToken`_.
        .. _projects.serviceAccounts.generateAccessToken: https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/generateAccessToken
        Returns:
            impersonated_credentials.Credential: The impersonated credentials
                object.
        Raises:
            google.auth.exceptions.RefreshError: If the generateAccessToken
                endpoint returned an error.
        """
        # Return copy of instance with no service account impersonation.
        # This copy acts as the source credential for the impersonation call.
        source_credentials = self.__class__(
            audience=self._audience,
            subject_token_type=self._subject_token_type,
            token_url=self._token_url,
            credential_source=self._credential_source,
            service_account_impersonation_url=None,
            client_id=self._client_id,
            client_secret=self._client_secret,
            quota_project_id=self._quota_project_id,
            scopes=self._scopes,
            default_scopes=self._default_scopes,
        )
        # Determine target_principal.
        target_principal = self.service_account_email
        if not target_principal:
            raise exceptions.RefreshError(
                "Unable to determine target principal from service account impersonation URL."
            )
        scopes = self._scopes if self._scopes is not None else self._default_scopes
        # Initialize and return impersonated credentials.
        return impersonated_credentials.Credentials(
            source_credentials=source_credentials,
            target_principal=target_principal,
            target_scopes=scopes,
            quota_project_id=self._quota_project_id,
            iam_endpoint_override=self._service_account_impersonation_url,
        )
|
tchellomello/home-assistant | refs/heads/dev | homeassistant/components/esphome/switch.py | 7 | """Support for ESPHome switches."""
import logging
from typing import Optional
from aioesphomeapi import SwitchInfo, SwitchState
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import EsphomeEntity, esphome_state_property, platform_async_setup_entry
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up ESPHome switches based on a config entry."""
    # Delegate to the shared ESPHome platform helper, declaring which native
    # info/state types map onto this platform's entity class.
    await platform_async_setup_entry(
        hass,
        entry,
        async_add_entities,
        component_key="switch",
        info_type=SwitchInfo,
        entity_type=EsphomeSwitch,
        state_type=SwitchState,
    )
class EsphomeSwitch(EsphomeEntity, SwitchEntity):
    """A switch implementation for ESPHome."""
    @property
    def _static_info(self) -> SwitchInfo:
        # Narrow the base-class return type to this platform's info type.
        return super()._static_info
    @property
    def _state(self) -> Optional[SwitchState]:
        # Narrow the base-class return type; may be None before any state arrives.
        return super()._state
    @property
    def icon(self) -> str:
        """Return the icon."""
        return self._static_info.icon
    @property
    def assumed_state(self) -> bool:
        """Return true if we do optimistic updates."""
        return self._static_info.assumed_state
    # https://github.com/PyCQA/pylint/issues/3150 for @esphome_state_property
    # pylint: disable=invalid-overridden-method
    @esphome_state_property
    def is_on(self) -> Optional[bool]:
        """Return true if the switch is on."""
        return self._state.state
    async def async_turn_on(self, **kwargs) -> None:
        """Turn the entity on."""
        # Send the command; NOTE(review): presumably the device reports the
        # resulting state back asynchronously -- confirm against EsphomeEntity.
        await self._client.switch_command(self._static_info.key, True)
    async def async_turn_off(self, **kwargs) -> None:
        """Turn the entity off."""
        await self._client.switch_command(self._static_info.key, False)
|
vbannai/disk-qos-horizon | refs/heads/master | horizon/tests/api_tests/quantum_tests.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon import api
from horizon import test
class QuantumApiTests(test.APITestCase):
    """Tests for the ``api.quantum`` wrappers around the Quantum client.

    Every test follows the same mox record/replay pattern:

    1. stub out the quantum client (``self.stub_quantumclient()``),
    2. record the exact client calls the wrapper under test is expected
       to make, with canned return values taken from the test fixtures,
    3. ``self.mox.ReplayAll()`` to switch from record to replay mode,
    4. invoke the ``api.quantum`` wrapper and assert the raw dicts from
       the client are wrapped in the Horizon API classes
       (``Network``, ``Subnet``, ``Port``).

    The recorded expectations are order-sensitive: mox verifies that the
    wrapper issues exactly these calls in exactly this sequence.
    """

    def test_network_list(self):
        """network_list() returns a Network wrapper for each listed dict."""
        networks = {'networks': self.api_networks.list()}
        subnets = {'subnets': self.api_subnets.list()}
        quantumclient = self.stub_quantumclient()
        quantumclient.list_networks().AndReturn(networks)
        # The wrapper is expected to fetch subnets in the same call.
        quantumclient.list_subnets().AndReturn(subnets)
        self.mox.ReplayAll()
        ret_val = api.quantum.network_list(self.request)
        for n in ret_val:
            self.assertIsInstance(n, api.quantum.Network)

    def test_network_get(self):
        """network_get() fetches one network and its first subnet."""
        network = {'network': self.api_networks.first()}
        subnet = {'subnet': self.api_subnets.first()}
        network_id = self.api_networks.first()['id']
        subnet_id = self.api_networks.first()['subnets'][0]
        quantumclient = self.stub_quantumclient()
        quantumclient.show_network(network_id).AndReturn(network)
        quantumclient.show_subnet(subnet_id).AndReturn(subnet)
        self.mox.ReplayAll()
        ret_val = api.quantum.network_get(self.request, network_id)
        self.assertIsInstance(ret_val, api.quantum.Network)

    def test_network_create(self):
        """network_create() posts {'network': {...}} and wraps the result."""
        network = {'network': self.api_networks.first()}
        quantumclient = self.stub_quantumclient()
        form_data = {'network': {'name': 'net1'}}
        quantumclient.create_network(body=form_data).AndReturn(network)
        self.mox.ReplayAll()
        ret_val = api.quantum.network_create(self.request, name='net1')
        self.assertIsInstance(ret_val, api.quantum.Network)

    def test_network_modify(self):
        """network_modify() issues update_network with the changed fields."""
        network = {'network': self.api_networks.first()}
        network_id = self.api_networks.first()['id']
        quantumclient = self.stub_quantumclient()
        form_data = {'network': {'name': 'net1'}}
        quantumclient.update_network(network_id, body=form_data)\
            .AndReturn(network)
        self.mox.ReplayAll()
        ret_val = api.quantum.network_modify(self.request, network_id,
                                             name='net1')
        self.assertIsInstance(ret_val, api.quantum.Network)

    def test_network_delete(self):
        """network_delete() forwards straight to delete_network."""
        network_id = self.api_networks.first()['id']
        quantumclient = self.stub_quantumclient()
        quantumclient.delete_network(network_id)
        self.mox.ReplayAll()
        api.quantum.network_delete(self.request, network_id)

    def test_subnet_list(self):
        """subnet_list() returns a Subnet wrapper for each listed dict."""
        subnets = {'subnets': self.api_subnets.list()}
        quantumclient = self.stub_quantumclient()
        quantumclient.list_subnets().AndReturn(subnets)
        self.mox.ReplayAll()
        ret_val = api.quantum.subnet_list(self.request)
        for n in ret_val:
            self.assertIsInstance(n, api.quantum.Subnet)

    def test_subnet_get(self):
        """subnet_get() fetches a single subnet by id."""
        subnet = {'subnet': self.api_subnets.first()}
        subnet_id = self.api_subnets.first()['id']
        quantumclient = self.stub_quantumclient()
        quantumclient.show_subnet(subnet_id).AndReturn(subnet)
        self.mox.ReplayAll()
        ret_val = api.quantum.subnet_get(self.request, subnet_id)
        self.assertIsInstance(ret_val, api.quantum.Subnet)

    def test_subnet_create(self):
        """subnet_create() posts {'subnet': params} built from kwargs."""
        subnet_data = self.api_subnets.first()
        params = {'network_id': subnet_data['network_id'],
                  'tenant_id': subnet_data['tenant_id'],
                  'name': subnet_data['name'],
                  'cidr': subnet_data['cidr'],
                  'ip_version': subnet_data['ip_version'],
                  'gateway_ip': subnet_data['gateway_ip']}
        quantumclient = self.stub_quantumclient()
        quantumclient.create_subnet(body={'subnet': params})\
            .AndReturn({'subnet': subnet_data})
        self.mox.ReplayAll()
        ret_val = api.quantum.subnet_create(self.request, **params)
        self.assertIsInstance(ret_val, api.quantum.Subnet)

    def test_subnet_modify(self):
        """subnet_modify() issues update_subnet with the changed fields."""
        subnet_data = self.api_subnets.first()
        subnet_id = subnet_data['id']
        params = {'name': subnet_data['name'],
                  'gateway_ip': subnet_data['gateway_ip']}
        quantumclient = self.stub_quantumclient()
        quantumclient.update_subnet(subnet_id, body={'subnet': params})\
            .AndReturn({'subnet': subnet_data})
        self.mox.ReplayAll()
        ret_val = api.quantum.subnet_modify(self.request, subnet_id, **params)
        self.assertIsInstance(ret_val, api.quantum.Subnet)

    def test_subnet_delete(self):
        """subnet_delete() forwards straight to delete_subnet."""
        subnet_id = self.api_subnets.first()['id']
        quantumclient = self.stub_quantumclient()
        quantumclient.delete_subnet(subnet_id)
        self.mox.ReplayAll()
        api.quantum.subnet_delete(self.request, subnet_id)

    def test_port_list(self):
        """port_list() returns a Port wrapper for each listed dict."""
        ports = {'ports': self.api_ports.list()}
        quantumclient = self.stub_quantumclient()
        quantumclient.list_ports().AndReturn(ports)
        self.mox.ReplayAll()
        ret_val = api.quantum.port_list(self.request)
        for p in ret_val:
            self.assertIsInstance(p, api.quantum.Port)

    def test_port_get(self):
        """port_get() fetches a single port by id."""
        port = {'port': self.api_ports.first()}
        port_id = self.api_ports.first()['id']
        quantumclient = self.stub_quantumclient()
        quantumclient.show_port(port_id).AndReturn(port)
        self.mox.ReplayAll()
        ret_val = api.quantum.port_get(self.request, port_id)
        self.assertIsInstance(ret_val, api.quantum.Port)

    def test_port_create(self):
        """port_create() posts {'port': params}; the returned wrapper keeps
        the id of the fixture port fed back through the stub."""
        port_data = self.api_ports.first()
        params = {'network_id': port_data['network_id'],
                  'tenant_id': port_data['tenant_id'],
                  'name': port_data['name'],
                  'device_id': port_data['device_id']}
        quantumclient = self.stub_quantumclient()
        quantumclient.create_port(body={'port': params})\
            .AndReturn({'port': port_data})
        self.mox.ReplayAll()
        ret_val = api.quantum.port_create(self.request, **params)
        self.assertIsInstance(ret_val, api.quantum.Port)
        self.assertEqual(ret_val.id, api.quantum.Port(port_data).id)

    def test_port_modify(self):
        """port_modify() issues update_port with the changed fields."""
        port_data = self.api_ports.first()
        port_id = port_data['id']
        params = {'name': port_data['name'],
                  'device_id': port_data['device_id']}
        quantumclient = self.stub_quantumclient()
        quantumclient.update_port(port_id, body={'port': params})\
            .AndReturn({'port': port_data})
        self.mox.ReplayAll()
        ret_val = api.quantum.port_modify(self.request, port_id, **params)
        self.assertIsInstance(ret_val, api.quantum.Port)
        self.assertEqual(ret_val.id, api.quantum.Port(port_data).id)

    def test_port_delete(self):
        """port_delete() forwards straight to delete_port."""
        port_id = self.api_ports.first()['id']
        quantumclient = self.stub_quantumclient()
        quantumclient.delete_port(port_id)
        self.mox.ReplayAll()
        api.quantum.port_delete(self.request, port_id)
|
matthew-brett/draft-statsmodels | refs/heads/master | scikits/statsmodels/rlm.py | 1 | """
Robust linear models with support for the M-estimators listed under
:ref:`norms <norms>`.
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York. 1981.
PJ Huber. 1973, 'The 1972 Wald Memorial Lectures: Robust Regression:
Asymptotics, Conjectures, and Monte Carlo.' The Annals of Statistics,
1.5, 799-821.
R Venables, B Ripley. 'Modern Applied Statistics in S' Springer, New York,
2002.
"""
import numpy as np
import tools
from regression import WLS, GLS
from robust import norms, scale
from model import LikelihoodModel, LikelihoodModelResults
from decorators import *
__all__ = ['RLM']
class RLM(LikelihoodModel):
    """
    Robust Linear Models

    Estimate a robust linear model via iteratively reweighted least squares
    given a robust criterion estimator.

    Parameters
    ----------
    endog : array-like
        1d endogenous response variable
    exog : array-like
        n x p exogenous design matrix
    M : scikits.statsmodels.robust.norms.RobustNorm, optional
        The robust criterion function for downweighting outliers.
        The current options are LeastSquares, HuberT, RamsayE, AndrewWave,
        TrimmedMean, Hampel, and TukeyBiweight. The default is HuberT().
        See scikits.statsmodels.robust.norms for more information.

    Methods
    -------
    deviance
        Returns the (unnormalized) log-likelihood of the model
    fit
        Fits the model. Returns an RLMResults class.
    information
        Not yet implemented.
    newton
        Not yet implemented.
    results
        A property that returns an RLMResults class. Equivalent to calling
        fit with the default arguments.
    score
        Not yet implemented.

    Notes
    -----
    **Attributes**

    df_model : float
        The degrees of freedom of the model. The number of regressors p less
        one for the intercept. Note that the reported model degrees
        of freedom does not count the intercept as a regressor, though
        the model is assumed to have an intercept.
    df_resid : float
        The residual degrees of freedom. The number of observations n
        less the number of regressors p. Note that here p does include
        the intercept as using a degree of freedom.
    endog : array
        See above. Note that endog is a reference to the data so that if
        data is already an array and it is changed, then `endog` changes
        as well.
    exog : array
        See above. Note that endog is a reference to the data so that if
        data is already an array and it is changed, then `endog` changes
        as well.
    history : dict
        Contains information about the iterations. Its keys are `fittedvalues`,
        `deviance`, and `params`.
    M : scikits.statsmodels.robust.norms.RobustNorm
        See above. Robust estimator instance instantiated.
    nobs : float
        The number of observations n
    pinv_wexog : array
        The pseudoinverse of the design / exogenous data array. Note that
        RLM has no whiten method, so this is just the pseudo inverse of the
        design.
    normalized_cov_params : array
        The p x p normalized covariance of the design / exogenous data.
        This is approximately equal to (X.T X)^(-1)

    Examples
    ---------
    >>> import scikits.statsmodels as sm
    >>> data = sm.datasets.stackloss.load()
    >>> data.exog = sm.add_constant(data.exog)
    >>> rlm_model = sm.RLM(data.endog, data.exog, \
                    M=sm.robust.norms.HuberT())
    >>> rlm_results = rlm_model.fit()
    >>> rlm_results.params
    array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])
    >>> rlm_results.bse
    array([ 0.11100521, 0.30293016, 0.12864961, 9.79189854])
    >>> rlm_results_HC2 = rlm_model.fit(cov="H2")
    >>> rlm_results_HC2.params
    array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])
    >>> rlm_results_HC2.bse
    array([ 0.11945975, 0.32235497, 0.11796313, 9.08950419])
    >>>
    >>> rlm_hamp_hub = sm.RLM(data.endog, data.exog, \
                    M=sm.robust.norms.Hampel()).fit( \
                    sm.robust.scale.HuberScale())
    >>> rlm_hamp_hub.params
    array([ 0.73175452, 1.25082038, -0.14794399, -40.27122257])
    """

    def __init__(self, endog, exog, M=norms.HuberT()):
        # NOTE(review): the default norm instance is created once at class
        # definition time and shared by every RLM built without an explicit
        # M; fine while norms are stateless -- confirm before adding state.
        self.M = M
        self.endog = np.asarray(endog)
        self.exog = np.asarray(exog)
        self._initialize()

    def _initialize(self):
        """
        Initializes the model for the IRLS fit.

        Resets the history and number of iterations.
        """
        # np.inf sentinels ensure the first convergence comparison in fit()
        # always has a previous value and never terminates immediately.
        self.history = {'deviance' : [np.inf], 'params' : [np.inf],
            'weights' : [np.inf], 'sresid' : [np.inf], 'scale' : []}
        self.iteration = 0
        # RLM has no whiten step, so pinv of the raw design is used directly.
        self.pinv_wexog = np.linalg.pinv(self.exog)
        # normalized_cov_params ~ (X'X)^-1, computed as pinv(X) pinv(X)'.
        self.normalized_cov_params = np.dot(self.pinv_wexog,
                np.transpose(self.pinv_wexog))
        self.df_resid = np.float(self.exog.shape[0] - tools.rank(self.exog))
        self.df_model = np.float(tools.rank(self.exog)-1)
        self.nobs = float(self.endog.shape[0])

    def score(self, params):
        # Part of the LikelihoodModel interface; not defined for RLM.
        raise NotImplementedError

    def information(self, params):
        # Part of the LikelihoodModel interface; not defined for RLM.
        raise NotImplementedError

    def loglike(self, params):
        # Part of the LikelihoodModel interface; not defined for RLM.
        raise NotImplementedError

    def deviance(self, tmp_results):
        """
        Returns the (unnormalized) log-likelihood from the M estimator.
        """
        # Sum of the norm rho-function applied to scaled residuals.
        return self.M((self.endog - tmp_results.fittedvalues)/\
                    tmp_results.scale).sum()

    def _update_history(self, tmp_results):
        # Append this iteration's diagnostics; fit() reads these lists to
        # test convergence, so the append order here must stay in sync.
        self.history['deviance'].append(self.deviance(tmp_results))
        self.history['params'].append(tmp_results.params)
        self.history['scale'].append(tmp_results.scale)
        self.history['sresid'].append(tmp_results.resid/tmp_results.scale)
        self.history['weights'].append(tmp_results.model.weights)

    def _estimate_scale(self, resid):
        """
        Estimates the scale based on the option provided to the fit method.
        """
        if isinstance(self.scale_est, str):
            if self.scale_est.lower() == 'mad':
                return scale.mad(resid)
            if self.scale_est.lower() == 'stand_mad':
                return scale.stand_mad(resid)
        elif isinstance(self.scale_est, scale.HuberScale):
            # NOTE(review): the tuning parameters of the HuberScale instance
            # passed by the user are not forwarded here -- verify that
            # scale.hubers_scale honors them or fix the call.
            return scale.hubers_scale(self.df_resid, self.nobs, resid)
        else:
            # NOTE(review): reached for any other scale_est object; it is
            # unclear that `scale.scale_est` exists as an attribute --
            # confirm against the robust.scale module. Also note a string
            # other than 'mad'/'stand_mad' falls through and returns None.
            return scale.scale_est(self, resid)**2

    def fit(self, maxiter=50, tol=1e-8, scale_est='mad', init=None, cov='H1',
            update_scale=True, conv='dev'):
        """
        Fits the model using iteratively reweighted least squares.

        The IRLS routine runs until the specified objective converges to `tol`
        or `maxiter` has been reached.

        Parameters
        ----------
        conv : string
            Indicates the convergence criteria.
            Available options are "coefs" (the coefficients), "weights" (the
            weights in the iteration), "resids" (the standardized residuals),
            and "dev" (the un-normalized log-likelihood for the M
            estimator). The default is "dev".
        cov : string, optional
            'H1', 'H2', or 'H3'
            Indicates how the covariance matrix is estimated. Default is 'H1'.
            See rlm.RLMResults for more information.
        init : string
            Specifies method for the initial estimates of the parameters.
            Default is None, which means that the least squares estimate
            is used. Currently it is the only available choice.
        maxiter : int
            The maximum number of iterations to try. Default is 50.
        scale_est : string or HuberScale()
            'mad', 'stand_mad', or HuberScale()
            Indicates the estimate to use for scaling the weights in the IRLS.
            The default is 'mad' (median absolute deviation. Other options are
            use 'stand_mad' for the median absolute deviation standardized
            around the median and 'HuberScale' for Huber's proposal 2.
            Huber's proposal 2 has optional keyword arguments d, tol, and
            maxiter for specifying the tuning constant, the convergence
            tolerance, and the maximum number of iterations.
            See models.robust.scale for more information.
        tol : float
            The convergence tolerance of the estimate. Default is 1e-8.
        update_scale : Bool
            If `update_scale` is False then the scale estimate for the
            weights is held constant over the iteration. Otherwise, it
            is updated for each fit in the iteration. Default is True.

        Returns
        -------
        results : object
            scikits.statsmodels.rlm.RLMresults
        """
        if not cov.upper() in ["H1","H2","H3"]:
            raise AttributeError, "Covariance matrix %s not understood" % cov
        else:
            self.cov = cov.upper()
        conv = conv.lower()
        if not conv in ["weights","coefs","dev","resid"]:
            raise AttributeError, "Convergence argument %s not understood" \
                % conv
        self.scale_est = scale_est
        # Initial estimate: plain (unweighted) least squares.
        wls_results = WLS(self.endog, self.exog).fit()
        if not init:
            # NOTE(review): if a non-None `init` were ever passed,
            # self.scale would never be initialized on this path; currently
            # unreachable because None is the only documented choice.
            self.scale = self._estimate_scale(wls_results.resid)
        self._update_history(wls_results)
        self.iteration = 1
        # `criterion` aliases one of the history lists, which keeps growing
        # as _update_history appends -- the loop test below reads it live.
        if conv == 'coefs':
            criterion = self.history['params']
        elif conv == 'dev':
            criterion = self.history['deviance']
        elif conv == 'resid':
            criterion = self.history['sresid']
        elif conv == 'weights':
            criterion = self.history['weights']
        # NOTE(review): np.all(diff > tol) stops iterating as soon as ANY
        # component of the criterion changes by <= tol; confirm that is the
        # intended convergence rule (np.any would require all components).
        while (np.all(np.fabs(criterion[self.iteration]-\
                criterion[self.iteration-1]) > tol) and \
                self.iteration < maxiter):
#            self.weights = self.M.weights((self.endog - \
#                    wls_results.fittedvalues)/self.scale)
            # Reweight by the norm's weight function of scaled residuals,
            # then refit weighted least squares -- the IRLS step.
            self.weights = self.M.weights(wls_results.resid/self.scale)
            wls_results = WLS(self.endog, self.exog,
                    weights=self.weights).fit()
            if update_scale is True:
                self.scale = self._estimate_scale(wls_results.resid)
            self._update_history(wls_results)
            self.iteration += 1
        results = RLMResults(self, wls_results.params,
                self.normalized_cov_params, self.scale)
        return results
class RLMResults(LikelihoodModelResults):
    """
    Class to contain RLM results

    **Attributes**

    bcov_scaled : array
        p x p scaled covariance matrix specified in the model fit method.
        The default is H1. H1 is defined as
        k**2 * (1/df_resid*sum(M.psi(sresid)**2)*scale**2)/
        ((1/nobs*sum(M.psi_deriv(sresid)))**2) * (X.T X)^(-1)

        where k = 1 + (df_model +1)/nobs * var_psiprime/m**2
        where m = mean(M.psi_deriv(sresid)) and
        var_psiprime = var(M.psi_deriv(sresid))

        H2 is defined as
        k * (1/df_resid) * sum(M.psi(sresid)**2) *scale**2/
        ((1/nobs)*sum(M.psi_deriv(sresid)))*W_inv

        H3 is defined as
        1/k * (1/df_resid * sum(M.psi(sresid)**2)*scale**2 *
        (W_inv X.T X W_inv))

        where k is defined as above and
        W_inv = (M.psi_deriv(sresid) exog.T exog)^(-1)

        See the technical documentation for cleaner formulae.
    bcov_unscaled : array
        The usual p x p covariance matrix with scale set equal to 1. It
        is then just equivalent to normalized_cov_params.
    bse : array
        An array of the standard errors of the parameters. The standard
        errors are taken from the robust covariance matrix specified in the
        argument to fit.
    chisq : array
        An array of the chi-squared values of the paramter estimates.
    df_model
        See RLM.df_model
    df_resid
        See RLM.df_resid
    fittedvalues : array
        The linear predicted values. dot(exog, params)
    model : scikits.statsmodels.rlm.RLM
        A reference to the model instance
    nobs : float
        The number of observations n
    normalized_cov_params : array
        See RLM.normalized_cov_params
    params : array
        The coefficients of the fitted model
    pinv_wexog : array
        See RLM.pinv_wexog
    resid : array
        The residuals of the fitted model. endog - fittedvalues
    scale : float
        The type of scale is determined in the arguments to the fit method in
        RLM. The reported scale is taken from the residuals of the weighted
        least squares in the last IRLS iteration if update_scale is True. If
        update_scale is False, then it is the scale given by the first OLS
        fit before the IRLS iterations.
    sresid : array
        The scaled residuals.
    weights : array
        The reported weights are determined by passing the scaled residuals
        from the last weighted least squares fit in the IRLS algortihm.

    See also
    --------
    scikits.statsmodels.model.LikelihoodModelResults
    """

    def __init__(self, model, params, normalized_cov_params, scale):
        super(RLMResults, self).__init__(model, params,
                normalized_cov_params, scale)
        self.model = model
        self.df_model = model.df_model
        self.df_resid = model.df_resid
        self.nobs = model.nobs
        # Backing store for the cache_readonly properties below.
        self._cache = resettable_cache()

    #TODO: "pvals" should come from chisq on bse?
    @cache_readonly
    def fittedvalues(self):
        # Linear prediction X * beta.
        return np.dot(self.model.exog, self.params)

    @cache_readonly
    def resid(self):
        return self.model.endog - self.fittedvalues # before bcov

    @cache_readonly
    def sresid(self):
        # Residuals standardized by the robust scale estimate.
        return self.resid/self.scale

    @cache_readonly
    def bcov_unscaled(self):
        return self.cov_params(scale=1.)

    @cache_readonly
    def weights(self):
        return self.model.weights

    @cache_readonly
    def bcov_scaled(self):
        # Robust covariance per the H1/H2/H3 estimators documented in the
        # class docstring; model.cov was validated by RLM.fit, so exactly
        # one of the branches below is taken.
        model = self.model
        m = np.mean(model.M.psi_deriv(self.sresid))
        var_psiprime = np.var(model.M.psi_deriv(self.sresid))
        # Small-sample correction factor common to all three estimators.
        k = 1 + (self.df_model+1)/self.nobs * var_psiprime/m**2
        if model.cov == "H1":
            return k**2 * (1/self.df_resid*\
                np.sum(model.M.psi(self.sresid)**2)*self.scale**2)\
                /((1/self.nobs*np.sum(model.M.psi_deriv(self.sresid)))**2)\
                *model.normalized_cov_params
        else:
            W = np.dot(model.M.psi_deriv(self.sresid)*model.exog.T,
                    model.exog)
            W_inv = np.linalg.inv(W)
            # [W_jk]^-1 = [SUM(psi_deriv(Sr_i)*x_ij*x_jk)]^-1
            # where Sr are the standardized residuals
            if model.cov == "H2":
                # These are correct, based on Huber (1973) 8.13
                return k*(1/self.df_resid)*np.sum(\
                    model.M.psi(self.sresid)**2)*self.scale**2\
                    /((1/self.nobs)*np.sum(\
                    model.M.psi_deriv(self.sresid)))*W_inv
            elif model.cov == "H3":
                return k**-1*1/self.df_resid*np.sum(\
                    model.M.psi(self.sresid)**2)*self.scale**2\
                    *np.dot(np.dot(W_inv, np.dot(model.exog.T,model.exog)),\
                    W_inv)

    #TODO: make the t-values (or whatever) based on these
    @cache_readonly
    def bse(self):
        # Standard errors from the diagonal of the robust covariance.
        return np.sqrt(np.diag(self.bcov_scaled))

    @cache_readonly
    def chisq(self):
        # Squared Wald statistics for each parameter.
        return (self.params/self.bse)**2
if __name__=="__main__":
    # Ad-hoc development/demo driver. NOTE(review): it imports the legacy
    # `models` package (pre scikits.statsmodels rename), so it will not run
    # against the current package layout -- verify before relying on it.
    #NOTE: This is to be removed
    #Delivery Time Data is taken from Montgomery and Peck
    import models
    #delivery time(minutes)
    endog = np.array([16.68, 11.50, 12.03, 14.88, 13.75, 18.11, 8.00, 17.83,
    79.24, 21.50, 40.33, 21.00, 13.50, 19.75, 24.00, 29.00, 15.35, 19.00,
    9.50, 35.10, 17.90, 52.32, 18.75, 19.83, 10.75])
    #number of cases, distance (Feet)
    exog = np.array([[7, 3, 3, 4, 6, 7, 2, 7, 30, 5, 16, 10, 4, 6, 9, 10, 6,
    7, 3, 17, 10, 26, 9, 8, 4], [560, 220, 340, 80, 150, 330, 110, 210, 1460,
    605, 688, 215, 255, 462, 448, 776, 200, 132, 36, 770, 140, 810, 450, 635,
    150]])
    exog = exog.T
    exog = models.tools.add_constant(exog)
#    model_ols = models.regression.OLS(endog, exog)
#    results_ols = model_ols.fit()
#    model_huber = RLM(endog, exog, M=norms.HuberT(t=2.))
#    results_huber = model_huber.fit(scale_est="stand_mad", update_scale=False)
#    model_ramsaysE = RLM(endog, exog, M=norms.RamsayE())
#    results_ramsaysE = model_ramsaysE.fit(update_scale=False)
#    model_andrewWave = RLM(endog, exog, M=norms.AndrewWave())
#    results_andrewWave = model_andrewWave.fit(update_scale=False)
#    model_hampel = RLM(endog, exog, M=norms.Hampel(a=1.7,b=3.4,c=8.5)) # convergence problems with scale changed, not with 2,4,8 though?
#    results_hampel = model_hampel.fit(update_scale=False)
    #######################
    ### Stack Loss Data ###
    #######################
    from models.datasets.stackloss import load
    data = load()
    data.exog = models.tools.add_constant(data.exog)
    #############
    ### Huber ###
    #############
#    m1_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
#    results_Huber1 = m1_Huber.fit()
#    m2_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
#    results_Huber2 = m2_Huber.fit(cov="H2")
#    m3_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
#    results_Huber3 = m3_Huber.fit(cov="H3")
    ##############
    ### Hampel ###
    ##############
#    m1_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
#    results_Hampel1 = m1_Hampel.fit()
#    m2_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
#    results_Hampel2 = m2_Hampel.fit(cov="H2")
#    m3_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
#    results_Hampel3 = m3_Hampel.fit(cov="H3")
    ################
    ### Bisquare ###
    ################
#    m1_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
#    results_Bisquare1 = m1_Bisquare.fit()
#    m2_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
#    results_Bisquare2 = m2_Bisquare.fit(cov="H2")
#    m3_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
#    results_Bisquare3 = m3_Bisquare.fit(cov="H3")

    ##############################################
    # Huber's Proposal 2 scaling                 #
    ##############################################

    ################
    ### Huber'sT ###
    ################
    # The only example actually executed: HuberT norm with Huber's
    # proposal-2 scale estimate.
    m1_Huber_H = RLM(data.endog, data.exog, M=norms.HuberT())
    results_Huber1_H = m1_Huber_H.fit(scale_est=scale.HuberScale())
#    m2_Huber_H
#    m3_Huber_H
#    m4 = RLM(data.endog, data.exog, M=norms.HuberT())
#    results4 = m1.fit(scale_est="Huber")
#    m5 = RLM(data.endog, data.exog, M=norms.Hampel())
#    results5 = m2.fit(scale_est="Huber")
#    m6 = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
#    results6 = m3.fit(scale_est="Huber")
#    print """Least squares fit
#%s
#Huber Params, t = 2.
#%s
#Ramsay's E Params
#%s
#Andrew's Wave Params
#%s
#Hampel's 17A Function
#%s
#""" % (results_ols.params, results_huber.params, results_ramsaysE.params,
#    results_andrewWave.params, results_hampel.params)
|
belgrades/sabermetrics | refs/heads/master | docs/source/conf.py | 1 | # -*- coding: utf-8 -*-
#
# Sabermetrics documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 8 17:52:28 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sabermetrics'
copyright = u'2016, Fernando Crema'
author = u'Fernando Crema'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Sabermetrics v0.1.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sabermetricsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Sabermetrics.tex', u'Sabermetrics Documentation',
u'Fernando Crema', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sabermetrics', u'Sabermetrics Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Sabermetrics', u'Sabermetrics Documentation',
author, 'Sabermetrics', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
debguy0x/PyBitmessage | refs/heads/master | src/pyelliptic/cipher.py | 24 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Yann GUIBET <yannguibet@gmail.com>
# See LICENSE for details.
from pyelliptic.openssl import OpenSSL
class Cipher:
    """
    Symmetric encryption/decryption wrapper around OpenSSL's EVP cipher API.

    Example usage::

        import pyelliptic
        iv = pyelliptic.Cipher.gen_IV('aes-256-cfb')
        ctx = pyelliptic.Cipher("secretkey", iv, 1, ciphername='aes-256-cfb')
        ciphertext = ctx.update('test1')
        ciphertext += ctx.update('test2')
        ciphertext += ctx.final()
        ctx2 = pyelliptic.Cipher("secretkey", iv, 0, ciphername='aes-256-cfb')
        print ctx2.ciphering(ciphertext)
    """
    def __init__(self, key, iv, do, ciphername='aes-256-cbc'):
        """
        Initialise an EVP cipher context.

        key        -- symmetric key (length must match the chosen cipher)
        iv         -- initialisation vector; use gen_IV() for a fresh one
        do         -- 1 => Encrypt; 0 => Decrypt
        ciphername -- OpenSSL cipher name; see get_all_cipher()
        """
        self.cipher = OpenSSL.get_cipher(ciphername)
        self.ctx = OpenSSL.EVP_CIPHER_CTX_new()
        if do == 1 or do == 0:
            # Copy key and IV into C-allocated buffers for the FFI call.
            k = OpenSSL.malloc(key, len(key))
            IV = OpenSSL.malloc(iv, len(iv))
            OpenSSL.EVP_CipherInit_ex(
                self.ctx, self.cipher.get_pointer(), 0, k, IV, do)
        else:
            raise Exception("RTFM ...")
    @staticmethod
    def get_all_cipher():
        """
        static method, returns all ciphers available
        """
        return OpenSSL.cipher_algo.keys()
    @staticmethod
    def get_blocksize(ciphername):
        # Block size (in bytes) of the named cipher.
        cipher = OpenSSL.get_cipher(ciphername)
        return cipher.get_blocksize()
    @staticmethod
    def gen_IV(ciphername):
        # Random IV of the cipher's block size, drawn from OpenSSL's RNG.
        cipher = OpenSSL.get_cipher(ciphername)
        return OpenSSL.rand(cipher.get_blocksize())
    def update(self, input):
        """
        Feed data through the cipher; returns the bytes produced so far.
        May return fewer (or more) bytes than were fed in, since block
        ciphers buffer partial blocks internally.
        """
        i = OpenSSL.c_int(0)
        # Output can be up to one block larger than the input (padding),
        # so allocate len(input) + blocksize.
        buffer = OpenSSL.malloc(b"", len(input) + self.cipher.get_blocksize())
        inp = OpenSSL.malloc(input, len(input))
        if OpenSSL.EVP_CipherUpdate(self.ctx, OpenSSL.byref(buffer),
                                    OpenSSL.byref(i), inp, len(input)) == 0:
            raise Exception("[OpenSSL] EVP_CipherUpdate FAIL ...")
        # i now holds the number of bytes OpenSSL actually wrote.
        return buffer.raw[0:i.value]
    def final(self):
        """
        Flush any buffered/padding bytes and finish the operation.
        """
        i = OpenSSL.c_int(0)
        buffer = OpenSSL.malloc(b"", self.cipher.get_blocksize())
        if (OpenSSL.EVP_CipherFinal_ex(self.ctx, OpenSSL.byref(buffer),
                                       OpenSSL.byref(i))) == 0:
            raise Exception("[OpenSSL] EVP_CipherFinal_ex FAIL ...")
        return buffer.raw[0:i.value]
    def ciphering(self, input):
        """
        Do update and final in one method
        """
        buff = self.update(input)
        return buff + self.final()
    def __del__(self):
        # Release the OpenSSL context when this object is garbage collected.
        OpenSSL.EVP_CIPHER_CTX_cleanup(self.ctx)
        OpenSSL.EVP_CIPHER_CTX_free(self.ctx)
|
Carles-Figuerola/marathon-python | refs/heads/master | itests/steps/marathon_steps.py | 3 | import sys
import time
import marathon
from behave import given, when, then
import mock
from itest_utils import get_marathon_connection_string
sys.path.append('../')
@given('a working marathon instance')
def working_marathon(context):
    """Ensure context.client holds a MarathonClient pointed at the test
    instance, creating one the first time this step runs."""
    if hasattr(context, 'client'):
        return
    connection_string = "http://%s" % \
        get_marathon_connection_string()
    context.client = marathon.MarathonClient(connection_string)
@when(u'we create a trivial new app')
def create_trivial_new_app(context):
    """Submit a minimal sleep-based app to marathon."""
    trivial_app = marathon.MarathonApp(cmd='sleep 100', mem=16, cpus=1)
    context.client.create_app('test-trivial-app', trivial_app)
@when(u'we create a complex new app')
def create_complex_new_app_with_unicode(context):
    """Submit a docker-based app that exercises most MarathonApp fields,
    including unicode values."""
    docker_settings = {
        'image': u'localhost/fake_docker_url',
        'network': 'BRIDGE',
        'portMappings': [{'protocol': 'tcp', 'containerPort': 8888, 'hostPort': 0}],
    }
    container = {
        'type': 'DOCKER',
        'docker': docker_settings,
        'volumes': [{'hostPath': u'/etc/stuff', 'containerPath': u'/etc/stuff', 'mode': 'RO'}],
    }
    health_check = {
        'protocol': 'HTTP',
        'path': '/health',
        'gracePeriodSeconds': 3,
        'intervalSeconds': 10,
        'portIndex': 0,
        'timeoutSeconds': 10,
        'maxConsecutiveFailures': 3,
    }
    app_config = {
        'container': container,
        'instances': 1,
        'mem': 30,
        'args': [],
        'backoff_factor': 2,
        'cpus': 0.25,
        'uris': ['file:///root/.dockercfg'],
        'backoff_seconds': 1,
        'constraints': None,
        'cmd': u'/bin/true',
        'health_checks': [health_check],
    }
    context.client.create_app('test-complex-app', marathon.MarathonApp(**app_config))
@then(u'we should see the {which} app running via the marathon api')
def see_complext_app_running(context, which):
    """Check that the named app is visible through the marathon API."""
    print(context.client.list_apps())
    app_id = 'test-%s-app' % which
    assert context.client.get_app(app_id)
|
GunoH/intellij-community | refs/heads/master | python/testData/postfix/isNone/nonApplicable_after.py | 39 | a = 1.ifn <caret> |
autonoom/opencv-autonoom | refs/heads/master | opencv-autonoom/testROI.py | 1 | import cv2
import numpy as np

# Read the sample image as a single-channel (grayscale) array.
img = cv2.imread('watch.jpg', cv2.IMREAD_GRAYSCALE)
# imread returns None (rather than raising) when the file is missing or
# unreadable, which would otherwise surface as a confusing AttributeError.
if img is None:
    raise IOError("Could not read 'watch.jpg'")
height, width = img.shape

# Image coordinates (x grows right, y grows down):
#   (0, 0) ---------------- (width, 0)
#     |                         |
#   (0, height) ----------- (width, height)

# NumPy slicing is img[y1:y2, x1:x2]: keep the bottom 100-pixel strip
# across the full image width.
img = img[height - 100:height, 0:width]

cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
reamdc1/gbeer_standalone | refs/heads/master | gbeer_standalone.py | 2 | #!/usr/bin/python
import time
import os
import sys
import argparse
import shutil
from ast import parse
from config import Config
import Configuration_Variables as conf_var
from turtle import config_dict
# Copyright(C) 2015 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment
#TODO:
# 1) change program names in the project to reflect gene blocks as our model
# 2) add back the CD-hit functionality in "make_operon_query". the coding is there, i just need to benchmark it against current results.
# 3) rename this script, to whatever makes some sense... i actually like it how it is.
#########################################################################################################################################
# I am putting some globals here, they are command line arguments for some of the scripts that we are using. They are not #
# important enough, at least at this time, to justify making them command line arguments for them. This can be revised #
# later, or changed by someone who cares too much about these trivial things. after we get everything running to our satisfaction. #
# Most likely all/almost all will be removed because it may tempt someone to ruin what already seems to be working well. #
#########################################################################################################################################
#########################################################################################################
# regulondb_dl_parse.py: #
# the only returned file we are interested in is the regulon_default_gene_block_file, defined below. #
# it will be stored in the "regulonDB/" folder in the project outfolder that is supplied by the user. #
#########################################################################################################
# NOTE: If a gene block file is supplied, this stage will be skipped. I currently do not perform a validity check on this input.
# These options will not be alterable in the command line of this script, but have values that may be useful to control.
#regulon_url = 'http://regulondb.ccg.unam.mx/menu/download/datasets/files/OperonSet.txt'
#regulon_outfolder = 'regulonDB/'
#regulon_default_gene_block_file = regulon_outfolder + 'gene_block_names_and_genes.txt'
# TODO: add this option as a command line arg for the gbeer standalone script
#regulon_experimental_only = True
# What follows is a list of options that are controlled by user inputs to this script
# --infolder
# --outfolder
# --filter
# --num_proc
# --min_genes
# The download flag will not be set. If regulondb_dl_parse.py is invoked, it will download the regulon db file.
#############################################################################################################
# create_newick_tree.py #
# This program has three outputs: #
# a newick format phylogenetic tree - "out_tree.nwk" #
# a list of accession to common names in the top down order of the created tree - "accession_to_common.csv" #
# a reordered filter file list for use later in the analysis - "phylo_order_new.txt" #
# these files will be stored in the tree/ directory of the project folder #
#############################################################################################################
# TODO:
# 1) add command line arg to allow a user to supply their own newick tree file.
# 1.5) if this option is included, i will need to call this script first. Additionally, if both a filter and a tree file is provided,
# one will have to be preferred. I would choose the one generated by the tree file. Tree labels would need to conform to some specification.
# Tree creation has to have a better method available... i need to search papers.
# 2) add command line arg to allow a user to supply their own marker gene.
# 2.5) When we add this option we need to make it possible to determine protein/RNA product, and then handle tree creation accordingly.
#tree_marker_gene = "rpob"
# What follows is a list of options that are controlled by user inputs to this script
# --genbank_directory
# --filter
# This option will not be alterable in the command line of this script, but may be useful to control.
#tree_outfolder = "tree/"
#############################################################################################################
# format_db.py: #
# This script creates a single output, a directory that contains all of the BLAST-searchable databases for #
# use by blast_script.py in a later stage of this script. Each database is named for the accession it was #
# derived from. #
#############################################################################################################
# What follows is a list of options that are controlled by user inputs.
# --genbank_directory
# --outfolder
# --filter
# --num_proc
# TODO: nothing, other options are unlikely necessary for this stage. altering the DB type could be useful if we begin investigating RNA genes
# though currently this is not supported.
# These options will not be alterable in the command line of this script, but may be useful to control.
#BLAST_database_folder = 'db/'
#BLAST_format_protein = 'True'
#############################################################################################################
# make_operon_query.py: #
# This script produces a single output file that will serve as the BLAST query for the remainder of the #
# project, stored in the project file as 'operon_query.fa'. Curently only one reference is used, #
# but will expand to include more organisms automagically. #
#############################################################################################################
# What follows is a list of options that are controlled by user inputs.
#--num_proc
# Controlled by the output from a pervious step
# --infolder
# --gene_block_file
# not controlled by user inputs, but will be passed as a value to make_operon_query.py as a command line param for this script
#gene_block_query_outfile = 'gene_block_query.fa' # --outfile
# will possibly deprecate the next option in the future, in lieu of an alternative approach
#refrence_organism = 'NC_000913' # --reference
#refrence_organism = 'NC_000964'
#############################################################################################################
# blast_script.py: #
# This script produces a tabular BLAST output per accession for the gene block query. #
#############################################################################################################
# What follows is a list of options that are controlled by user inputs.
# --filter
# --num_proc
# --query
# --eval
# Controlled by the output from a previous step
# --database_folder
# not controlled by user inputs, but will be passed as a value to blast_script.py so the project folder is used.
#BLAST_outfolder = 'blast_result/' # will be passed to --outfolder
#############################################################################################################
# blast_script.py: #
# This script produces a tabular BLAST output per accession for the gene block query. #
#############################################################################################################
# What follows is a list of options that are controlled by user inputs.
# --num_proc
# Controlled by the output from a pervious step
# --infolder (in this case the bast_script.py output directory)
# --gene_block_query (from make_operon_query.py)
# not controlled by user inputs, but will be passed as a value to blast_script.py so the project folder is used.
#BLAST_parse_outfolder = 'blast_parse/'# will be passed to --outfolder
# not controlled by user inputs, no input is appropriate. filter is for gene blocks that have been previously blasted, this option is to be run as a standalone option only.
# --filter
#############################################################################################################
# filter_operon_blast_results.py: #
# This program will be used to remove spurious results from a BLAST search organized by gene block. #
#############################################################################################################
# What follows is a list of options that are controlled by user inputs.
# --outfolder
# --num_proc
# --eval
# --max_gap
# not controlled by user inputs, but will be passed as a value to blast_script.py so the project folder is used.
#filter_BLAST_parse_outfolder = 'optimized_gene_blocks/'# will be passed to --outfolder
# not controlled by user inputs, no input is appropriate. filter is for gene blocks that have been previously blasted, this option is to be run as a standalone option only.
# --filter
#############################################################################################################
# make_event_distance_matrix.py: #
# This program will calculate the number of events that an organismal pair do not have in common. #
#############################################################################################################
# What follows is a list of options that are controlled by user inputs.
# --gene_block_file Though this could be generated by regulondb_dl_parse.py
# --num_proc
# --eval
# --max_gap
# Controlled by the output from a pervious step
# --infolder
# not controlled by user inputs, but will be passed as a value to blast_script.py so the project folder is used.
#event_distance_outfolder = 'gene_block_distance_matrices/'# will be passed to --outfolder
# not controlled by user inputs, no input is appropriate. filter is for gene blocks that have been previously blasted, this option is to be run as a standalone option only.
# --operon_filter
#############################################################################################################
# operon_visual.py: #
# This program run all of the visualization scrips that make the results of the project easier to interpret.#
#############################################################################################################
# I will add a more detailed description of what this does, and requires once i figure it out myself.
#visualization_outfolder = "visualization/"
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
    """Build the command-line interface and return the parsed arguments.

    Kept as its own function so that main() stays readable; no input
    validation happens here (see check_options for that).
    """
    ap = argparse.ArgumentParser(description='The purpose of this script is to run the full software suite that we have developed to study gene blocks using as few inputs as possible. This will facilitate the ease of use as much as possible.')

    # --- input/output locations ------------------------------------------
    # 'NONE' is the sentinel for "not supplied"; a gene block file is then
    # derived later in the pipeline from the regulonDB download stage.
    ap.add_argument("-i", "--gene_block_file", dest="gene_block_file",
                    metavar="FILE", default='NONE',
                    help="Input file for the gene block query step of the pipeline.")
    ap.add_argument("-G", "--genbank_directory", dest="genbank_directory",
                    metavar="DIRECTORY", default='./genomes/',
                    help="Folder containing all genbank files for use by the program.")
    ap.add_argument("-o", "--outfolder", dest="outfolder",
                    metavar="DIRECTORY", default='./test_run/',
                    help="Folder where results will be stored.")
    ap.add_argument("-f", "--filter", dest="filter",
                    metavar="FILE", default='NONE',
                    help="File restricting which accession numbers this script will process. If no file is provided, filtering is not performed.")

    # --- run-size tuning knobs -------------------------------------------
    ap.add_argument("-n", "--num_proc", dest="num_proc",
                    metavar="INT", default=os.sysconf("SC_NPROCESSORS_CONF"), type=int,
                    help="Number of processors that you want this script to run on. The default is every CPU that the system has.")
    ap.add_argument("-m", "--min_genes", dest="min_genes",
                    metavar="INT", default=5, type=int,
                    help="Minimum number of genes that a gene_block must contain before it can be considered for further analysis. The default is 5 because that is what we are currently using in the study.")
    ap.add_argument("-g", "--max_gap", dest="max_gap",
                    metavar="INT", default=500, type=int,
                    help="Size in nucleotides of the maximum gap allowed between genes to be considered neighboring. The default is 500.")
    # Deliberately kept a string: parsing it as a float here caused the
    # threshold to be formatted back into the BLAST command as 0.000000.
    ap.add_argument("-e", "--eval", dest="eval", default='1e-10',
                    metavar="FLOAT",
                    help="eval for the BLAST search.")

    # --- optional stage-bypass inputs and behaviour flags ----------------
    ap.add_argument("-t", "--tree", dest="tree_file",
                    metavar="FILE", default='NONE',
                    help="Newick format tree file which will be used to bypass tree creation.")
    ap.add_argument("-c", "--clean", dest="clean",
                    default=False, action='store_true',
                    help="Flag to toggle the removal of intermediate files that are unnecessary for analysis, reducing the storage requirements for a run.")
    ap.add_argument("-q", "--quiet", dest="quiet",
                    action="store_true", default=False,
                    help="Suppresses most program text outputs.")
    ap.add_argument("-d", "--db", dest="db_directory",
                    metavar="DIRECTORY", default='NONE',
                    help="Directory where protein BLAST db are stored.")
    ap.add_argument("-C", "--configuration", dest="configuration_file",
                    metavar="CONFIGURATION FILE", default='NONE',
                    help="Configuration File of the tool")

    return ap.parse_args()
def check_options(parsed_args):
    """Validate the parsed command-line arguments and normalise them.

    Prints a message and exits the process on any invalid input; creates
    the output folder if it does not exist yet; loads the optional
    configuration file into the Configuration_Variables module.

    Returns the tuple:
        (genbank_directory, gene_block_file, outfolder, filter_file,
         num_proc, min_genes, max_gap, e_val, tree_file, clean, quiet,
         db_directory)
    """
    # 'NONE' is the sentinel meaning "no file supplied"; otherwise the
    # path must exist on disk.
    if parsed_args.gene_block_file == 'NONE' or os.path.exists(parsed_args.gene_block_file):
        gene_block_file = parsed_args.gene_block_file
    else:
        print "The file %s does not exist." % parsed_args.gene_block_file
        sys.exit()
    # check the genbank folder
    if os.path.isdir(parsed_args.genbank_directory):
        genbank_directory = parsed_args.genbank_directory
    else:
        print "The folder %s does not exist." % parsed_args.genbank_directory
        sys.exit()
    # if the directory that the user specifies does not exist, then the program makes it for them.
    # require that the folder name ends in a '/'
    if parsed_args.outfolder[-1] != '/':
        outfolder = parsed_args.outfolder + '/'
    else:
        outfolder = parsed_args.outfolder
    if not os.path.isdir(outfolder):
        os.makedirs(outfolder)
    #print "outfolder", outfolder
    # The filter file is also optional ('NONE' => no filtering).
    if parsed_args.filter == 'NONE' or os.path.exists(parsed_args.filter):
        filter_file = parsed_args.filter
    else:
        print "The file %s does not exist." % parsed_args.filter
        sys.exit()
    # section of code that deals determining the number of CPU cores that will be used by the program
    # Clamps the requested count to [1, cores available on this host].
    try:
        num_proc = int(parsed_args.num_proc)
        if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
            num_proc = os.sysconf("SC_NPROCESSORS_CONF")
        elif parsed_args.num_proc < 1:
            num_proc = 1
        else:
            num_proc = int(parsed_args.num_proc)
    except:
        print "The number of processors that you entered %s is not an integer, please enter a positive integer." % parsed_args.num_proc
        sys.exit()
    # NOTE(review): min_no_genes is assigned but never used afterwards;
    # the int() call only serves to trigger the except branch on
    # non-numeric input (argparse already coerces this with type=int).
    try:
        min_no_genes = int(parsed_args.min_genes)
        if parsed_args.min_genes <= 0:
            min_genes = 1
        else:
            min_genes = parsed_args.min_genes
    except:
        print "The minimum number of genes that you entered %s is not an integer, please enter a positive integer." % parsed_args.min_genes
        sys.exit()
    # validate the input for the maximum allowed gap
    try:
        max_gap = int(parsed_args.max_gap)
        if max_gap <= 0:
            print "The gap that you entered %s is a negative number, please enter a positive integer." % parsed_args.max_gap
            sys.exit()
        else:
            pass
    except:
        print "The gap that you entered %s is not an integer, please enter a positive integer." % parsed_args.max_gap
        sys.exit()
    # add these lines back in once i return the tree option to the program
    if parsed_args.tree_file == 'NONE' or os.path.exists(parsed_args.tree_file):
        tree_file = parsed_args.tree_file
    else:
        print "The file %s does not exist." % parsed_args.tree_file
        sys.exit()
    # e_val deliberately stays a string (see parser_code); float() here is
    # only used to confirm that it parses as a number.
    try:
        float(parsed_args.eval)
        e_val = parsed_args.eval
    except:
        print "The Blast Threshold that you entered %s is not a valid Float" % parsed_args.eval
        sys.exit()
    gene_block_file = parsed_args.gene_block_file
    clean = parsed_args.clean
    quiet = parsed_args.quiet
    # check the genbank folder
    if parsed_args.db_directory == 'NONE' or os.path.isdir(parsed_args.db_directory):
        db_directory = parsed_args.db_directory
    else:
        print "The folder %s does not exist." % parsed_args.db_directory
        sys.exit()
    ##New parameters Added
    #refrence_organism = parsed_args.refrence_organism
    # Load optional configuration overrides into Configuration_Variables.
    # NOTE(review): file() is the Python 2 builtin open(); this script is
    # Python 2 throughout (print statements above).
    if os.path.exists(parsed_args.configuration_file):
        f = file(parsed_args.configuration_file)
        cfg = Config(f)
        configuration(cfg)
    elif parsed_args.configuration_file == 'NONE':
        pass
    else:
        print "The configuration file %s does not exist." % parsed_args.configuration_file
        sys.exit()
    #print conf_var.regulon_url
    #return genbank_directory, gene_block_file, outfolder, filter_file, num_proc, min_genes, max_gap, e_val
    return genbank_directory, gene_block_file, outfolder, filter_file, num_proc, min_genes, max_gap, e_val, tree_file, clean, quiet, db_directory
def configuration(configuration_object):
    """Transfer settings from a parsed Config object onto the
    Configuration_Variables module (conf_var).

    Any key missing from the configuration file defaults to "".
    """
    settings = {}
    for entry in configuration_object.configuration:
        settings[entry.variable_name] = entry.value
    # (configuration-file key, attribute name on conf_var) pairs; the last
    # three keys intentionally differ from their attribute names.
    key_to_attr = (
        ('regulon_url', 'regulon_url'),
        ('regulon_outfolder', 'regulon_outfolder'),
        ('regulon_default_gene_block_file', 'regulon_default_gene_block_file'),
        ('tree_marker_gene', 'tree_marker_gene'),
        ('tree_outfolder', 'tree_outfolder'),
        ('BLAST_database_folder', 'BLAST_database_folder'),
        ('BLAST_format_protein', 'BLAST_format_protein'),
        ('gene_block_query_outfile', 'gene_block_query_outfile'),
        ('refrence_organism', 'refrence_organism'),
        ('BLAST_outfolder', 'BLAST_outfolder'),
        ('BLAST_parse_outfolder', 'BLAST_parse_outfolder'),
        ('filter_BLAST_parse_outfolder', 'filter_BLAST_parse_outfolder'),
        ('event_distance_outfolder', 'event_distance_outfolder'),
        ('visualization_outfolder', 'visualization_outfolder'),
        ('newick_tree_name', 'newick_tree'),
        ('Accesion_Name_mapping_File', 'accession_to_common'),
        ('Phylogenetic_Order_Species_File', 'phylo_order_new'),
    )
    for key, attr in key_to_attr:
        setattr(conf_var, attr, settings.get(key, ""))
# This function will clean up the output of the program so that intermediate files or directories are removed.
# The major reason that this function exists is that a large number of debugging intermediate steps exist, and
# regular users of the program do not benefit from keeping this data, so i'm removing it.
def cleanup_function(results_to_move, results_to_remove, outfolder):
    """Copy everything in results_to_move into outfolder, then delete
    everything in results_to_remove.

    Exists because the pipeline produces many intermediate debugging
    files that regular users have no reason to keep.
    """
    for source in results_to_move:
        if os.path.isdir(source):
            # Re-create the directory (by its final path component)
            # inside the output folder.
            dir_name = os.path.basename(source.rstrip('/'))
            shutil.copytree(source, outfolder + dir_name + '/')
        else:
            shutil.copyfile(source, outfolder + os.path.basename(source))
    for target in results_to_remove:
        if os.path.isdir(target):
            shutil.rmtree(target)
        else:
            os.remove(target)
def main():
start = time.time()
#list_files_remove = []
results_to_move = []
results_to_remove = []
parsed_args = parser_code()
#genbank_directory, gene_block_file, outfolder, filter_file, num_proc, min_genes, max_gap, e_val = check_options(parsed_args)
genbank_directory, gene_block_file, outfolder, filter_file, num_proc, min_genes, max_gap, e_val, tree_file, clean, quiet, db_directory = check_options(parsed_args)
#print genbank_directory, gene_block_file, outfolder, filter_file, num_proc, min_genes, max_gap, e_val
if not quiet:
print genbank_directory, gene_block_file, outfolder, filter_file, num_proc, min_genes, max_gap, e_val, tree_file, clean, quiet, db_directory
########################################################
# Step 1 fully checked and operational as of 7/21/2015 #
# removed operon instances and print statements #
########################################################
# Step 1: Create a phylogenetic tree from the organisms in the either the whole set provided in the genome directory,
# or from the organisms that are included in the organism filter file.
#TODO:
# 1) Make it possible to choose a protein or RNA gene, currently only protein genes are accepted.
# 2) Allow users to provide their own phylogenetic tree, and have the program use it, producing an accession list from the data if the tree lables are of a certain format
# 2.1) options have been added to allow this. the script needs to accomodate this option at this point.
project_tree_outfolder = outfolder + conf_var.tree_outfolder.lstrip("./")
cmd1 = "./create_newick_tree.py --genbank_directory %s --filter %s --outfolder %s --marker_gene %s" % (genbank_directory, filter_file, project_tree_outfolder, conf_var.tree_marker_gene)
#cmd2 = "./create_newick_tree.py --genbank_directory %s -f %s" % (genbank_directory, filter_file)
if quiet:
cmd1 = cmd1 + " --quiet"
else:
print "cmd1", cmd1
if tree_file != 'NONE':
cmd1 = cmd1 + " --tree %s" % tree_file
print "cmd1", cmd1
os.system(cmd1)
# There are no files/directories to delete from this stage
########################################################
# Step 2 fully checked and operational as of 7/13/2015 #
# removed operon instances and print statements #
########################################################
# Step 2: Get gene block set and parse into something that we can use
# currently the default is to get operons from regulon db and use them as the gene blocks we are searching for
if gene_block_file == "NONE": # run the regulon script
#now we need to set the output file for this stage
project_regulon_outfolder = outfolder + conf_var.regulon_outfolder
#print "project_regulon_outfolder ", project_regulon_outfolder
cmd2 = "./regulondb_dl_parse.py --filter %s --infolder %s --outfolder %s --num_proc %i --url %s --min_genes %i" % (filter_file, genbank_directory, project_regulon_outfolder, num_proc, conf_var.regulon_url, min_genes)
if not conf_var.regulon_experimental_only:
cmd2 = cmd2 + " --experimantal_only"
#cmd2 = "./regulondb_dl_parse.py -f phylo_order.txt"
if quiet:
cmd2 = cmd2 + " --quiet"
else:
print "cmd2", cmd2
os.system(cmd2)
gene_block_file = outfolder + conf_var.regulon_default_gene_block_file
#The gene block file is useful to anyone doing further analysis, so I have chosen to keep this ile, the rest of the files that are created can be removed
results_to_move.append(gene_block_file)
# The directory that is created (project_regulon_outfolder) can have its contents removed, as no other files are currently useful
results_to_remove.append(project_regulon_outfolder)
gene_block_file = project_regulon_outfolder + 'gene_block_names_and_genes.txt'
else: # do not run the regulon script, use the user-supplied file instead
pass
#print "gene_block_file", gene_block_file
########################################################
# Step 3 fully checked and operational as of 7/23/2015 #
# removed operon instances and print statements #
########################################################
#Step 3: Create BLAST searchable databases. (I am limiting this to protein databases right now since that is what we do in the paper)
if db_directory == 'NONE':
project_BLAST_database_folder = outfolder + conf_var.BLAST_database_folder
# BLAST databases are not necessary to keep, unless provided. This was created for the specifice run, and should be deleted since the databases are large
results_to_remove.append(project_BLAST_database_folder)
cmd3 = "./format_db.py --filter %s --genbank_directory %s --outfolder %s --num_proc %i" % (filter_file, genbank_directory, project_BLAST_database_folder, num_proc)
# Set the database formatting option[Protein or DNA], even though we don't use it (yet). verified working 7/22/2015
if conf_var.BLAST_format_protein == 'True':
pass
else:
cmd3 = cmd3 + ' -d'
if quiet:
cmd3 = cmd3 + " --quiet"
else:
print "cmd3", cmd3
os.system(cmd3)
else:
project_BLAST_database_folder = db_directory
'''
# BLAST databases are not necessary to keep, unless I add an option to explicitly provide them. This should be deleted, as the databases are large
# TODO: if the databases are an option the user can provide, add logic to choose deletion for only programatically created databases
# Do this step when the project_BLAST_database_folderis set immediately above.
results_to_remove.append(project_BLAST_database_folder)
cmd3 = "./format_db.py --filter %s --genbank_directory %s --outfolder %s --num_proc %i" % (filter_file, genbank_directory, project_BLAST_database_folder, num_proc)
# Set the database formatting option[Protein or DNA], even though we don't use it (yet). verified working 7/22/2015
if BLAST_format_protein == 'True':
pass
else:
cmd3 = cmd3 + ' -d'
if quiet:
cmd3 = cmd3 + " --quiet"
else:
print "cmd3", cmd3
os.system(cmd3)
'''
# BLAST databases are not necessary to keep, unless I add an option to explicitly provide them. This should be deleted, as the databases are large
#results_to_remove.append(project_BLAST_database_folder)
########################################################
# Step 4 version 1 checked as of 8/7/2015 #
# removed most operon instances, I need to check print #
########################################################
# TODO: fix the rest of the operon references in this script, there are only a few that are left.
#Step 4: make the operon query fasta file(s)
#gene_block_file = project_regulon_outfolder + 'gene_block_names_and_genes.txt'
#This is the only outfile for this stage. I don't think this needs its own seperate output directory, since this file is potentialy useful
# to a user after the program is run.
project_gene_block_query_outfile = outfolder + conf_var.gene_block_query_outfile
cmd4 = "./make_operon_query.py --infolder %s --outfile %s --gene_block_file %s --num_proc %i --refrence %s" % (genbank_directory, project_gene_block_query_outfile, gene_block_file, num_proc, conf_var.refrence_organism)
if quiet:
cmd4 = cmd4 + " --quiet"
else:
print "cmd4", cmd4
os.system(cmd4)
# The resulting file is useful, so no cleaning is not necessary
########################################################
# Step 5 fully checked and operational as of 7/24/2015 #
# removed operon instances and print statements #
########################################################
#Step 5: run BLAST with the query that we made in stage 3, using the databases that we produced in stage 2.
# TODO:
# make it possible to run this on an RNA database, though we have no use case for this currently. This is of low priority.
project_BLAST_outfolder = outfolder + conf_var.BLAST_outfolder
#cmd5 = "./blast_script.py -d %s -o %s -f %s -n %i -q %s -e %f" % (BLAST_database_folder, blast_outfolder, filter_file, num_proc, gene_block_query_outfile, e_val)
#cmd5 = "./blast_script.py --database_folder %s --outfolder %s --filter %s --num_proc %i --query %s --eval %f" % (project_BLAST_database_folder, project_BLAST_outfolder, filter_file, num_proc, project_gene_block_query_outfile, e_val)
#cmd5 = "./blast_script.py --database_folder %s --outfolder %s --filter %s --num_proc %i --query %s --eval %f" % (project_BLAST_database_folder, project_BLAST_outfolder, filter_file, num_proc, project_gene_block_query_outfile, e_val)
cmd5 = "./blast_script.py --database_folder %s --outfolder %s --filter %s --num_proc %i --query %s --eval %s" % (project_BLAST_database_folder, project_BLAST_outfolder, filter_file, num_proc, project_gene_block_query_outfile, e_val)
if quiet:
cmd5 = cmd5 + " --quiet"
else:
print "cmd5", cmd5
os.system(cmd5)
# The resulting directory is not necessary, so we may remove if the option is selected.
results_to_remove.append(project_BLAST_outfolder)
########################################################
# Step 6 fully checked and operational as of 8/9/2015 #
# though it had been a while ago. #
# removed operon instances and print statements #
########################################################
# Step 6: Parse the BLAST result and sort it by gene block
project_BLAST_parse_outfolder = outfolder + conf_var.BLAST_parse_outfolder
#cmd6 = "./blast_parse.py -f %s -n %i" % (filter_file, num_proc)
#cmd6 = "./blast_parse.py --infolder %s --outfolder %s --gene_block_query %s --num_proc %i" % (project_BLAST_outfolder, project_BLAST_parse_outfolder, project_gene_block_query_outfile, num_proc)
cmd6 = "./blast_parse.py --infolder %s --outfolder %s --gene_block_query %s --num_proc %i" % (project_BLAST_outfolder, project_BLAST_parse_outfolder, gene_block_file, num_proc)
if quiet:
cmd6 = cmd6 + " --quiet"
else:
print "cmd6", cmd6
os.system(cmd6)
# The resulting directory is not necessary, so we may remove if the option is selected.
results_to_remove.append(project_BLAST_parse_outfolder)
# Step 7: filter out spurious results and report the gene blocks that best represent the original.
project_filter_BLAST_parse_outfolder = outfolder + conf_var.filter_BLAST_parse_outfolder
#cmd7 = "./filter_gene_block_blast_results.py -n %i -g %i" % (num_proc, max_gap)
#cmd7 = "./filter_gene_block_blast_results.py --infolder %s --outfolder %s --num_proc %i --eval %f --max_gap %i" % (project_BLAST_parse_outfolder, project_filter_BLAST_parse_outfolder, num_proc, e_val, max_gap)
cmd7 = "./filter_operon_blast_results.py --infolder %s --outfolder %s --num_proc %i --eval %s --max_gap %i" % (project_BLAST_parse_outfolder, project_filter_BLAST_parse_outfolder, num_proc, e_val, max_gap)
if quiet:
cmd7 = cmd7 + " --quiet"
else:
print "cmd7", cmd7
os.system(cmd7)
# TODO: decide on this option for cleaning.
# The resulting directory is useful/not useful... not sure, but i will not add to the list yet
##########################################################################
# seems to be running through step 8correctly no through testing yet. #
##########################################################################
# Step 8: determine z-scores for each value in the pairwaise event matrix that is calculated in step 7
project_event_distance_outfolder = outfolder + conf_var.event_distance_outfolder
# This input will need to be altered to explicity take in the file created in 7... but for now the default will do.
#cmd8 = "./make_event_distance_matrix.py"
cmd8 = "./make_event_distance_matrix.py --gene_block_file %s --infolder %s --outfolder %s --num_proc %i --max_gap %i" % (gene_block_file, project_filter_BLAST_parse_outfolder, project_event_distance_outfolder, num_proc, max_gap)
if quiet:
cmd8 = cmd8 + " --quiet"
else:
print "cmd8", cmd8
os.system(cmd8)
# The resulting directory contains only one file, and it is not the most useful file for a user. it is small though, so i'm leaning toward keeping.
# Step 9: Run the visualization pipelilne using the pairwaise event matrix that is calculated in step 7
project_visualization_outfolder = outfolder + conf_var.visualization_outfolder
# during a normal run this code should not be necessary, but right now i'm having a problem and have to shove it in.
if not os.path.isdir(project_visualization_outfolder):
os.mkdir(project_visualization_outfolder)
# this is the location of the various result files from create_newick_tree.py (might need to put this in step 2's documentation)
project_mapping_file = project_tree_outfolder + "accession_to_common.csv"
project_organism_reorder_file = project_tree_outfolder + "phylo_order_new.txt"
# if a tree file is supplied by the user, this will have to be overridden with
project_tree_file = project_tree_outfolder + "out_tree.nwk"
project_event_dict = project_event_distance_outfolder + "event_dict.p"
# This input will need to be altered to explicity take in the file created in 7... but for now the default will do.
#cmd9 = "./operonVisual.py -n ./optimized_operon/ -m ./mapping.csv -t ./out_tree.nwk -e ./event_dict.p -o ./visualization"
#cmd9 = "./gbeerVisual/operonVisual.py -n ./gbeerVisual/data/optimized_operon/ -m ./gbeerVisual/data/mapping.csv -t ./gbeerVisual/data/reorder.nwk -e./gbeerVisual/data/event_dict.p -o ./visual"
#cmd9 = "./operonVisual.py -n ./optimized_operon/ -m ./mapping.csv -t ./out_tree.nwk -e./event_dict.p -o ./visual"
#cmd9 = "./operonVisual.py --OperonDataDirectory %s --MappingFile %s --NewickTree %s --EventsDict %s --OutputDirectory %s" % (project_event_distance_outfolder, project_mapping_file, project_tree_file, project_event_dict, project_visualization_outfolder)
cmd9 = "./operonVisual.py --OperonDataDirectory %s --MappingFile %s --NewickTree %s --EventsDict %s --OutputDirectory %s" % (project_filter_BLAST_parse_outfolder, project_mapping_file, project_tree_file, project_event_dict, project_visualization_outfolder)
if not quiet:
print "cmd9", cmd9
os.system(cmd9)
#print "results_to_move", results_to_move, "results_to_remove", results_to_remove
if clean:
cleanup_function(results_to_move, results_to_remove, outfolder)
#print "Clean function Commented";
if not quiet:
print time.time() - start
if __name__ == '__main__':
main()
|
csitarichie/boost_msm_bare_metal | refs/heads/master | boost/libs/python/example/test_std_pair.py | 53 | # Copyright Ralf W. Grosse-Kunstleve 2006. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# Extension module built by the accompanying Boost.Python example
# (presumably exposing a std::pair<int, int> -- see std_pair example sources).
import std_pair_ext
# foo() should come back converted to a plain Python tuple.
assert std_pair_ext.foo() == (3, 5)
print "OK"
|
chromium2014/src | refs/heads/master | native_client_sdk/src/build_tools/verify_ppapi.py | 62 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script for PPAPI's PRESUBMIT.py to detect if additions or removals of
PPAPI interfaces have been propagated to the Native Client libraries (.dsc
files).
For example, if a user adds "ppapi/c/foo.h", we check that the interface has
been added to "native_client_sdk/src/libraries/ppapi/library.dsc".
"""
import optparse
import os
import sys
from build_paths import PPAPI_DIR, SRC_DIR, SDK_LIBRARY_DIR
import parse_dsc
class VerifyException(Exception):
  """Raised when a library.dsc file is out of sync with the PPAPI tree.

  Carries the sets of paths that should be added (`expected`) and removed
  (`unexpected`), and formats them into a human-readable message.
  """
  def __init__(self, lib_path, expected, unexpected):
    self.expected = expected
    self.unexpected = unexpected

    lines = ['In %s:\n' % lib_path]
    if expected:
      lines.append('  these files are missing and should be added:\n')
      lines.extend('    %s\n' % name for name in sorted(expected))
    if unexpected:
      lines.append('  these files no longer exist and should be removed:\n')
      lines.extend('    %s\n' % name for name in sorted(unexpected))
    Exception.__init__(self, ''.join(lines))
def PartitionFiles(filenames):
  """Partition PPAPI paths by the Native Client SDK library they belong to.

  Args:
    filenames: Iterable of paths relative to the Chromium src root, using
        the native path separator (e.g. "ppapi/c/foo.h").

  Returns:
    Dict mapping library name ('ppapi', 'ppapi_cpp', 'ppapi_cpp_private')
    to the set of matching paths.  Non-C/C++ files, flash interfaces, and
    documentation/trusted interfaces are skipped.
  """
  c_filenames = set()
  cpp_filenames = set()
  private_filenames = set()

  for filename in filenames:
    # Only C/C++ sources and headers are tracked by the .dsc files.
    if os.path.splitext(filename)[1] not in ('.cc', '.h'):
      continue

    parts = filename.split(os.sep)
    if 'private' in filename:
      # Flash interfaces are never shipped in the NaCl SDK.
      if 'flash' in filename:
        continue
      private_filenames.add(filename)
    elif parts[0:2] == ['ppapi', 'c']:
      # BUG FIX: the guard was `len(parts) >= 2`, which allowed an
      # IndexError on parts[2] for a two-component path; indexing element 2
      # requires len(parts) > 2.
      if len(parts) > 2 and parts[2] in ('documentation', 'trusted'):
        continue
      c_filenames.add(filename)
    elif (parts[0:2] == ['ppapi', 'cpp'] or
          parts[0:2] == ['ppapi', 'utility']):
      # Same off-by-one fix as above.
      if len(parts) > 2 and parts[2] in ('documentation', 'trusted'):
        continue
      cpp_filenames.add(filename)
    else:
      # Not a PPAPI file we care about.
      continue

  return {
    'ppapi': c_filenames,
    'ppapi_cpp': cpp_filenames,
    'ppapi_cpp_private': private_filenames
  }
def GetDirectoryList(directory_path, relative_to):
  """Recursively list every file under |directory_path|.

  Paths are returned relative to |relative_to|; files directly inside
  |directory_path| get no directory prefix when the two directories match.
  """
  filenames = []
  for dirpath, _, basenames in os.walk(directory_path):
    prefix = os.path.relpath(dirpath, relative_to)
    if prefix == '.':
      prefix = ''
    filenames.extend(os.path.join(prefix, name) for name in basenames)
  return filenames
def GetDscSourcesAndHeaders(dsc):
  """Flatten a parsed .dsc project into a single list of file names.

  Collects every entry from HEADERS[*]['FILES'] followed by every entry
  from TARGETS[*]['SOURCES']; either section may be absent.
  """
  files = []
  for section, key in (('HEADERS', 'FILES'), ('TARGETS', 'SOURCES')):
    for info in dsc.get(section, []):
      files.extend(info[key])
  return files
def GetChangedAndRemovedFilenames(modified_filenames, directory_list):
  """Split modified paths into those still on disk and those deleted.

  We can't know if a file was added (that would require knowing the previous
  state of the working directory), so every modified file that still exists
  is treated as potentially added/changed.

  Returns:
    (changed, removed) -- two sets partitioning |modified_filenames|.
  """
  on_disk = set(directory_list)
  modified = set(modified_filenames)
  return modified & on_disk, modified - on_disk
def GetDscFilenameFromLibraryName(lib_name):
  """Return the path of the library.dsc file for the given SDK library."""
  return os.path.join(SDK_LIBRARY_DIR, lib_name, 'library.dsc')
def Verify(dsc_filename, dsc_sources_and_headers, changed_filenames,
           removed_filenames):
  """Check the .dsc contents against the changed/removed PPAPI files.

  Membership is tested by basename, since .dsc files list bare file names.

  Raises:
    VerifyException: if a changed file is missing from the .dsc, or a
        removed file is still listed in it.
  """
  expected = set(f for f in changed_filenames
                 if os.path.basename(f) not in dsc_sources_and_headers)
  unexpected = set(f for f in removed_filenames
                   if os.path.basename(f) in dsc_sources_and_headers)

  if expected or unexpected:
    raise VerifyException(dsc_filename, expected, unexpected)
def VerifyOrPrintError(dsc_filename, dsc_sources_and_headers, changed_filenames,
                       removed_filenames, is_private=False):
  """Run Verify(), reporting mismatches to stderr instead of raising.

  For the private library (is_private=True), missing files only produce a
  warning; stale (removed-but-still-listed) files still fail.

  Returns:
    True if the presubmit check should pass, False otherwise.
  """
  try:
    Verify(dsc_filename, dsc_sources_and_headers, changed_filenames,
           removed_filenames)
  except VerifyException as e:
    should_fail = True
    if is_private and e.expected:
      # For ppapi_cpp_private, we don't fail if there are expected filenames...
      # we may not want to include them. We still want to fail if there are
      # unexpected filenames, though.
      sys.stderr.write('>>> WARNING: private interface files changed. '
                       'Should they be added to the Native Client SDK? <<<\n')
      if not e.unexpected:
        should_fail = False
    sys.stderr.write(str(e) + '\n')
    if should_fail:
      return False
  return True
def main(args):
  """Entry point: check each modified PPAPI file against its library.dsc.

  Args:
    args: list of modified file paths (relative to the Chromium src root),
        as passed in by PRESUBMIT.py.

  Returns:
    0 if every .dsc file is in sync, 1 otherwise.
  """
  usage = '%prog <file>...'
  description = __doc__
  parser = optparse.OptionParser(usage=usage, description=description)
  args = parser.parse_args(args)[1]
  if not args:
    parser.error('Expected a PPAPI header or source file.')

  retval = 0
  lib_files = PartitionFiles(args)
  directory_list = GetDirectoryList(PPAPI_DIR, relative_to=SRC_DIR)
  # NOTE: dict.iteritems() -- this module targets Python 2.
  for lib_name, filenames in lib_files.iteritems():
    if not filenames:
      continue

    changed_filenames, removed_filenames = \
        GetChangedAndRemovedFilenames(filenames, directory_list)

    dsc_filename = GetDscFilenameFromLibraryName(lib_name)
    dsc = parse_dsc.LoadProject(dsc_filename)
    dsc_sources_and_headers = GetDscSourcesAndHeaders(dsc)

    # Use the relative path to the .dsc to make the error messages shorter.
    rel_dsc_filename = os.path.relpath(dsc_filename, SRC_DIR)
    is_private = lib_name == 'ppapi_cpp_private'
    if not VerifyOrPrintError(rel_dsc_filename, dsc_sources_and_headers,
                              changed_filenames, removed_filenames,
                              is_private=is_private):
      retval = 1
  return retval
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
DirtyUnicorns/android_external_chromium_org | refs/heads/lollipop | tools/telemetry/telemetry/results/base_test_results_unittest.py | 48 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import unittest
from telemetry.core import exceptions
class TestOutputStream(object):
  """File-like object that records everything written to it, in order."""

  def __init__(self):
    # One list entry per write() call.
    self.output_data = []

  def write(self, data):
    """Record |data|; must be a str, mirroring a text stream's contract."""
    assert isinstance(data, str)
    self.output_data.append(data)

  def flush(self):
    """No-op; present only to satisfy the stream interface."""
    pass
class BaseTestResultsUnittest(unittest.TestCase):
  """Shared helpers for result unit tests (Python 2 module; uses
  print statements and the deprecated assertEquals spelling)."""

  def CreateException(self):
    """Return a sys.exc_info() triple carrying a real traceback."""
    # Raise and immediately catch so exc_info() is populated.
    try:
      raise exceptions.IntentionalException
    except Exception:
      return sys.exc_info()

  def assertEquals(self, ex, res):
    """assertEquals that prints a side-by-side diff before failing."""
    # This helps diagnose result mismatches.
    if ex != res and isinstance(ex, list):
      # Split embedded newlines so expected/result align line-by-line.
      def CleanList(l):
        res = []
        for x in l:
          x = x.split('\n')
          res.extend(x)
        return res
      ex = CleanList(ex)
      res = CleanList(res)
      max_len = max(len(ex), len(res))
      max_width = max([len(x) for x in ex + res])
      max_width = max(10, max_width)
      print 'Lists differ!'
      print '%*s | %*s' % (max_width, 'expected', max_width, 'result')
      for i in range(max_len):
        if i < len(ex):
          e = ex[i]
        else:
          e = ''
        if i < len(res):
          r = res[i]
        else:
          r = ''
        if e != r:
          # '*' marks the mismatching lines in the diff output.
          sep = '*'
        else:
          sep = '|'
        print '%*s %s %*s' % (max_width, e, sep, max_width, r)
      print ''
    if ex != res and isinstance(ex, str) and isinstance(res, str):
      print 'Strings differ!'
      # NOTE(review): 'exepected' typo below is in the emitted text itself;
      # left unchanged here since it is runtime output, not a comment.
      print 'exepected:\n%s' % repr(ex)
      print 'result:\n%s\n' % repr(res)
    super(BaseTestResultsUnittest, self).assertEquals(ex, res)
|
miracle2k/stgit | refs/heads/master | stgit/commands/prev.py | 6 | __copyright__ = """
Copyright (C) 2005, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from stgit.argparse import opt
from stgit.commands import common
from stgit.out import out
from stgit import argparse
# Command metadata consumed by StGit's command-dispatch machinery
# (see stgit.commands): one-line help, command kind, usage strings,
# long description, positional-arg spec, and option definitions.
help = 'Print the name of the previous patch'
kind = 'stack'
usage = ['']
description = """
Print the name of the previous patch."""

args = []
options = [
    opt('-b', '--branch', args = [argparse.stg_branches],
        short = 'Use BRANCH instead of the default branch')]

# Precondition helper: the command must run inside a repository.
directory = common.DirectoryHasRepositoryLib()
def func(parser, options, args):
    """Show the name of the previous patch
    """
    if len(args) != 0:
        parser.error('incorrect number of arguments')

    stack = directory.repository.get_stack(options.branch)
    applied = stack.patchorder.applied

    # The "previous" patch is the one just below the topmost applied patch,
    # so at least two applied patches are required.
    if applied and len(applied) >= 2:
        out.stdout(applied[-2])
    else:
        # Python 2 raise syntax; this module predates Python 3.
        raise common.CmdException, 'Not enough applied patches'
|
annakoop/jkphoto | refs/heads/master | inventory/settings.py | 2 | """
Django settings for inventory project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key (and the DB password below) are committed to source
# control; rotate them and load from environment variables before deploying.
SECRET_KEY = ')pp+lo#mq_9$eg#9$f2#-n-c9fg_916=8%9e6cf(a)#=hilkoz'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'inventory.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'inventory.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'inventory',
        'USER': 'web',
        'PASSWORD': 'up so floating many bells down',
        'HOST': 'localhost',
        'PORT': '',
    },
    # Secondary sqlite alias; only used if selected explicitly (e.g. via
    # --database=development or a settings override).
    'development': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATIC_URL = '/static/'
|
QuantCrimAtLeeds/PredictCode | refs/heads/master | open_cp/gui/hierarchical.py | 1 | """
hierarchical
~~~~~~~~~~~~
An abstract way to view "hierarchical" data.
We imagine that we have a spreadsheet of data (or in Python, perhaps a list
of tuples). We have columns `0`,...,`n-1` with column `n` storing some data.
Each column is associated with finitely many "keys".
E.g.
A 1 a Bob
A 2 a Dave
B 1 b Kate
B 3 d Amy
C 1 c Fred
C 1 d Ahmed
Column 0 keys are `{A,B,C}` while column 2 keys are `{a,b,c,d}`. We assume
that having specified all column keys, we obtain a unique piece of data.
However, not all combinations need to valid: in this example, `A,3,c` codes
to nothing.
We wish to display a drop-down list to select which key to view in each column.
If for a given column there is only one key, we should just display this
without a widget to choose it.
- When the user makes a choice for column 0, all other choices should be
refreshed. In our example, if `B` is chosen for column 0, then column 1
should only offer the choices `{1,3}` and column 2 only offers `{b,d}`.
- If possible, maintain the current choice. If previously the user had
selected `B,3,d` and then choices `C` for column 0, we should fix column 1
as being `1` (this is the only choice) but leave column 2 choice as `d`
(with `c` as another choice).
For technical reasons (ultimately tied to the usage of `tkinter`) all key
values are treated as _strings_ internally. We hide this, a bit, from the
user, but you should make sure that:
- Any key has a sensibly defined `__str__` method
- Calling `str` maintains uniqueness
At the level of the model, we work with Python types; but the view and
controller convert these to strings internally.
The canonical use is in `browse_analysis`.
"""
from open_cp.gui.tk.hierarchical_view import HierarchicalView
class Model():
    """(Abstract) base class defining how keyed data is accessed.

    Each data entry is indexed by a fixed-length tuple of keys; subclasses
    implement :meth:`get` and :meth:`get_key_options`.

    :param number_keys: How many "keys"/"columns" index each entry.
    """
    def __init__(self, number_keys):
        self._number_keys = number_keys

    @property
    def number_keys(self):
        """How many keys are needed to specify one entry."""
        return self._number_keys

    @property
    def current_selection(self):
        """The currently selected key, as a tuple."""
        return self._selection

    @current_selection.setter
    def current_selection(self, key):
        # Validate via get(); an unknown key must not become the selection.
        try:
            self.get(tuple(key))
        except KeyError:
            raise ValueError("Key not valid")
        self._selection = key

    @property
    def current_item(self):
        """The data item indexed by the current selection."""
        return self.get(self.current_selection)

    def get(self, key):
        """Fetch the data object for `key`; raise :class:`KeyError` if absent.

        :param key: Tuple of length `self.number_keys`, or an object
          convertible to one.
        """
        raise NotImplementedError()

    def get_key_options(self, partial_key):
        """Iterable of the possible next-level keys extending `partial_key`.

        E.g. () -> {A,B,C}; (A,) -> {1,2}; (A,1) -> {a}.

        :param partial_key: Tuple (possibly empty) of length less than
          `self.number_keys`.
        """
        raise NotImplementedError()
class DictionaryModel(Model):
    """:class:`Model` backed by a dictionary whose keys are (convertible to)
    fixed-length tuples.  The dictionary is used directly, not copied, so
    careful mutation remains possible.

    :param dictionary: The backing dictionary.
    """
    def __init__(self, dictionary):
        super().__init__(self._key_length(dictionary))
        self._dict = dictionary

    @staticmethod
    def _key_length(dictionary):
        # Validate that every key converts to a tuple and that all keys share
        # one length; return that common length (-1 for an empty dict).
        common = -1
        for key in dictionary.keys():
            try:
                key = tuple(key)
            except:
                raise ValueError("Keys should be (convertible to) tuples")
            if common == -1:
                common = len(key)
            if len(key) != common:
                raise ValueError("Keys should be of the same length")
        return common

    def get(self, key):
        # Fast path: the key indexes the dict directly; otherwise resolve it
        # to the actual stored key object by tuple equality.
        try:
            return self._dict[key]
        except:
            return self._dict[self._tuple_to_key(key)]

    def _tuple_to_key(self, key):
        # Map a tuple-convertible value back to the dictionary key object it
        # matches; KeyError when there is none.
        wanted = tuple(key)
        for candidate in self._dict.keys():
            if tuple(candidate) == wanted:
                return candidate
        raise KeyError("Key not valid")

    def get_key_options(self, partial_key):
        prefix = tuple(partial_key)
        n = len(prefix)
        return {tuple(k)[n] for k in self._dict.keys()
                if tuple(k)[:n] == prefix}

    @property
    def current_selection(self):
        """The current selection.  Always an actual key of the original
        dictionary, so not necessarily a tuple."""
        return self._selection

    @current_selection.setter
    def current_selection(self, key):
        self._selection = self._tuple_to_key(key)
class Hierarchical():
    """Main controller class.  Pass in the instance of :class:`Model` you
    wish to use.  The "view" can be accessed from the :attr:`view` attribute.
    Register a callback on a selection change by setting the :attr:`callback`
    attribute.

    :param model: Instance of :class:`Model`
    :param view: View object; typically leave as `None` to use the default
    :param parent: If you wish to build the default view, pass the `tk` parent
      widget.
    """
    def __init__(self, model, view=None, parent=None):
        self._model = model
        if view is None:
            view = HierarchicalView(model, self, parent)
        else:
            # An externally supplied view must be pointed back at us.
            view.controller = self
        self.view = view
        self._callback = None
        self._init()

    @property
    def callback(self):
        """A callable with signature `callback()` which is called when a
        selection changes.  Interrogate the model to see the selection."""
        return self._callback

    @callback.setter
    def callback(self, v):
        self._callback = v

    def _init(self):
        # Populate every column from scratch; no previous selection to keep.
        self._in_fill_choices((), None)

    def _in_fill_choices(self, partial_selection, old_selection):
        # Extend `partial_selection` one column at a time until it is a full
        # key, refreshing each column's choices in the view.  Where possible
        # the value from `old_selection` is kept; otherwise the first (sorted)
        # available choice is used.  Finally commits the selection to the
        # model and fires the change callback.
        while len(partial_selection) < self._model.number_keys:
            index = len(partial_selection)
            new_choices = list(self._model.get_key_options(partial_selection))
            new_choices.sort()
            self.view.set_choices(index, new_choices)
            if old_selection is None or old_selection[index] not in new_choices:
                next_value = new_choices[0]
            else:
                next_value = old_selection[index]
            self.view.set_selection(index, next_value)
            partial_selection += (next_value,)
        self._model.current_selection = partial_selection
        if self.callback is not None:
            self.callback()

    def _de_stringify(self, partial_selection, string_value):
        # The view deals in strings; map one back to the underlying key
        # object (str() is assumed to preserve uniqueness -- see module doc).
        for k in self._model.get_key_options(partial_selection):
            if str(k) == string_value:
                return k
        raise ValueError()

    def new_selection(self, level, value):
        """Notify that the user has selected `value` from column `level`.
        We should refresh the choices of selections of each subsequent level,
        aiming to leave the selection unchanged if possible.

        :param value: Assumed to be the _string representation_ of the key
        """
        if level < 0 or level >= self._model.number_keys:
            raise ValueError()
        old_selection = tuple(self._model.current_selection)
        partial_selection = old_selection[:level]
        value = self._de_stringify(partial_selection, str(value))
        partial_selection += (value,)
        self._in_fill_choices(partial_selection, old_selection)
|
infoxchange/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/django/contrib/gis/tests/test_spatialrefsys.py | 64 | import unittest
from django.db import connection
from django.contrib.gis.tests.utils import mysql, no_mysql, oracle, postgis, spatialite
test_srs = ({'srid' : 4326,
'auth_name' : ('EPSG', True),
'auth_srid' : 4326,
'srtext' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'srtext14' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'proj4' : '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ',
'spheroid' : 'WGS 84', 'name' : 'WGS 84',
'geographic' : True, 'projected' : False, 'spatialite' : True,
'ellipsoid' : (6378137.0, 6356752.3, 298.257223563), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 1, 9),
},
{'srid' : 32140,
'auth_name' : ('EPSG', False),
'auth_srid' : 32140,
'srtext' : 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
'srtext14': 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],AUTHORITY["EPSG","32140"],AXIS["X",EAST],AXIS["Y",NORTH]]',
'proj4' : '+proj=lcc +lat_1=30.28333333333333 +lat_2=28.38333333333333 +lat_0=27.83333333333333 +lon_0=-99 +x_0=600000 +y_0=4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ',
'spheroid' : 'GRS 1980', 'name' : 'NAD83 / Texas South Central',
'geographic' : False, 'projected' : True, 'spatialite' : False,
'ellipsoid' : (6378137.0, 6356752.31414, 298.257222101), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 5, 10),
},
)
if oracle:
from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
elif postgis:
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
elif spatialite:
from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
class SpatialRefSysTest(unittest.TestCase):
    """Checks the spatial backend's SpatialRefSys model against the known
    EPSG entries (WGS 84 and NAD83 / Texas South Central) in `test_srs`.
    All tests are skipped on MySQL, which has no spatial_ref_sys table."""

    @no_mysql
    def test01_retrieve(self):
        "Testing retrieval of SpatialRefSys model objects."
        for sd in test_srs:
            srs = SpatialRefSys.objects.get(srid=sd['srid'])
            self.assertEqual(sd['srid'], srs.srid)

            # Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # also, Oracle Spatial seems to add extraneous info to fields, hence the
            # the testing with the 'startswith' flag.
            auth_name, oracle_flag = sd['auth_name']
            if postgis or (oracle and oracle_flag):
                self.assertEqual(True, srs.auth_name.startswith(auth_name))

            self.assertEqual(sd['auth_srid'], srs.auth_srid)

            # No proj.4 and different srtext on oracle backends :(
            if postgis:
                # PostGIS >= 1.4 stores a slightly different WKT string.
                if connection.ops.spatial_version >= (1, 4, 0):
                    srtext = sd['srtext14']
                else:
                    srtext = sd['srtext']
                self.assertEqual(srtext, srs.wkt)
                self.assertEqual(sd['proj4'], srs.proj4text)

    @no_mysql
    def test02_osr(self):
        "Testing getting OSR objects from SpatialRefSys model objects."
        for sd in test_srs:
            sr = SpatialRefSys.objects.get(srid=sd['srid'])

            self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
            self.assertEqual(sd['geographic'], sr.geographic)
            self.assertEqual(sd['projected'], sr.projected)

            if not (spatialite and not sd['spatialite']):
                # Can't get 'NAD83 / Texas South Central' from PROJ.4 string
                # on SpatiaLite
                self.assertEqual(True, sr.name.startswith(sd['name']))

            # Testing the SpatialReference object directly.
            if postgis or spatialite:
                srs = sr.srs
                self.assertEqual(sd['proj4'], srs.proj4)
                # No `srtext` field in the `spatial_ref_sys` table in SpatiaLite
                if not spatialite:
                    if connection.ops.spatial_version >= (1, 4, 0):
                        srtext = sd['srtext14']
                    else:
                        srtext = sd['srtext']
                    self.assertEqual(srtext, srs.wkt)

    @no_mysql
    def test03_ellipsoid(self):
        "Testing the ellipsoid property."
        for sd in test_srs:
            # Getting the ellipsoid and precision parameters.
            ellps1 = sd['ellipsoid']
            prec = sd['eprec']

            # Getting our spatial reference and its ellipsoid
            srs = SpatialRefSys.objects.get(srid=sd['srid'])
            ellps2 = srs.ellipsoid

            for i in range(3):
                # NOTE(review): param1/param2 are assigned but unused.
                param1 = ellps1[i]
                param2 = ellps2[i]
                self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
def suite():
    """Build the test suite for this module.

    Uses TestLoader.loadTestsFromTestCase (available since Python 2)
    instead of unittest.makeSuite, which is deprecated and removed in
    Python 3.13.
    """
    s = unittest.TestSuite()
    s.addTest(
        unittest.defaultTestLoader.loadTestsFromTestCase(SpatialRefSysTest))
    return s
def run(verbosity=2):
    """Run the suite with a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
|
gpitel/pyjs | refs/heads/master | pyjs/lib/htmlentitydefs.py | 8 | """HTML character entity references."""
# maps the HTML entity name to the Unicode codepoint
name2codepoint = {
'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
'Alpha': 0x0391, # greek capital letter alpha, U+0391
'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
'Beta': 0x0392, # greek capital letter beta, U+0392
'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
'Chi': 0x03a7, # greek capital letter chi, U+03A7
'Dagger': 0x2021, # double dagger, U+2021 ISOpub
'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3
'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
'Epsilon': 0x0395, # greek capital letter epsilon, U+0395
'Eta': 0x0397, # greek capital letter eta, U+0397
'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3
'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
'Iota': 0x0399, # greek capital letter iota, U+0399
'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
'Kappa': 0x039a, # greek capital letter kappa, U+039A
'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3
'Mu': 0x039c, # greek capital letter mu, U+039C
'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
'Nu': 0x039d, # greek capital letter nu, U+039D
'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2
'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
'Omicron': 0x039f, # greek capital letter omicron, U+039F
'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech
'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
'Rho': 0x03a1, # greek capital letter rho, U+03A1
'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2
'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1
'Tau': 0x03a4, # greek capital letter tau, U+03A4
'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3
'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1
'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3
'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
'Zeta': 0x0396, # greek capital letter zeta, U+0396
'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
'amp': 0x0026, # ampersand, U+0026 ISOnum
'and': 0x2227, # logical and = wedge, U+2227 ISOtech
'ang': 0x2220, # angle, U+2220 ISOamso
'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW
'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3
'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub
'cap': 0x2229, # intersection = cap, U+2229 ISOtech
'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
'cent': 0x00a2, # cent sign, U+00A2 ISOnum
'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3
'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub
'cong': 0x2245, # approximately equal to, U+2245 ISOtech
'copy': 0x00a9, # copyright sign, U+00A9 ISOnum
'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
'cup': 0x222a, # union = cup, U+222A ISOtech
'curren': 0x00a4, # currency sign, U+00A4 ISOnum
'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa
'dagger': 0x2020, # dagger, U+2020 ISOpub
'darr': 0x2193, # downwards arrow, U+2193 ISOnum
'deg': 0x00b0, # degree sign, U+00B0 ISOnum
'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3
'diams': 0x2666, # black diamond suit, U+2666 ISOpub
'divide': 0x00f7, # division sign, U+00F7 ISOnum
'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso
'emsp': 0x2003, # em space, U+2003 ISOpub
'ensp': 0x2002, # en space, U+2002 ISOpub
'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
'equiv': 0x2261, # identical to, U+2261 ISOtech
'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3
'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1
'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
'euro': 0x20ac, # euro sign, U+20AC NEW
'exist': 0x2203, # there exists, U+2203 ISOtech
'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
'forall': 0x2200, # for all, U+2200 ISOtech
'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
'frasl': 0x2044, # fraction slash, U+2044 NEW
'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech
'gt': 0x003e, # greater-than sign, U+003E ISOnum
'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa
'harr': 0x2194, # left right arrow, U+2194 ISOamsa
'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub
'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1
'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum
'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1
'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
'infin': 0x221e, # infinity, U+221E ISOtech
'int': 0x222b, # integral, U+222B ISOtech
'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3
'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
'isin': 0x2208, # element of, U+2208 ISOtech
'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3
'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech
'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3
'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
'larr': 0x2190, # leftwards arrow, U+2190 ISOnum
'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum
'le': 0x2264, # less-than or equal to, U+2264 ISOtech
'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc
'lowast': 0x2217, # asterisk operator, U+2217 ISOtech
'loz': 0x25ca, # lozenge, U+25CA ISOpub
'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070
'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum
'lt': 0x003c, # less-than sign, U+003C ISOnum
'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
'mdash': 0x2014, # em dash, U+2014 ISOpub
'micro': 0x00b5, # micro sign, U+00B5 ISOnum
'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
'minus': 0x2212, # minus sign, U+2212 ISOtech
'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3
'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech
'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
'ndash': 0x2013, # en dash, U+2013 ISOpub
'ne': 0x2260, # not equal to, U+2260 ISOtech
'ni': 0x220b, # contains as member, U+220B ISOtech
'not': 0x00ac, # not sign, U+00AC ISOnum
'notin': 0x2209, # not an element of, U+2209 ISOtech
'nsub': 0x2284, # not a subset of, U+2284 ISOamsn
'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3
'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2
'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
'oline': 0x203e, # overline = spacing overscore, U+203E NEW
'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3
'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW
'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb
'or': 0x2228, # logical or = vee, U+2228 ISOtech
'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum
'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum
'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb
'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
'part': 0x2202, # partial differential, U+2202 ISOtech
'permil': 0x2030, # per mille sign, U+2030 ISOtech
'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3
'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3
'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3
'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
'pound': 0x00a3, # pound sign, U+00A3 ISOnum
'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech
'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb
'prop': 0x221d, # proportional to, U+221D ISOtech
'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3
'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum
'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech
'radic': 0x221a, # square root = radical sign, U+221A ISOtech
'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum
'rceil': 0x2309, # right ceiling, U+2309 ISOamsc
'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum
'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
'rfloor': 0x230b, # right floor, U+230B ISOamsc
'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3
'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070
'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum
'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW
'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2
'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb
'sect': 0x00a7, # section sign, U+00A7 ISOnum
'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
'spades': 0x2660, # black spade suit, U+2660 ISOpub
'sub': 0x2282, # subset of, U+2282 ISOtech
'sube': 0x2286, # subset of or equal to, U+2286 ISOtech
'sum': 0x2211, # n-ary sumation, U+2211 ISOamsb
'sup': 0x2283, # superset of, U+2283 ISOtech
'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
'supe': 0x2287, # superset of or equal to, U+2287 ISOtech
'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3
'there4': 0x2234, # therefore, U+2234 ISOtech
'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3
'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
'thinsp': 0x2009, # thin space, U+2009 ISOpub
'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1
'tilde': 0x02dc, # small tilde, U+02DC ISOdia
'times': 0x00d7, # multiplication sign, U+00D7 ISOnum
'trade': 0x2122, # trade mark sign, U+2122 ISOnum
'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa
'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1
'uarr': 0x2191, # upwards arrow, U+2191 ISOnum
'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3
'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1
'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070
'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070
}
# maps the Unicode codepoint to the HTML entity name
codepoint2name = {}
# maps the HTML entity name to the character
# (or a character reference if the character is outside the Latin-1 range)
entitydefs = {}
# Build both derived tables from name2codepoint.  Use items() instead of the
# Python-2-only iteritems() so the module also works under Python 3; on
# Python 2, items() yields exactly the same (name, codepoint) pairs.
for (name, codepoint) in name2codepoint.items():
    codepoint2name[codepoint] = name
    if codepoint <= 0xff:
        # Latin-1 range: store the literal character.
        entitydefs[name] = chr(codepoint)
    else:
        # Outside Latin-1: store a numeric character reference instead.
        entitydefs[name] = '&#%d;' % codepoint
# del name, codepoint
|
sekikn/incubator-airflow | refs/heads/master | airflow/contrib/utils/log/__init__.py | 10 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This package is deprecated. Please use `airflow.utils.log`."""
import warnings
warnings.warn("This module is deprecated. Please use `airflow.utils.log`.", DeprecationWarning, stacklevel=2)
|
moraes/tipfy | refs/heads/master | examples/auth/bootstrap.py | 16 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os, shutil, sys, tempfile, textwrap, urllib, urllib2, subprocess
from optparse import OptionParser
if sys.platform == 'win32':
    def quote(c):
        """Return *c* quoted for a spawned Windows command line."""
        # work around spawn lamosity on windows
        if ' ' in c:
            return '"%s"' % c
        return c
else:
    # POSIX spawn receives an argv list, so no quoting is required.
    quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
[sys.executable, '-Sc',
'try:\n'
' import ConfigParser\n'
'except ImportError:\n'
' print 1\n'
'else:\n'
' print 0\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded.  This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient.  However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
    # We will restart with python -S.
    args = sys.argv[:]
    args[0:0] = [sys.executable, '-S']
    args = map(quote, args)
    os.execv(sys.executable, args)
# Now we are running with -S.  We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site
sys.path[:] = clean_path
for k, v in sys.modules.items():
    if k in ('setuptools', 'pkg_resources') or (
        hasattr(v, '__path__') and
        len(v.__path__)==1 and
        not os.path.exists(os.path.join(v.__path__[0],'__init__.py'))):
        # This is a namespace package. Remove it.
        sys.modules.pop(k)
# Jython spawns subprocesses differently (see the bottom of this script).
is_jython = sys.platform.startswith('java')
# Default locations of the two supported setup scripts.
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
    """optparse callback: store *value* as a URL on parser.values.

    An empty value is stored as None.  A value without '://' is treated
    as a local path and converted to a file:// URL.  '--download-base'
    additionally gets a trailing slash appended when missing.  The
    destination attribute name is derived from the option string
    ('--foo-bar' -> 'foo_bar').
    """
    if not value:
        normalized = None
    else:
        normalized = value
        if '://' not in normalized:  # It doesn't smell like a URL.
            normalized = 'file://%s' % (
                urllib.pathname2url(
                    os.path.abspath(os.path.expanduser(normalized))),)
        if opt_str == '--download-base' and not normalized.endswith('/'):
            # Download base needs a trailing slash to make the world happy.
            normalized += '/'
    setattr(parser.values, opt_str[2:].replace('-', '_'), normalized)
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
# Command-line interface of the bootstrap script; the callback options are
# normalized to URLs by normalize_to_url above.
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
                  help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
                   action="store_true", dest="use_distribute", default=True,
                   help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
                  callback=normalize_to_url, nargs=1, type="string",
                  help=("Specify a URL or file location for the setup file. "
                        "If you use Setuptools, this will default to " +
                        setuptools_source + "; if you use Distribute, this "
                        "will default to " + distribute_source +"."))
parser.add_option("--download-base", action="callback", dest="download_base",
                  callback=normalize_to_url, nargs=1, type="string",
                  help=("Specify a URL or directory for downloading "
                        "zc.buildout and either Setuptools or Distribute. "
                        "Defaults to PyPI."))
parser.add_option("--eggs",
                  help=("Specify a directory for storing eggs. Defaults to "
                        "a temporary directory that is deleted when the "
                        "bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args += ['-c', options.config_file]
# Eggs go either to the user-specified directory or a throwaway temp dir
# (removed at the bottom of this script).
if options.eggs:
    eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
    eggs_dir = tempfile.mkdtemp()
# Pick the default setup script matching the chosen packaging tool.
if options.setup_source is None:
    if options.use_distribute:
        options.setup_source = distribute_source
    else:
        options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
    args.append('buildout:accept-buildout-test-releases=true')
# Tell buildout to run its 'bootstrap' command.
args.append('bootstrap')
# Ensure pkg_resources/setuptools (or Distribute) is importable; if not,
# download and execute the appropriate setup script to install it into
# eggs_dir.
try:
    import pkg_resources
    import setuptools # A flag. Sometimes pkg_resources is installed alone.
    if not hasattr(pkg_resources, '_distribute'):
        raise ImportError
except ImportError:
    ez_code = urllib2.urlopen(
        options.setup_source).read().replace('\r\n', '\n')
    ez = {}
    # Run the downloaded ez_setup/distribute_setup script in its own
    # namespace (Python 2 exec-in-dict syntax).
    exec ez_code in ez
    setup_args = dict(to_dir=eggs_dir, download_delay=0)
    if options.download_base:
        setup_args['download_base'] = options.download_base
    if options.use_distribute:
        setup_args['no_fake'] = True
    ez['use_setuptools'](**setup_args)
    if 'pkg_resources' in sys.modules:
        reload(sys.modules['pkg_resources'])
    import pkg_resources
    # This does not (always?) update the default working set. We will
    # do it.
    for path in sys.path:
        if path not in pkg_resources.working_set.entries:
            pkg_resources.working_set.add_entry(path)
# Build the command line used to easy_install zc.buildout into eggs_dir
# (-m: don't add to easy-install.pth, -q: quiet, -N: no deps, -x: no
# scripts, -d: target directory).
cmd = [quote(sys.executable),
       '-c',
       quote('from setuptools.command.easy_install import main; main()'),
       '-mqNxd',
       quote(eggs_dir)]
if not has_broken_dash_S:
    cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
    find_links = os.environ.get('bootstrap-testing-find-links')
if find_links:
    cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
    setup_requirement = 'distribute'
else:
    setup_requirement = 'setuptools'
ws = pkg_resources.working_set
# Point PYTHONPATH at the freshly-installed setuptools/distribute so the
# spawned easy_install can import it.
setup_requirement_path = ws.find(
    pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
    os.environ,
    PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'
    def _final_version(parsed_version):
        # A parsed version is final unless it contains a '*' marker other
        # than the '*final' ones (e.g. '*a' / '*b' for alphas and betas).
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setup_requirement_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        # Scan all index entries, keeping every distribution of the highest
        # final version seen so far.
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    # Pin the requirement to the selected version, e.g. 'zc.buildout==1.5.2'.
    requirement = '=='.join((requirement, version))
cmd.append(requirement)
# Spawn the easy_install command built above; Jython lacks os.spawnle, so
# it uses subprocess instead.
if is_jython:
    import subprocess
    exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
    exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
    sys.stdout.flush()
    sys.stderr.flush()
    print ("An error occurred when trying to install zc.buildout. "
           "Look above this message for any errors that "
           "were output by easy_install.")
    sys.exit(exitcode)
# Make the freshly-installed zc.buildout importable, then hand control to
# buildout's own main with the accumulated arguments.
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
    shutil.rmtree(eggs_dir)
|
Payshares/medida | refs/heads/master | test/gtest-1.6.0/test/gtest_shuffle_test.py | 3023 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
# Filter used to compute the *FILTERED* lists below.
TEST_FILTER = 'A*.A:A*.B:C*'
# Lazily-filled caches of test names; populated by CalculateTestLists().
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that also runs DISABLED_ tests."""
  return '--gtest_also_run_disabled_tests'


def FilterFlag(test_filter):
  """Returns a --gtest_filter flag selecting *test_filter*."""
  return '--gtest_filter=' + str(test_filter)


def RepeatFlag(n):
  """Returns a --gtest_repeat flag for *n* repetitions."""
  return '--gtest_repeat=' + str(n)


def ShuffleFlag():
  """Returns the flag that enables test shuffling."""
  return '--gtest_shuffle'


def RandomSeedFlag(n):
  """Returns a --gtest_random_seed flag with seed *n*."""
  return '--gtest_random_seed=' + str(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output."""
  env = os.environ.copy()
  env.update(extra_env)
  return gtest_test_utils.Subprocess([COMMAND] + args, env=env).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  runs = []
  for line in RunAndReturnOutput(extra_env, args).split('\n'):
    if line.startswith('----'):
      # A dashed separator marks the start of a new iteration's tests.
      batch = []
      runs.append(batch)
    elif line.strip():
      batch.append(line.strip())  # 'TestCaseName.TestName'
  return runs
def GetTestCases(tests):
  """Returns the test cases that appear in the given full test names.

  Args:
    tests: a list of full test names, each of the form
        'TestCaseName.TestName'

  Returns:
    A list of the distinct test case names from 'tests', ordered by first
    appearance.  Note that ALL duplicates are removed (the original
    docstring claimed only consecutive ones were): each test case appears
    exactly once in the result.
  """
  test_cases = []
  seen = set()  # O(1) membership check instead of scanning test_cases.
  for test in tests:
    test_case = test.split('.')[0]
    if test_case not in seen:
      seen.add(test_case)
      test_cases.append(test_case)
  return test_cases
def CalculateTestLists():
  """Calculates the list of tests run under different flags."""
  # Each module-level list is computed at most once (the subprocess runs are
  # expensive); a non-empty list is treated as already populated.  Only the
  # first iteration's tests ([0]) are recorded in every case.
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])
  # Shuffled variants all use the same fixed random seed (1) so that runs
  # are reproducible and comparable against the unshuffled lists.
  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
  def setUp(self):
    # Populate the module-level test lists (cached after the first call).
    CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
# Entry point: delegate to the googletest Python test runner when executed
# directly.
if __name__ == '__main__':
    gtest_test_utils.Main()
|
yujikato/DIRAC | refs/heads/integration | src/DIRAC/Core/DISET/private/ServiceConfiguration.py | 2 | """
It keeps the service configuration parameters like maximum running threads, number of processes, etc. ,
which can be configured in CS.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities import Network, List
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.private.Protocols import gDefaultProtocol
class ServiceConfiguration:
    """Access helper for a DISET service's configuration.

    Options are looked up in the Configuration Service (CS) sections
    associated with each of the service's possible names, in order, with
    hard-coded defaults for the tuning parameters (thread limits, port,
    protocol, context lifetime, ...).
    """

    def __init__(self, nameList):
        """
        :param nameList: list of service names; the first entry is the
                         canonical service name.
        """
        self.serviceName = nameList[0]
        self.serviceURL = None
        self.nameList = nameList
        # CS sections to search, in priority order, one per service name.
        self.pathList = []
        for svcName in nameList:
            self.pathList.append(PathFinder.getServiceSection(svcName))

    def getOption(self, optionName):
        """Return the first CS value found for optionName.

        Option names starting with '/' are absolute CS paths and are looked
        up directly; otherwise each service section is searched in order.
        Returns None when the option is defined nowhere.
        """
        if optionName[0] == "/":
            return gConfigurationData.extractOptionFromCFG(optionName)
        for path in self.pathList:
            value = gConfigurationData.extractOptionFromCFG("%s/%s" % (path, optionName))
            if value:
                return value
        return None

    def __getIntOption(self, optionName, defaultValue):
        """Return optionName converted to int, or defaultValue when the
        option is missing or not a valid integer.

        Centralizes the try/int/except-default pattern that was previously
        duplicated in every numeric getter.  ``except Exception`` is kept
        (rather than TypeError/ValueError) to preserve the original
        behavior of falling back on *any* lookup failure.
        """
        try:
            return int(self.getOption(optionName))
        except Exception:
            return defaultValue

    def getAddress(self):
        """Return the (host, port) pair to bind; '' means all interfaces."""
        return ("", self.getPort())

    def getHandlerLocation(self):
        """Return the configured path of the request handler module."""
        return self.getOption("HandlerPath")

    def getName(self):
        """Return the canonical service name."""
        return self.serviceName

    def setURL(self, sURL):
        """Cache the service URL computed by getURL()."""
        self.serviceURL = sURL

    def __getCSURL(self, URL=None):
        """Return the CS-configured URL, or the given fallback URL."""
        optionValue = self.getOption("URL")
        if optionValue:
            return optionValue
        return URL

    def registerAlsoAs(self):
        """Return the list of additional names to register the service under."""
        optionValue = self.getOption("RegisterAlsoAs")
        if optionValue:
            return List.fromChar(optionValue)
        else:
            return []

    def getMaxThreads(self):
        return self.__getIntOption("MaxThreads", 15)

    def getMinThreads(self):
        return self.__getIntOption("MinThreads", 1)

    def getMaxWaitingPetitions(self):
        return self.__getIntOption("MaxWaitingPetitions", 500)

    def getMaxMessagingConnections(self):
        return self.__getIntOption("MaxMessagingConnections", 20)

    def getMaxThreadsForMethod(self, actionType, method):
        """Return the per-method thread limit (default 15)."""
        return self.__getIntOption("ThreadLimit/%s/%s" % (actionType, method), 15)

    def getCloneProcesses(self):
        return self.__getIntOption("CloneProcesses", 0)

    def getPort(self):
        return self.__getIntOption("Port", 9876)

    def getProtocol(self):
        """Return the configured protocol, or the DISET default."""
        optionValue = self.getOption("Protocol")
        if optionValue:
            return optionValue
        return gDefaultProtocol

    def getHostname(self):
        """Return the configured /DIRAC/Hostname, or the local FQDN."""
        hostname = self.getOption("/DIRAC/Hostname")
        if not hostname:
            return Network.getFQDN()
        return hostname

    def getURL(self):
        """
        Build the service URL.

        Prefers a CS-configured URL (rewriting its protocol to the active
        one when they differ) over a URL synthesised from protocol,
        hostname, port and service name.  The result is cached in
        self.serviceURL.
        """
        if self.serviceURL:
            return self.serviceURL
        protocol = self.getProtocol()
        serviceURL = self.__getCSURL()
        if serviceURL:
            # Force the configured URL to use the active protocol.
            if serviceURL.find(protocol) != 0:
                urlFields = serviceURL.split(":")
                urlFields[0] = protocol
                serviceURL = ":".join(urlFields)
                self.setURL(serviceURL)
            return serviceURL
        hostName = self.getHostname()
        port = self.getPort()
        serviceURL = "%s://%s:%s/%s" % (protocol,
                                        hostName,
                                        port,
                                        self.getName())
        if serviceURL[-1] == "/":
            serviceURL = serviceURL[:-1]
        self.setURL(serviceURL)
        return serviceURL

    def getContextLifeTime(self):
        """Return the SSL/connection context lifetime in seconds (default 6h)."""
        return self.__getIntOption("ContextLifeTime", 21600)
|
kdaniels/cobbler | refs/heads/master | cobbler/clogger.py | 16 | """
Python standard logging doesn't super-intelligent and won't expose filehandles,
which we want. So we're not using it.
Copyright 2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import time
# Severity labels embedded into each formatted log line by Logger.__write.
ERROR = "ERROR"
WARNING = "WARNING"
DEBUG = "DEBUG"
INFO = "INFO"
class Logger:
    """Minimal file logger that appends timestamped lines to a logfile and
    falls back to printing on stdout when the file cannot be opened."""

    def __init__(self, logfile="/var/log/cobbler/cobbler.log"):
        """Open *logfile* in append mode (creating it if the parent
        directory exists).  On failure the logger degrades to stdout.

        Note: the original pre-created the file with a separate
        open/close before reopening it; a single open("a") already
        creates a missing file, so that redundant step was dropped.
        """
        self.logfile = None
        try:
            self.logfile = open(logfile, "a")
        except IOError:
            # You likely don't have write access (or the parent directory
            # is missing); this logger will just print things to stdout.
            pass

    def warning(self, msg):
        self.__write("WARNING", msg)

    def error(self, msg):
        self.__write("ERROR", msg)

    def debug(self, msg):
        self.__write("DEBUG", msg)

    def info(self, msg):
        self.__write("INFO", msg)

    def flat(self, msg):
        # Emit the message verbatim, with no timestamp/level prefix.
        self.__write(None, msg)

    def __write(self, level, msg):
        """Format (unless level is None) and emit one message.

        The level strings mirror the module-level ERROR/WARNING/DEBUG/INFO
        constants; they are inlined here so the class is self-contained.
        """
        if level is not None:
            msg = "%s - %s | %s" % (time.asctime(), level, msg)
        if self.logfile is not None:
            self.logfile.write(msg)
            self.logfile.write("\n")
            self.logfile.flush()
        else:
            print(msg)

    def handle(self):
        """Return the underlying file object (None when logging to stdout)."""
        return self.logfile

    def close(self):
        """Close the logfile if one was actually opened.

        Bug fix: the original called self.logfile.close() unconditionally
        and crashed with AttributeError when __init__ had failed to open
        the file (self.logfile left as None).
        """
        if self.logfile is not None:
            self.logfile.close()
|
bytedance/fedlearner | refs/heads/master | test/data_join/test_data_portal_job_manager.py | 1 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import time
import unittest
import logging
# Silence TensorFlow's C++ logging; must be set before tensorflow is imported.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.compat.v1 import gfile
from fnmatch import fnmatch
from google.protobuf import text_format
from fedlearner.data_join import common
from fedlearner.common import data_portal_service_pb2 as dp_pb
from fedlearner.common.db_client import DBClient
from fedlearner.data_join.data_portal_job_manager import DataPortalJobManager
class Timer:
    """Context manager that logs how long its ``with`` body took."""

    def __init__(self, content):
        # Human-readable label for the log line emitted on exit.
        self._content = content
        self._start_time = 0

    def __enter__(self):
        # Record the wall-clock start; elapsed time is computed on exit.
        self._start_time = time.time()

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = time.time() - self._start_time
        # Lazy %-style args keep formatting work inside the logging call.
        logging.info("%s takes %s second", self._content, elapsed)
class TestDataPortalJobManager(unittest.TestCase):
    """Tests DataPortalJobManager's input-directory listing.

    NOTE(review): uses DBClient('etcd', True) -- presumably an in-memory
    mock etcd backend -- and writes fixture files under ./portal_input_dir
    via tensorflow's gfile; confirm the test environment allows local
    filesystem writes.
    """

    def setUp(self) -> None:
        logging.getLogger().setLevel(logging.DEBUG)
        self._data_portal_name = 'test_data_portal_job_manager'
        self._kvstore = DBClient('etcd', True)
        self._portal_input_base_dir = './portal_input_dir'
        self._portal_output_base_dir = './portal_output_dir'
        self._raw_data_publish_dir = 'raw_data_publish_dir'
        # Start each test from a clean fixture directory.
        if gfile.Exists(self._portal_input_base_dir):
            gfile.DeleteRecursively(self._portal_input_base_dir)
        gfile.MakeDirs(self._portal_input_base_dir)
        # Fixture layout: 1001/*.data with a _SUCCESS tag, 1002/*.data
        # without one, 1003/*.csv with a _SUCCESS tag, plus one file with
        # an unmatched extension and some .crc checksum files that the job
        # manager is expected to ignore.
        self._data_fnames = ['1001/{}.data'.format(i) for i in range(100)]
        self._data_fnames_without_success = \
            ['1002/{}.data'.format(i) for i in range(100)]
        self._csv_fnames = ['1003/{}.csv'.format(i) for i in range(100)]
        self._unused_fnames = ['{}.xx'.format(100)]
        self._ignored_fnames = [f'.part-{i}.crc' for i in range(10)]
        self._all_fnames = self._data_fnames + \
            self._data_fnames_without_success + \
            self._csv_fnames + self._unused_fnames
        all_fnames_with_success = ['1001/_SUCCESS'] + ['1003/_SUCCESS'] +\
            self._all_fnames + self._ignored_fnames
        for fname in all_fnames_with_success:
            fpath = os.path.join(self._portal_input_base_dir, fname)
            gfile.MakeDirs(os.path.dirname(fpath))
            with gfile.Open(fpath, "w") as f:
                f.write('xxx')

    def tearDown(self) -> None:
        gfile.DeleteRecursively(self._portal_input_base_dir)

    def _list_input_dir(self, portal_options, file_wildcard,
                        target_fnames, max_files_per_job=8000):
        """Run one listing scenario and assert the job's file set.

        Publishes a fresh manifest to the kvstore, constructs a
        DataPortalJobManager with the given options, and checks that the
        resulting processing job contains exactly target_fnames (sorted;
        note: target_fnames is sorted in place as a side effect).
        """
        portal_manifest = dp_pb.DataPortalManifest(
            name=self._data_portal_name,
            data_portal_type=dp_pb.DataPortalType.Streaming,
            output_partition_num=4,
            input_file_wildcard=file_wildcard,
            input_base_dir=self._portal_input_base_dir,
            output_base_dir=self._portal_output_base_dir,
            raw_data_publish_dir=self._raw_data_publish_dir,
            processing_job_id=-1,
            next_job_id=0
        )
        self._kvstore.set_data(
            common.portal_kvstore_base_dir(self._data_portal_name),
            text_format.MessageToString(portal_manifest))
        with Timer("DataPortalJobManager initialization"):
            data_portal_job_manager = DataPortalJobManager(
                self._kvstore, self._data_portal_name,
                portal_options.long_running,
                portal_options.check_success_tag,
                portal_options.single_subfolder,
                portal_options.files_per_job_limit,
                max_files_per_job
            )
        portal_job = data_portal_job_manager._sync_processing_job()
        target_fnames.sort()
        fpaths = [os.path.join(self._portal_input_base_dir, f)
                  for f in target_fnames]
        self.assertEqual(len(fpaths), len(portal_job.fpaths))
        for index, fpath in enumerate(fpaths):
            self.assertEqual(fpath, portal_job.fpaths[index])

    def test_list_input_dir(self):
        # Wildcard + success tag: only 1001's *.data files qualify.
        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=True,
            single_subfolder=False,
            files_per_job_limit=None
        )
        self._list_input_dir(portal_options, "*.data", self._data_fnames)

    def test_list_input_dir_single_folder(self):
        # single_subfolder: only one subfolder's files end up in the job.
        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=False,
            single_subfolder=True,
            files_per_job_limit=None,
        )
        self._list_input_dir(
            portal_options, "*.data", self._data_fnames)

    def test_list_input_dir_files_limit(self):
        # files_per_job_limit appears to be a soft limit rounded up to
        # whole subfolders: 1 and 150 both yield one folder (100 files),
        # 200 yields both .data folders.
        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=False,
            single_subfolder=False,
            files_per_job_limit=1,
        )
        self._list_input_dir(
            portal_options, "*.data", self._data_fnames)

        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=False,
            single_subfolder=False,
            files_per_job_limit=150,
        )
        self._list_input_dir(
            portal_options, "*.data", self._data_fnames)

        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=False,
            single_subfolder=False,
            files_per_job_limit=200,
        )
        self._list_input_dir(
            portal_options, "*.data",
            self._data_fnames + self._data_fnames_without_success)

    def test_list_input_dir_over_limit(self):
        # max_files_per_job caps the listing the same way.
        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=False,
            single_subfolder=False,
        )
        self._list_input_dir(
            portal_options, "*.data", self._data_fnames, max_files_per_job=100)
        self._list_input_dir(
            portal_options, "*.data",
            self._data_fnames + self._data_fnames_without_success,
            max_files_per_job=200)

    def test_list_input_dir_without_success_check(self):
        # Without the success-tag check, 1002's files are included too.
        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=False,
            single_subfolder=False,
            files_per_job_limit=None
        )
        self._list_input_dir(
            portal_options, "*.data",
            self._data_fnames + self._data_fnames_without_success)

    def test_list_input_dir_without_wildcard(self):
        # No wildcard + success tag: all files in tagged folders (both
        # .data and .csv), crc files still ignored.
        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=True,
            single_subfolder=False,
            files_per_job_limit=None
        )
        self._list_input_dir(
            portal_options, None,
            self._data_fnames + self._csv_fnames)

    def test_list_input_dir_without_wildcard_and_success_check(self):
        # Most permissive configuration: everything except ignored files.
        portal_options = dp_pb.DataPotraMasterlOptions(
            use_mock_etcd=True,
            long_running=False,
            check_success_tag=False,
            single_subfolder=False,
            files_per_job_limit=None
        )
        self._list_input_dir(portal_options, None, self._all_fnames)
# Run the suite with the standard unittest runner when executed directly.
if __name__ == '__main__':
    unittest.main()
|
srajag/nova | refs/heads/master | nova/tests/virt/hyperv/db_fakes.py | 17 | # Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
import uuid
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import utils
def get_fake_instance_data(name, project_id, user_id):
    """Return a dict describing a fake instance for Hyper-V driver tests."""
    # The embedded flavor mirrors the m1.tiny entry used elsewhere in this
    # module's FLAVORS table.
    flavor = {'name': 'm1.tiny',
              'memory_mb': 512,
              'vcpus': 1,
              'root_gb': 1024,
              'flavorid': 1,
              'rxtx_factor': 1}
    return {'name': name,
            'id': 1,
            'uuid': str(uuid.uuid4()),
            'project_id': project_id,
            'user_id': user_id,
            'image_ref': "1",
            'kernel_id': "1",
            'ramdisk_id': "1",
            'mac_address': "de:ad:be:ef:be:ef",
            'flavor': flavor}
def get_fake_image_data(project_id, user_id):
    """Return a dict describing a fake image owned by the given tenant/user.

    Unlike get_fake_instance_data, the 'flavor' entry here is just the
    flavor's name, not a nested dict.
    """
    image = dict(
        name='image1',
        id=1,
        project_id=project_id,
        user_id=user_id,
        image_ref="1",
        kernel_id="1",
        ramdisk_id="1",
        mac_address="de:ad:be:ef:be:ef",
        flavor='m1.tiny',
    )
    return image
def get_fake_volume_info_data(target_portal, volume_id):
    """Return fake iSCSI volume connection info.

    Bug fix: 'volume_id' previously hard-coded the literal 1 instead of
    echoing the volume_id argument, inconsistent with
    get_fake_block_device_info below, which does use the argument.
    """
    return {
        'driver_volume_type': 'iscsi',
        'data': {
            'volume_id': volume_id,
            'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
            'target_portal': target_portal,
            'target_lun': 1,
            'auth_method': 'CHAP',
        }
    }
def get_fake_block_device_info(target_portal, volume_id):
    """Return fake block-device-mapping info with one iSCSI boot volume."""
    connection_info = {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_lun': 1,
            'volume_id': volume_id,
            'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
            'target_portal': target_portal,
            'target_discovered': False,
        },
    }
    mapping = {
        'connection_info': connection_info,
        'mount_device': 'vda',
        'delete_on_termination': False,
    }
    return {
        'block_device_mapping': [mapping],
        'root_device_name': None,
        'ephemerals': [],
        'swap': None,
    }
def stub_out_db_instance_api(stubs):
    """Stubs out the db API for creating Instances."""
    # Static flavor table backing the fake flavor lookups below.
    FLAVORS = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}

    class FakeModel(object):
        """Stubs out for model."""

        def __init__(self, values):
            # Backing dict exposed through attribute and item access below.
            self.values = values

        def get(self, key, default=None):
            # Dict-style get with a default.
            if key in self.values:
                return self.values[key]
            else:
                return default

        def __getattr__(self, name):
            # Attribute access falls through to the backing dict
            # (raises KeyError, not AttributeError, for unknown names).
            return self.values[name]

        def __getitem__(self, key):
            return self.get(key)

        def __setitem__(self, key, value):
            self.values[key] = value

        def __str__(self):
            return str(self.values)

    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # NOTE(review): silently returns None when 'flavor' is absent --
        # presumably callers always supply one; confirm before relying on it.
        if 'flavor' not in values:
            return
        flavor = values['flavor']
        base_options = {
            'name': values['name'],
            'id': values['id'],
            'uuid': str(uuid.uuid4()),
            'reservation_id': utils.generate_uid('r'),
            'image_ref': values['image_ref'],
            'kernel_id': values['kernel_id'],
            'ramdisk_id': values['ramdisk_id'],
            'vm_state': vm_states.BUILDING,
            'task_state': task_states.SCHEDULING,
            'user_id': values['user_id'],
            'project_id': values['project_id'],
            'flavor': flavor,
            'memory_mb': flavor['memory_mb'],
            'vcpus': flavor['vcpus'],
            'mac_addresses': [{'address': values['mac_address']}],
            'root_gb': flavor['root_gb'],
        }
        return FakeModel(base_options)

    def fake_flavor_get_all(context, inactive=0, filters=None):
        return FLAVORS.values()

    def fake_flavor_get_by_name(context, name):
        return FLAVORS[name]

    def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
        return {}

    # Install the fakes onto the nova.db API via the caller's stub fixture.
    stubs.Set(db, 'instance_create', fake_instance_create)
    stubs.Set(db, 'flavor_get_all', fake_flavor_get_all)
    stubs.Set(db, 'flavor_get_by_name', fake_flavor_get_by_name)
    stubs.Set(db, 'block_device_mapping_get_all_by_instance',
              fake_block_device_mapping_get_all_by_instance)
|
mypetyak/StrokeWidthTransform | refs/heads/master | swt.py | 1 | # -*- encoding: utf-8 -*-
from __future__ import division
from collections import defaultdict
import hashlib
import math
import os
import time
from urllib2 import urlopen
import numpy as np
import cv2
import scipy.sparse, scipy.spatial
t0 = time.clock()  # reference point for the coarse phase-timing printouts
diagnostics = True  # when True, intermediate images are written to disk for inspection
class SWTScrubber(object):
    """Stroke Width Transform (SWT) text detector pipeline (Python 2).

    Stages: edge/gradient maps -> stroke-width image -> connected
    components -> letter-candidate filtering -> word grouping -> binary
    text mask.
    """

    @classmethod
    def scrub(cls, filepath):
        """
        Apply Stroke-Width Transform to image.

        :param filepath: relative or absolute filepath to source image
        :return: numpy array representing result of transform
        """

        canny, sobelx, sobely, theta = cls._create_derivative(filepath)
        swt = cls._swt(theta, canny, sobelx, sobely)
        shapes = cls._connect_components(swt)
        swts, heights, widths, topleft_pts, images = cls._find_letters(swt, shapes)
        word_images = cls._find_words(swts, heights, widths, topleft_pts, images)

        # The final mask is the union (sum) of all detected word layers.
        final_mask = np.zeros(swt.shape)
        for word in word_images:
            final_mask += word
        return final_mask

    @classmethod
    def _create_derivative(cls, filepath):
        """Return (canny_edges, sobel_x, sobel_y, gradient_angle) for the
        grayscale image at filepath."""
        img = cv2.imread(filepath,0)
        edges = cv2.Canny(img, 175, 320, apertureSize=3)
        # Create gradient map using Sobel
        sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=-1)
        sobely64f = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=-1)
        theta = np.arctan2(sobely64f, sobelx64f)
        if diagnostics:
            cv2.imwrite('edges.jpg',edges)
            cv2.imwrite('sobelx64f.jpg', np.absolute(sobelx64f))
            cv2.imwrite('sobely64f.jpg', np.absolute(sobely64f))
            # amplify theta for visual inspection
            theta_visible = (theta + np.pi)*255/(2*np.pi)
            cv2.imwrite('theta.jpg', theta_visible)
        return (edges, sobelx64f, sobely64f, theta)

    @classmethod
    # NOTE(review): first parameter is named 'self' but this is a
    # classmethod, so it actually receives the class object.
    def _swt(self, theta, edges, sobelx64f, sobely64f):
        """Compute the stroke-width image: for each edge pixel, march a ray
        along the (negated) gradient until an opposing edge is found and
        record the stroke thickness along the ray."""
        # create empty image, initialized to infinity
        swt = np.empty(theta.shape)
        swt[:] = np.Infinity
        rays = []

        print time.clock() - t0
        # now iterate over pixels in image, checking Canny to see if we're on an edge.
        # if we are, follow a normal a ray to either the next edge or image border
        # edgesSparse = scipy.sparse.coo_matrix(edges)
        step_x_g = -1 * sobelx64f
        step_y_g = -1 * sobely64f
        mag_g = np.sqrt( step_x_g * step_x_g + step_y_g * step_y_g )
        grad_x_g = step_x_g / mag_g
        grad_y_g = step_y_g / mag_g

        for x in xrange(edges.shape[1]):
            for y in xrange(edges.shape[0]):
                if edges[y, x] > 0:
                    step_x = step_x_g[y, x]
                    step_y = step_y_g[y, x]
                    mag = mag_g[y, x]
                    grad_x = grad_x_g[y, x]
                    grad_y = grad_y_g[y, x]
                    ray = []
                    ray.append((x, y))
                    prev_x, prev_y, i = x, y, 0
                    while True:
                        i += 1
                        cur_x = math.floor(x + grad_x * i)
                        cur_y = math.floor(y + grad_y * i)

                        if cur_x != prev_x or cur_y != prev_y:
                            # we have moved to the next pixel!
                            try:
                                if edges[cur_y, cur_x] > 0:
                                    # found edge,
                                    ray.append((cur_x, cur_y))
                                    theta_point = theta[y, x]
                                    alpha = theta[cur_y, cur_x]
                                    # Accept the ray only if the opposing
                                    # gradient is roughly anti-parallel.
                                    if math.acos(grad_x * -grad_x_g[cur_y, cur_x] + grad_y * -grad_y_g[cur_y, cur_x]) < np.pi/2.0:
                                        thickness = math.sqrt( (cur_x - x) * (cur_x - x) + (cur_y - y) * (cur_y - y) )
                                        for (rp_x, rp_y) in ray:
                                            swt[rp_y, rp_x] = min(thickness, swt[rp_y, rp_x])
                                        rays.append(ray)
                                    break
                                # this is positioned at end to ensure we don't add a point beyond image boundary
                                ray.append((cur_x, cur_y))
                            except IndexError:
                                # reached image boundary
                                break
                            prev_x = cur_x
                            prev_y = cur_y

        # Compute median SWT
        for ray in rays:
            median = np.median([swt[y, x] for (x, y) in ray])
            for (x, y) in ray:
                swt[y, x] = min(median, swt[y, x])
        if diagnostics:
            cv2.imwrite('swt.jpg', swt * 100)

        return swt

    @classmethod
    def _connect_components(cls, swt):
        """Group stroke pixels into connected components whose stroke
        widths differ by less than a 1:3 ratio; returns {label: binary
        layer image}."""
        # STEP: Compute distinct connected components
        # Implementation of disjoint-set
        class Label(object):
            def __init__(self, value):
                self.value = value
                self.parent = self
                self.rank = 0
            def __eq__(self, other):
                if type(other) is type(self):
                    return self.value == other.value
                else:
                    return False
            def __ne__(self, other):
                return not self.__eq__(other)

        # Registry of all labels ever created, keyed by label value.
        ld = {}

        def MakeSet(x):
            try:
                return ld[x]
            except KeyError:
                item = Label(x)
                ld[x] = item
                return item

        def Find(item):
            # item = ld[x]
            # Path compression: re-parent directly to the root.
            if item.parent != item:
                item.parent = Find(item.parent)
            return item.parent

        def Union(x, y):
            """
            :param x:
            :param y:
            :return: root node of new union tree
            """
            x_root = Find(x)
            y_root = Find(y)
            if x_root == y_root:
                return x_root

            # Union by rank.
            if x_root.rank < y_root.rank:
                x_root.parent = y_root
                return y_root
            elif x_root.rank > y_root.rank:
                y_root.parent = x_root
                return x_root
            else:
                y_root.parent = x_root
                x_root.rank += 1
                return x_root

        # apply Connected Component algorithm, comparing SWT values.
        # components with a SWT ratio less extreme than 1:3 are assumed to be
        # connected. Apply twice, once for each ray direction/orientation, to
        # allow for dark-on-light and light-on-dark texts
        trees = {}
        # Assumption: we'll never have more than 65535-1 unique components
        label_map = np.zeros(shape=swt.shape, dtype=np.uint16)
        next_label = 1
        # First Pass, raster scan-style
        swt_ratio_threshold = 3.0
        for y in xrange(swt.shape[0]):
            for x in xrange(swt.shape[1]):
                sw_point = swt[y, x]
                if sw_point < np.Infinity and sw_point > 0:
                    neighbors = [(y, x-1),   # west
                                 (y-1, x-1), # northwest
                                 (y-1, x),   # north
                                 (y-1, x+1)] # northeast
                    connected_neighbors = None
                    neighborvals = []

                    for neighbor in neighbors:
                        # west
                        try:
                            sw_n = swt[neighbor]
                            label_n = label_map[neighbor]
                        except IndexError:
                            continue
                        if label_n > 0 and sw_n / sw_point < swt_ratio_threshold and sw_point / sw_n < swt_ratio_threshold:
                            neighborvals.append(label_n)
                            if connected_neighbors:
                                connected_neighbors = Union(connected_neighbors, MakeSet(label_n))
                            else:
                                connected_neighbors = MakeSet(label_n)

                    if not connected_neighbors:
                        # We don't see any connections to North/West
                        trees[next_label] = (MakeSet(next_label))
                        label_map[y, x] = next_label
                        next_label += 1
                    else:
                        # We have at least one connection to North/West
                        label_map[y, x] = min(neighborvals)
                        # For each neighbor, make note that their respective connected_neighbors are connected
                        # for label in connected_neighbors. @todo: do I need to loop at all neighbor trees?
                        trees[connected_neighbors.value] = Union(trees[connected_neighbors.value], connected_neighbors)

        # Second pass. re-base all labeling with representative label for each connected tree
        layers = {}
        contours = defaultdict(list)
        for x in xrange(swt.shape[1]):
            for y in xrange(swt.shape[0]):
                if label_map[y, x] > 0:
                    item = ld[label_map[y, x]]
                    common_label = Find(item).value
                    label_map[y, x] = common_label

                    contours[common_label].append([x, y])
                    try:
                        layer = layers[common_label]
                    except KeyError:
                        layers[common_label] = np.zeros(shape=swt.shape, dtype=np.uint16)
                        layer = layers[common_label]

                    layer[y, x] = 1
        return layers

    @classmethod
    def _find_letters(cls, swt, shapes):
        """Filter components down to letter candidates using size, aspect
        ratio, diameter/stroke-width and relative-size heuristics."""
        # STEP: Discard shapes that are probably not letters
        swts = []
        heights = []
        widths = []
        topleft_pts = []
        images = []

        for label,layer in shapes.iteritems():
            (nz_y, nz_x) = np.nonzero(layer)
            east, west, south, north = max(nz_x), min(nz_x), max(nz_y), min(nz_y)
            width, height = east - west, south - north

            if width < 8 or height < 8:
                continue

            if width / height > 10 or height / width > 10:
                continue

            diameter = math.sqrt(width * width + height * height)
            median_swt = np.median(swt[(nz_y, nz_x)])
            if diameter / median_swt > 10:
                continue

            if width / layer.shape[1] > 0.4 or height / layer.shape[0] > 0.4:
                continue

            if diagnostics:
                print " written to image."
                cv2.imwrite('layer'+ str(label) +'.jpg', layer * 255)

            # we use log_base_2 so we can do linear distance comparison later using k-d tree
            # ie, if log2(x) - log2(y) > 1, we know that x > 2*y
            # Assumption: we've eliminated anything with median_swt == 1
            swts.append([math.log(median_swt, 2)])
            heights.append([math.log(height, 2)])
            topleft_pts.append(np.asarray([north, west]))
            widths.append(width)
            images.append(layer)

        return swts, heights, widths, topleft_pts, images

    @classmethod
    def _find_words(cls, swts, heights, widths, topleft_pts, images):
        """Chain letter candidates with similar stroke width, height and
        pairwise angle into words; returns the layer images of letters
        that belong to chains longer than 3."""
        # Find all shape pairs that have similar median stroke widths
        print 'SWTS'
        print swts
        print 'DONESWTS'
        swt_tree = scipy.spatial.KDTree(np.asarray(swts))
        stp = swt_tree.query_pairs(1)
        # Find all shape pairs that have similar heights
        height_tree = scipy.spatial.KDTree(np.asarray(heights))
        htp = height_tree.query_pairs(1)
        # Intersection of valid pairings
        isect = htp.intersection(stp)

        chains = []
        pairs = []
        pair_angles = []
        for pair in isect:
            left = pair[0]
            right = pair[1]
            widest = max(widths[left], widths[right])
            distance = np.linalg.norm(topleft_pts[left] - topleft_pts[right])
            if distance < widest * 3:
                delta_yx = topleft_pts[left] - topleft_pts[right]
                angle = np.arctan2(delta_yx[0], delta_yx[1])
                if angle < 0:
                    angle += np.pi
                pairs.append(pair)
                pair_angles.append(np.asarray([angle]))

        angle_tree = scipy.spatial.KDTree(np.asarray(pair_angles))
        atp = angle_tree.query_pairs(np.pi/12)

        for pair_idx in atp:
            pair_a = pairs[pair_idx[0]]
            pair_b = pairs[pair_idx[1]]
            left_a = pair_a[0]
            right_a = pair_a[1]
            left_b = pair_b[0]
            right_b = pair_b[1]

            # @todo - this is O(n^2) or similar, extremely naive. Use a search tree.
            added = False
            for chain in chains:
                if left_a in chain:
                    chain.add(right_a)
                    added = True
                elif right_a in chain:
                    chain.add(left_a)
                    added = True
            if not added:
                chains.append(set([left_a, right_a]))
            added = False
            for chain in chains:
                if left_b in chain:
                    chain.add(right_b)
                    added = True
                elif right_b in chain:
                    chain.add(left_b)
                    added = True
            if not added:
                chains.append(set([left_b, right_b]))

        word_images = []
        for chain in [c for c in chains if len(c) > 3]:
            for idx in chain:
                word_images.append(images[idx])
                # cv2.imwrite('keeper'+ str(idx) +'.jpg', images[idx] * 255)
                # final += images[idx]

        return word_images
file_url = 'http://upload.wikimedia.org/wikipedia/commons/0/0b/ReceiptSwiss.jpg'
local_filename = hashlib.sha224(file_url).hexdigest()
try:
s3_response = urlopen(file_url)
with open(local_filename, 'wb+') as destination:
while True:
# read file in 4kB chunks
chunk = s3_response.read(4096)
if not chunk: break
destination.write(chunk)
#final_mask = SWTScrubber.scrub('wallstreetsmd.jpeg')
final_mask = SWTScrubber.scrub(local_filename)
# final_mask = cv2.GaussianBlur(final_mask, (1, 3), 0)
# cv2.GaussianBlur(sobelx64f, (3, 3), 0)
cv2.imwrite('final.jpg', final_mask * 255)
print time.clock() - t0
finally:
s3_response.close()
|
duyanning/epm | refs/heads/master | epmaux.py | 1 | # -*- coding: utf-8 -*-
import glob
import os
import os.path
from Project import *
from Solution import *
import settings
def findSlnFile():
    """Return the absolute path of the first *.epmsln file in the current
    working directory, or None when no solution file is present."""
    matches = glob.glob("*.epmsln")
    return os.path.abspath(matches[0]) if matches else None
def findPrjFile():
    """Walk from the current directory toward the filesystem root looking
    for a *.epmprj project file.

    Side effect: when a project file is found, the process working
    directory is changed to the directory that contains it.  Returns the
    absolute path of the project file, or None when the root is reached
    without a match.  Progress is printed when settings.verbose is set.
    """
    topArrived = False
    cd = os.getcwd();
    while not topArrived:
        if settings.verbose:
            print "finding .epmprj in " + cd + " ... ",
        # Look for an .epmprj file in the current candidate directory.
        prjFilesList = glob.glob(os.path.join(cd, "*.epmprj"))
        # print glob.glob(os.path.join(cd, "*.epmprj"))
        # Return as soon as one is found.
        if prjFilesList:
            if settings.verbose:
                print "found."
            os.chdir(cd)
            return os.path.abspath(prjFilesList[0])
        if settings.verbose:
            print "" # emit a newline even when nothing was found
        lastCd = cd
        # Otherwise move up and search the parent directory.
        cd = os.path.normpath(os.path.join(cd, os.pardir))
        # normpath of the root's parent is the root itself: stop there.
        if (cd == lastCd):
            topArrived = True
    return None
def findFile(pattern):
    """Return the absolute path of the first file matching the glob
    *pattern*, or None when nothing matches."""
    matches = glob.glob(pattern)
    return os.path.abspath(matches[0]) if matches else None
def list_prj(name):
    """Load the project file *name* and display its contents."""
    prj = Project(name)
    prj.load()
    prj.show()
def list_sln(name):
    """Load the solution file *name* and display its contents."""
    # NOTE(review): this uses EpmSolution while every other function in this
    # module uses Solution -- confirm EpmSolution is exported by the Solution
    # module, otherwise this raises NameError.
    sln = EpmSolution(name)
    sln.load()
    sln.show()
def list_prj_or_sln(name, isSln):
    """Show *name* as a solution when isSln is true, else as a project."""
    show = list_sln if isSln else list_prj
    show(name)
def build_solution():
    """Locate the enclosing solution file and build it.
    Prints an error and returns None when no solution file is found."""
    slnName = findSlnFile()
    if not slnName:
        print "cannot find a solution"
        return
    sln = Solution(slnName)
    sln.load()
    sln.build()
def build_project():
    """Locate the enclosing project file and build it, returning the
    result of Project.build(); None when no project file is found."""
    prjName = findPrjFile()
    if not prjName:
        print "cannot find a project"
        return
    prj = Project(prjName)
    prj.load()
    return prj.build()
def build_gch():
    """Build the precompiled header(s) (gch) of the enclosing project.
    Prints an error and returns None when no project file is found."""
    prjName = findPrjFile()
    if not prjName:
        print "cannot find a project"
        return
    prj = Project(prjName)
    prj.load()
    prj.buildGch()
def compile_sourcefile(srcfile):
    """Compile a single source file *srcfile* within the enclosing project
    and return the compile result; None when no project file is found."""
    prjName = findPrjFile()
    if not prjName:
        print "cannot find a project"
        return
    prj = Project(prjName)
    prj.load()
    return prj.compile(srcfile)
def set_active_config(name):
    """Make *name* the active build configuration of the enclosing project."""
    # NOTE(review): unlike the sibling commands there is no None check on
    # findPrjFile() here, and this calls saveProjectFile() where the others
    # call save() -- confirm both differences are intentional.
    prjName = findPrjFile()
    prj = Project(prjName)
    prj.load()
    prj.setActiveConfig(name)
    prj.saveProjectFile()
def add_config(name):
    """Add a build configuration called *name* to the enclosing project
    and persist the project file."""
    projectFile = findPrjFile()
    project = Project(projectFile)
    project.load()
    project.addConfig(name)
    project.save()
def add_source(srcList):
    """Add each source file in *srcList* to the enclosing project and save.
    Files that are already part of the project are reported and skipped."""
    prjName = findPrjFile()
    if not prjName:
        print "cannot find project"
        return
    prj = Project(prjName)
    prj.load()
    for f in srcList:
        # addSource() reports failure (falsy) when the file is already listed.
        if not prj.addSource(f):
            print f, "is already in project"
    prj.save()
def remove_source(srcList):
    """Remove each source file in *srcList* from the enclosing project and
    save. Files that are not part of the project are reported and skipped."""
    prjName = findPrjFile()
    if not prjName:
        print "cannot find project"
        return
    prj = Project(prjName)
    prj.load()
    for f in srcList:
        # removeSource() reports failure (falsy) when the file is not listed.
        if not prj.removeSource(f):
            print f, "is not in project"
    prj.save()
def add_project(prj):
    """Add project *prj* to the enclosing solution and save the solution."""
    slnName = findSlnFile()
    if not slnName:
        print "cannot find solution"
        return
    sln = Solution(slnName)
    sln.load()
    sln.addProject(prj)
    sln.save()
def remove_project(prj):
    """Remove project *prj* from the enclosing solution and save it."""
    slnName = findSlnFile()
    if not slnName:
        print "cannot find solution"
        return
    sln = Solution(slnName)
    sln.load()
    sln.removeProject(prj)
    sln.save()
def new_project(prjName, type):
    """Create a new project file <prjName>.epmprj with a default "debug"
    configuration.

    *type* selects the output artifact: "dll" -> .so shared library,
    "lib" -> .a static archive, anything else -> plain executable name.
    """
    # prjPath = os.path.abspath(prjName)
    prjName += ".epmprj"
    prj = Project(prjName)
    # Strip the .epmprj extension again to derive the output file name.
    (name, ext) = os.path.splitext(prjName)
    if type == "dll":
        postfix = ".so"
    elif type == "lib":
        postfix = ".a"
    else:
        postfix = ""
    exeName = name + postfix
    prj.addConfig("debug")
    print 'exe name: ' + exeName
    prj.setExeName(exeName)
    prj.setType(type)
    prj.save()
def new_solution(slnName):
    """Create an empty solution and write it to <slnName>.epmsln."""
    solutionFile = slnName + ".epmsln"
    Solution(solutionFile).save()
def add_pch(headerList):
    """Register each header in *headerList* as a precompiled header of the
    enclosing project and save. Already-registered headers are reported."""
    prjName = findPrjFile()
    if not prjName:
        print "cannot find project"
        return
    prj = Project(prjName)
    prj.load()
    for f in headerList:
        # addPch() reports failure (falsy) when the header is already listed.
        if not prj.addPch(f):
            print f, "is already in project"
    prj.save()
def remove_pch(headerList):
    """Unregister each header in *headerList* as a precompiled header of the
    enclosing project and save. Unknown headers are reported and skipped."""
    prjName = findPrjFile()
    if not prjName:
        print "cannot find project"
        return
    prj = Project(prjName)
    prj.load()
    for f in headerList:
        # removePch() reports failure (falsy) when the header is not listed.
        if not prj.removePch(f):
            print f, "is not in project"
    prj.save()
def open_gui():
    """Launch the epm GUI application (epmapp is imported lazily so the
    command-line tools do not pay for GUI dependencies)."""
    import epmapp
    app = epmapp.EpmApp()
    app.main()
|
peterkuma/fileshackproject | refs/heads/master | fileshackproject/urls.py | 1 | from django.urls import path
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
import django.views.static
import fileshack.urls
admin.autodiscover()
# Custom error views provided by the fileshack app.
handler404 = 'fileshack.views.page_not_found'
handler500 = 'fileshack.views.server_error'
urlpatterns = []
# Serve static and media files through Django itself only in development,
# or when SERVE_STATIC explicitly opts in.
if settings.DEBUG or settings.SERVE_STATIC:
    urlpatterns += [
        url(r'^static/(?P<path>.*)$', django.views.static.serve, {'document_root': settings.STATIC_ROOT}),
        url(r'^media/(?P<path>.*)$', django.views.static.serve, {'document_root': settings.MEDIA_ROOT}),
    ]
urlpatterns += [
    # Be sure to comment out the following line in a production environment!
    #url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
    #url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    path('admin/doc/', include('django.contrib.admindocs.urls')),
    path('admin/', admin.site.urls),
    # Everything else is handled by the fileshack app's URLconf.
    url(r'^', include(fileshack.urls)),
]
|
Pexego/odoo | refs/heads/master | addons/website_gengo/__init__.py | 76 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers |
angelapper/edx-platform | refs/heads/master | common/djangoapps/third_party_auth/saml.py | 2 | """
Slightly customized python-social-auth backend for SAML 2.0 support
"""
import logging
from copy import deepcopy
import requests
from django.contrib.sites.models import Site
from django.http import Http404
from django.utils.functional import cached_property
from django_countries import countries
from social_core.backends.saml import OID_EDU_PERSON_ENTITLEMENT, SAMLAuth, SAMLIdentityProvider
from social_core.exceptions import AuthForbidden
from openedx.core.djangoapps.theming.helpers import get_current_request
# Identifier strings for the available SAMLIdentityProvider implementations
# (used by get_saml_idp_choices() and get_saml_idp_class() in this module).
STANDARD_SAML_PROVIDER_KEY = 'standard_saml_provider'
SAP_SUCCESSFACTORS_SAML_KEY = 'sap_success_factors'
log = logging.getLogger(__name__)
class SAMLAuthBackend(SAMLAuth): # pylint: disable=abstract-method
    """
    Customized version of SAMLAuth that gets the list of IdPs from third_party_auth's list of
    enabled providers.
    """
    name = "tpa-saml"
    def get_idp(self, idp_name):
        """ Given the name of an IdP, get a SAMLIdentityProvider instance """
        # Deferred import of the Django model (presumably to avoid an import
        # cycle at module load time -- confirm).
        from .models import SAMLProviderConfig
        return SAMLProviderConfig.current(idp_name).get_config()
    def setting(self, name, default=None):
        """ Get a setting, from SAMLConfiguration """
        try:
            return self._config.get_setting(name)
        except KeyError:
            # Fall back to the regular python-social-auth strategy settings.
            return self.strategy.setting(name, default, backend=self)
    def auth_url(self):
        """
        Check that SAML is enabled and that the request includes an 'idp'
        parameter before getting the URL to which we must redirect in order to
        authenticate the user.
        raise Http404 if SAML authentication is disabled.
        """
        if not self._config.enabled:
            log.error('SAML authentication is not enabled')
            raise Http404
        return super(SAMLAuthBackend, self).auth_url()
    def _check_entitlements(self, idp, attributes):
        """
        Check if we require the presence of any specific eduPersonEntitlement.
        raise AuthForbidden if the user should not be authenticated, or do nothing
        to allow the login pipeline to continue.
        """
        if "requiredEntitlements" in idp.conf:
            entitlements = attributes.get(OID_EDU_PERSON_ENTITLEMENT, [])
            # Every configured entitlement must appear in the SAML assertion.
            for expected in idp.conf['requiredEntitlements']:
                if expected not in entitlements:
                    log.warning(
                        "SAML user from IdP %s rejected due to missing eduPersonEntitlement %s", idp.name, expected)
                    raise AuthForbidden(self)
    def _create_saml_auth(self, idp):
        """
        Get an instance of OneLogin_Saml2_Auth
        idp: The Identity Provider - a social_core.backends.saml.SAMLIdentityProvider instance
        """
        # We only override this method so that we can add extra debugging when debug_mode is True
        # Note that auth_inst is instantiated just for the current HTTP request, then is destroyed
        auth_inst = super(SAMLAuthBackend, self)._create_saml_auth(idp)
        from .models import SAMLProviderConfig
        if SAMLProviderConfig.current(idp.name).debug_mode:
            def wrap_with_logging(method_name, action_description, xml_getter):
                """ Wrap the request and response handlers to add debug mode logging """
                method = getattr(auth_inst, method_name)
                def wrapped_method(*args, **kwargs):
                    """ Wrapped login or process_response method """
                    result = method(*args, **kwargs)
                    log.info("SAML login %s for IdP %s. XML is:\n%s", action_description, idp.name, xml_getter())
                    return result
                setattr(auth_inst, method_name, wrapped_method)
            # Log both the outgoing AuthnRequest and the incoming response XML.
            wrap_with_logging("login", "request", auth_inst.get_last_request_xml)
            wrap_with_logging("process_response", "response", auth_inst.get_last_response_xml)
        return auth_inst
    @cached_property
    def _config(self):
        # SAMLConfiguration for the site serving the current request,
        # computed once per backend instance.
        from .models import SAMLConfiguration
        return SAMLConfiguration.current(Site.objects.get_current(get_current_request()))
class EdXSAMLIdentityProvider(SAMLIdentityProvider):
    """
    SAMLIdentityProvider variant that, on top of the standard details, also
    extracts any deployment-defined extra fields listed in the provider
    configuration.
    """
    def get_user_details(self, attributes):
        """
        Build the standard user-details dict, then add one entry per
        configured extra field definition; the value is None when the SAML
        assertion does not carry the corresponding attribute.
        """
        details = super(EdXSAMLIdentityProvider, self).get_user_details(attributes)
        for definition in self.conf.get('extra_field_definitions', []):
            urn = definition['urn']
            details[definition['name']] = attributes[urn][0] if urn in attributes else None
        return details
class SapSuccessFactorsIdentityProvider(EdXSAMLIdentityProvider):
    """
    Customized version of EdXSAMLIdentityProvider that knows how to retrieve user details
    from the SAPSuccessFactors OData API, rather than parse them directly off the
    SAML assertion that we get in response to a login attempt.
    """
    # Configuration keys that must all be present in self.conf before the
    # OData API can be queried for rich user data.
    required_variables = (
        'sapsf_oauth_root_url',
        'sapsf_private_key',
        'odata_api_root_url',
        'odata_company_id',
        'odata_client_id',
    )
    # Define the relationships between SAPSF record fields and Open edX logistration fields.
    default_field_mapping = {
        'username': 'username',
        'firstName': 'first_name',
        'lastName': 'last_name',
        'defaultFullName': 'fullname',
        'email': 'email',
        'country': 'country',
        'city': 'city',
    }
    # Define a simple mapping to relate SAPSF values to Open edX-compatible values for
    # any given field. By default, this only contains the Country field, as SAPSF supplies
    # a country name, which has to be translated to a country code.
    default_value_mapping = {
        'country': {name: code for code, name in countries}
    }
    # Unfortunately, not everything has a 1:1 name mapping between Open edX and SAPSF, so
    # we need some overrides. TODO: Fill in necessary mappings
    # BUGFIX: the override must be applied to the per-field 'country' table.
    # Updating the outer dict (as the code previously did) created a bogus
    # top-level key called 'United States' that get_registration_fields()
    # never consults, so the override silently had no effect.
    default_value_mapping['country'].update({
        'United States': 'US',
    })
    def get_registration_fields(self, response):
        """
        Get a dictionary mapping registration field names to default values.
        """
        field_mapping = self.field_mappings
        registration_fields = {edx_name: response['d'].get(odata_name, '') for odata_name, edx_name in field_mapping.items()}
        value_mapping = self.value_mappings
        # Translate raw SAPSF values (e.g. country names) into the values the
        # registration form expects (e.g. ISO country codes).
        for field, value in registration_fields.items():
            if field in value_mapping and value in value_mapping[field]:
                registration_fields[field] = value_mapping[field][value]
        return registration_fields
    @property
    def field_mappings(self):
        """
        Get a dictionary mapping the field names returned in an SAP SuccessFactors
        user entity to the field names with which those values should be used in
        the Open edX registration form.
        """
        overrides = self.conf.get('sapsf_field_mappings', {})
        base = self.default_field_mapping.copy()
        base.update(overrides)
        return base
    @property
    def value_mappings(self):
        """
        Get a dictionary mapping of field names to override objects which each
        map values received from SAP SuccessFactors to values expected in the
        Open edX platform registration form.
        """
        overrides = self.conf.get('sapsf_value_mappings', {})
        # deepcopy so per-provider overrides never mutate the class-level default.
        base = deepcopy(self.default_value_mapping)
        for field, override in overrides.items():
            if field in base:
                base[field].update(override)
            else:
                base[field] = override[field]
        return base
    @property
    def timeout(self):
        """
        The number of seconds OData API requests should wait for a response before failing.
        """
        return self.conf.get('odata_api_request_timeout', 10)
    @property
    def sapsf_idp_url(self):
        return self.conf['sapsf_oauth_root_url'] + 'idp'
    @property
    def sapsf_token_url(self):
        return self.conf['sapsf_oauth_root_url'] + 'token'
    @property
    def sapsf_private_key(self):
        return self.conf['sapsf_private_key']
    @property
    def odata_api_root_url(self):
        return self.conf['odata_api_root_url']
    @property
    def odata_company_id(self):
        return self.conf['odata_company_id']
    @property
    def odata_client_id(self):
        return self.conf['odata_client_id']
    def missing_variables(self):
        """
        Check that we have all the details we need to properly retrieve rich data from the
        SAP SuccessFactors OData API. If we don't, then we should log a warning indicating
        the specific variables that are missing.
        """
        if not all(var in self.conf for var in self.required_variables):
            missing = [var for var in self.required_variables if var not in self.conf]
            log.warning(
                "To retrieve rich user data for an SAP SuccessFactors identity provider, the following keys in "
                "'other_settings' are required, but were missing: %s",
                missing
            )
            return missing
    def get_odata_api_client(self, user_id):
        """
        Get a Requests session with the headers needed to properly authenticate it with
        the SAP SuccessFactors OData API.
        """
        session = requests.Session()
        # First obtain a SAML assertion for the user from the SAPSF IdP ...
        assertion = session.post(
            self.sapsf_idp_url,
            data={
                'client_id': self.odata_client_id,
                'user_id': user_id,
                'token_url': self.sapsf_token_url,
                'private_key': self.sapsf_private_key,
            },
            timeout=self.timeout,
        )
        assertion.raise_for_status()
        assertion = assertion.text
        # ... then exchange that assertion for an OAuth bearer token.
        token = session.post(
            self.sapsf_token_url,
            data={
                'client_id': self.odata_client_id,
                'company_id': self.odata_company_id,
                'grant_type': 'urn:ietf:params:oauth:grant-type:saml2-bearer',
                'assertion': assertion,
            },
            timeout=self.timeout,
        )
        token.raise_for_status()
        token = token.json()['access_token']
        session.headers.update({'Authorization': 'Bearer {}'.format(token), 'Accept': 'application/json'})
        return session
    def get_user_details(self, attributes):
        """
        Attempt to get rich user details from the SAP SuccessFactors OData API. If we're missing any
        of the details we need to do that, fail nicely by returning the details we're able to extract
        from just the SAML response and log a warning.
        """
        details = super(SapSuccessFactorsIdentityProvider, self).get_user_details(attributes)
        if self.missing_variables():
            # If there aren't enough details to make the request, log a warning and return the details
            # from the SAML assertion.
            return details
        username = details['username']
        fields = ','.join(self.field_mappings)
        odata_api_url = '{root_url}User(userId=\'{user_id}\')?$select={fields}'.format(
            root_url=self.odata_api_root_url,
            user_id=username,
            fields=fields,
        )
        try:
            client = self.get_odata_api_client(user_id=username)
            response = client.get(
                odata_api_url,
                timeout=self.timeout,
            )
            response.raise_for_status()
            response = response.json()
        except requests.RequestException as err:
            # If there was an HTTP level error, log the error and return the details from the SAML assertion.
            log.warning(
                'Unable to retrieve user details with username %s from SAPSuccessFactors for company ID %s '
                'with url "%s" and error message: %s',
                username,
                self.odata_company_id,
                odata_api_url,
                # BUGFIX: was `err.message`, which does not exist on Python 3
                # (and is deprecated since Python 2.6); it raised
                # AttributeError inside this handler and masked the real error.
                str(err),
                exc_info=True,
            )
            return details
        return self.get_registration_fields(response)
def get_saml_idp_choices():
    """
    Return the (key, label) choice pairs for the SAMLIdentityProvider
    subclasses that can process SAML requests, for use in the Django
    administration form.
    """
    choice_list = [
        (STANDARD_SAML_PROVIDER_KEY, 'Standard SAML provider'),
        (SAP_SUCCESSFACTORS_SAML_KEY, 'SAP SuccessFactors provider'),
    ]
    return tuple(choice_list)
def get_saml_idp_class(idp_identifier_string):
    """
    Map an identity-provider type identifier to the SAMLIdentityProvider
    subclass able to handle requests for that type of identity provider.
    Unknown identifiers fall back to EdXSAMLIdentityProvider, with an
    error logged.
    """
    dispatch = {
        STANDARD_SAML_PROVIDER_KEY: EdXSAMLIdentityProvider,
        SAP_SUCCESSFACTORS_SAML_KEY: SapSuccessFactorsIdentityProvider,
    }
    try:
        return dispatch[idp_identifier_string]
    except KeyError:
        log.error(
            '%s is not a valid EdXSAMLIdentityProvider subclass; using EdXSAMLIdentityProvider base class.',
            idp_identifier_string
        )
        return EdXSAMLIdentityProvider
|
youdonghai/intellij-community | refs/heads/master | python/testData/inspections/AddCallSuperKeywordOnlyParamInInit.py | 79 | class A:
def __init__(self, a):
pass
class B(A):
def <warning descr="Call to __init__ of super class is missed">__i<caret>nit__</warning>(self, b, c=1, *args, kw_only):
pass |
trtt/apitrace | refs/heads/guiwip2 | specs/scripts/xml2api.py | 7 | #!/usr/bin/env python
##########################################################################
#
# Copyright 2014 VMware, Inc
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
#
# Script to half-generate *api.py based on Khronos' *.xml
#
import optparse
import sys
import xml.etree.ElementTree as ET
import c2api
def appendToken(tokens, text):
    """
    Split *text* on whitespace and append the pieces to *tokens*.
    A piece that begins with '*' is appended character by character, so
    every '*' of a pointer declarator becomes its own token.
    """
    for piece in text.split():
        if piece.startswith('*'):
            tokens.extend(piece)
        else:
            tokens.append(piece)
def getType(node):
    """Build an apitrace type expression (e.g. 'Array(GLint, "n")') from a
    <proto> or <param> element of the Khronos XML registry.

    The text around and inside the optional <ptype> child is tokenized; an
    optional 'len' attribute turns a trailing pointer into Pointer(...) or
    Array(...); c2api's DeclParser converts the remaining C type text.
    """
    tokens = []
    if node.text is not None:
        appendToken(tokens, node.text)
    ptype = node.find('ptype')
    if ptype is not None:
        appendToken(tokens, ptype.text)
        appendToken(tokens, ptype.tail)
    # Array
    lenExpr = node.get('len')
    if lenExpr is not None:
        # A length implies the declaration ends in a pointer; drop that '*'
        # so the base type can be wrapped in Pointer/Array below.
        assert tokens[-1] == '*'
        tokens = tokens[:-1]
        if lenExpr == "COMPSIZE(pname)":
            lenExpr = "_gl_param_size(pname)"
    typeText = ' '.join(tokens)
    parser = c2api.DeclParser()
    parser.tokenize(typeText + ';')
    typeExpr = parser.parse_type()
    if lenExpr is not None:
        if lenExpr == "1":
            typeExpr = 'Pointer(%s)' % (typeExpr)
        else:
            # Non-numeric lengths are expressions and must be quoted.
            if not lenExpr.isdigit():
                lenExpr = '"' + lenExpr + '"'
            typeExpr = 'Array(%s, %s)' % (typeExpr, lenExpr)
    return typeExpr
def processCommand(prototypes, command):
    """Translate one <command> element into an apitrace function-prototype
    string and store it in *prototypes* keyed by function name.

    Reads the module-global `namespace` (set in main()) to choose between
    the StdFunction (WGL) and GlFunction constructors.
    """
    proto = command.find('proto')
    functionName = proto.find('name').text
    retType = getType(proto)
    args = []
    for param in command.findall('param'):
        argName = param.find('name').text
        #print argName, param.text
        argType = getType(param)
        # Normalize Windows device-context parameter capitalization.
        if argName.lower() == 'hdc':
            argName = 'hDC'
        arg = '(%s, "%s")' % (argType, argName)
        args.append(arg)
    if namespace == 'WGL':
        constructor = 'StdFunction'
    else:
        constructor = 'GlFunction'
    prototype = '%s(%s, "%s", [%s])' % (constructor, retType, functionName, ', '.join(args))
    prototypes[functionName] = prototype
def processRequire(node, filterName):
    """
    Collect the <command> names required by a <feature>/<extension> node.

    Returns (node name, [command names]) or None when the node does not
    match *filterName* or requires no commands.
    """
    name = node.get('name')
    if filterName is not None and name != filterName:
        return None
    functionNames = [
        command.get('name')
        for requireNode in node.findall('require')
        for command in requireNode.findall('command')
    ]
    if not functionNames:
        return None
    return name, functionNames
def printPrototypes(prototypes, extensionName, functionNames, skip=set()):
    """Print the prototypes of *functionNames* under a '# <extension>'
    header comment, omitting names present in *skip*.

    NOTE(review): the mutable default `skip=set()` is shared across calls;
    it is only read here and main() passes its own set, but it must never
    be mutated inside this function.
    """
    print ' # %s' % extensionName
    if extensionName == 'GL_EXT_direct_state_access':
        functionNames.sort()
    for functionName in functionNames:
        if functionName not in skip:
            prototype = prototypes[functionName]
            print ' %s,' % prototype
    print
def main():
    """Parse each Khronos registry XML file given on the command line and
    print apitrace prototype lists for its features and extensions.

    The optional --filter restricts output to a single feature/extension.
    """
    optparser = optparse.OptionParser(
        usage='\n\t%prog [options] <xml> ...',
        version='%%prog')
    optparser.add_option(
        '--filter', metavar='NAME',
        type='string', dest='filter',
        help='filter feature/extension')
    (options, args) = optparser.parse_args(sys.argv[1:])
    # processCommand()/printPrototypes() read these module-level globals.
    global prototypes
    global namespace
    for arg in args:
        tree = ET.parse(arg)
        root = tree.getroot()
        prototypes = {}
        for commands in root.findall('commands'):
            namespace = commands.get('namespace')
            for command in commands.findall('command'):
                processCommand(prototypes, command)
        # Extract features
        features = []
        for feature in root.findall('feature'):
            ret = processRequire(feature, options.filter)
            if ret is not None:
                features.append(ret)
        # Extract extensions
        extensions = []
        for extension in root.find('extensions').findall('extension'):
            ret = processRequire(extension, options.filter)
            if ret is not None:
                extensions.append(ret)
        # Eliminate the functions from features that are in extensions
        for extensionName, extensionFunctionNames in extensions:
            for featureName, featureFunctionNames in features:
                for functionName in extensionFunctionNames:
                    try:
                        featureFunctionNames.remove(functionName)
                    except ValueError:
                        pass
        # Print all
        # `skip` accumulates already-printed names so each appears only once.
        skip = set()
        for extensionName, functionNames in features + extensions:
            printPrototypes(prototypes, extensionName, functionNames, skip)
            skip.update(functionNames)
|
dajohnso/cfme_tests | refs/heads/master | cfme/tests/cloud/test_providers.py | 2 | # -*- coding: utf-8 -*-
# pylint: disable=E1101
# pylint: disable=W0621
import uuid
import fauxfactory
import pytest
from utils import error
from cfme.base.credential import Credential
from cfme.cloud.provider import discover, wait_for_a_provider, CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.openstack import OpenStackProvider, RHOSEndpoint
from cfme.common.provider_views import (CloudProviderAddView,
CloudProvidersView,
CloudProvidersDiscoverView)
from cfme import test_requirements
from utils import testgen, version
from utils.appliance.implementations.ui import navigate_to
from utils.update import update
from cfme.rest.gen_data import arbitration_profiles as _arbitration_profiles
from cfme.rest.gen_data import _creating_skeleton as creating_skeleton
# Parametrize the tests in this module over CloudProvider instances
# (one parametrization per test function).
pytest_generate_tests = testgen.generate([CloudProvider], scope="function")
@pytest.mark.tier(3)
@test_requirements.discovery
def test_empty_discovery_form_validation(appliance):
    """ Tests that the flash message is correct when discovery form is empty."""
    # credentials=None submits the discovery form without filling it in.
    discover(None, EC2Provider)
    view = appliance.browser.create_view(CloudProvidersDiscoverView)
    view.flash.assert_message('Username is required')
@pytest.mark.tier(3)
@test_requirements.discovery
def test_discovery_cancelled_validation(appliance):
    """ Tests that the flash message is correct when discovery is cancelled."""
    # cancel=True presses Cancel instead of submitting the discovery form.
    discover(None, EC2Provider, cancel=True)
    view = appliance.browser.create_view(CloudProvidersView)
    view.flash.assert_success_message('Cloud Providers Discovery was cancelled by the user')
@pytest.mark.tier(3)
@test_requirements.discovery
def test_add_cancelled_validation(request):
    """Tests that the flash message is correct when add is cancelled."""
    prov = EC2Provider()
    # Clean up in case the provider got created despite the cancel.
    request.addfinalizer(prov.delete_if_exists)
    prov.create(cancel=True)
    view = prov.browser.create_view(CloudProvidersView)
    view.flash.assert_success_message('Add of Cloud Provider was cancelled by the user')
@pytest.mark.tier(3)
def test_password_mismatch_validation(appliance):
    """Discovery form must flag a password/verify-password mismatch."""
    # secret and verify_secret deliberately differ (5 vs 7 characters).
    cred = Credential(
        principal=fauxfactory.gen_alphanumeric(5),
        secret=fauxfactory.gen_alphanumeric(5),
        verify_secret=fauxfactory.gen_alphanumeric(7))
    discover(cred, EC2Provider)
    view = appliance.browser.create_view(CloudProvidersView)
    view.flash.assert_message('Password/Verify Password do not match')
@pytest.mark.tier(3)
@pytest.mark.uncollect()
@pytest.mark.usefixtures('has_no_cloud_providers')
@test_requirements.discovery
def test_providers_discovery_amazon(appliance):
    """Run EC2 discovery with real credentials (currently uncollected)."""
    # This test was being uncollected anyway, and needs to be parametrized and not directory call
    # out to specific credential keys
    # amazon_creds = get_credentials_from_config('cloudqe_amazon')
    # discover(amazon_creds, EC2Provider)
    view = appliance.browser.create_view(CloudProvidersView)
    view.flash.assert_success_message('Amazon Cloud Providers: Discovery successfully initiated')
    wait_for_a_provider()
@pytest.mark.tier(3)
@pytest.mark.usefixtures('has_no_cloud_providers')
@test_requirements.discovery
def test_provider_add_with_bad_credentials(provider):
    """ Tests provider add with bad credentials

    Metadata:
        test_flag: crud
    """
    default_credentials = provider.default_endpoint.credentials
    # default settings
    flash = 'Login failed due to a bad username or password.'
    default_credentials.principal = "bad"
    default_credentials.secret = 'notyourday'
    # Each provider type reports a different validation error message.
    if provider.one_of(AzureProvider):
        flash = (
            "Credential validation was not successful: Incorrect credentials - "
            "check your Azure Client ID and Client Key"
        )
        default_credentials.principal = str(uuid.uuid4())
        default_credentials.secret = 'notyourday'
    elif provider.one_of(GCEProvider):
        flash = 'Credential validation was not successful: Invalid Google JSON key'
        default_credentials.service_account = '{"test": "bad"}'
    elif provider.one_of(OpenStackProvider):
        # Keep only the default endpoint so validation exercises it alone.
        for endp_name in provider.endpoints.keys():
            if endp_name != 'default':
                del provider.endpoints[endp_name]
    with error.expected(flash):
        provider.create(validate_credentials=True)
@pytest.mark.tier(2)
@pytest.mark.usefixtures('has_no_cloud_providers')
@test_requirements.discovery
def test_provider_crud(provider):
    """ Tests provider add with good credentials

    Full cycle: create -> validate stats -> rename -> rename back -> delete.

    Metadata:
        test_flag: crud
    """
    provider.create()
    provider.validate_stats(ui=True)
    old_name = provider.name
    with update(provider):
        provider.name = str(uuid.uuid4()) # random uuid
    with update(provider):
        provider.name = old_name # old name
    provider.delete(cancel=False)
    provider.wait_for_delete()
@pytest.mark.tier(3)
@test_requirements.discovery
def test_type_required_validation(request):
    """Test to validate type while adding a provider"""
    prov = CloudProvider()
    request.addfinalizer(prov.delete_if_exists)
    view = navigate_to(prov, 'Add')
    view.fill({'name': 'foo'})
    # With no provider type selected the Add button must stay disabled.
    assert not view.add.active
@pytest.mark.tier(3)
@test_requirements.discovery
def test_name_required_validation(request):
    """Tests to validate the name while adding a provider"""
    prov = EC2Provider(
        name=None,
        region='US East (Northern Virginia)')
    request.addfinalizer(prov.delete_if_exists)
    # create() asserts success; an empty name must keep us on the form.
    with pytest.raises(AssertionError):
        prov.create()
    view = prov.create_view(CloudProviderAddView)
    assert view.name.help_block == "Required"
    assert not view.add.active
@pytest.mark.tier(3)
def test_region_required_validation(request, soft_assert):
    """Tests to validate the region while adding a provider"""
    prov = EC2Provider(name=fauxfactory.gen_alphanumeric(5), region=None)
    request.addfinalizer(prov.delete_if_exists)
    # create() asserts success; a missing region must keep us on the form.
    with pytest.raises(AssertionError):
        prov.create()
    view = prov.create_view(CloudProviderAddView)
    soft_assert(view.region.help_block == "Required")
@pytest.mark.tier(3)
@test_requirements.discovery
def test_host_name_required_validation(request):
    """Test to validate the hostname while adding a provider"""
    prov = OpenStackProvider(
        name=fauxfactory.gen_alphanumeric(5),
        hostname=None,
        ip_address=fauxfactory.gen_ipaddr(prefix=[10]))
    request.addfinalizer(prov.delete_if_exists)
    # It must raise an exception because it keeps on the form
    with pytest.raises(AssertionError):
        prov.create()
    endpoints = prov.create_view(prov.endpoints_form)
    assert endpoints.default.hostname.help_block == "Required"
@pytest.mark.tier(3)
def test_api_port_blank_validation(request):
    """Test to validate blank api port while adding a provider"""
    endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(5),
                            ip_address=fauxfactory.gen_ipaddr(prefix=[10]),
                            api_port='',
                            security_protocol='Non-SSL')
    prov = OpenStackProvider(name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint)
    request.addfinalizer(prov.delete_if_exists)
    # It must raise an exception because it keeps on the form
    with pytest.raises(AssertionError):
        prov.create()
    endpoints = prov.create_view(prov.endpoints_form)
    assert endpoints.default.api_port.help_block == "Required"
@pytest.mark.tier(3)
def test_user_id_max_character_validation():
    """Exercise provider discovery with a 51-character user id
    (field length validation)."""
    cred = Credential(principal=fauxfactory.gen_alphanumeric(51), secret='')
    discover(cred, EC2Provider)
@pytest.mark.tier(3)
def test_password_max_character_validation():
    """Exercise provider discovery with a 51-character password
    (field length validation)."""
    password = fauxfactory.gen_alphanumeric(51)
    cred = Credential(
        principal=fauxfactory.gen_alphanumeric(5),
        secret=password,
        verify_secret=password)
    discover(cred, EC2Provider)
@pytest.mark.tier(3)
@test_requirements.discovery
def test_name_max_character_validation(request, cloud_provider):
    """Test to validate that provider can have up to 255 characters in name"""
    request.addfinalizer(lambda: cloud_provider.delete_if_exists(cancel=False))
    name = fauxfactory.gen_alphanumeric(255)
    # Rename the existing provider to a 255-character name; it must survive.
    with update(cloud_provider):
        cloud_provider.name = name
    assert cloud_provider.exists
@pytest.mark.tier(3)
def test_hostname_max_character_validation():
    """Test to validate max character for hostname field"""
    endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(256),
                            api_port=None,
                            security_protocol=None)
    prov = OpenStackProvider(name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint)
    try:
        prov.create()
    except AssertionError:
        # The form should have truncated the 256-char input to 255 chars.
        endpoints = prov.create_view(prov.endpoints_form)
        assert endpoints.default.hostname.value == prov.hostname[0:255]
@pytest.mark.tier(3)
@test_requirements.discovery
def test_api_port_max_character_validation():
    """Test to validate max character for api port field"""
    endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(5),
                            api_port=fauxfactory.gen_alphanumeric(16),
                            security_protocol='Non-SSL')
    prov = OpenStackProvider(name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint)
    try:
        prov.create()
    except AssertionError:
        # The form should have truncated the 16-char input to 15 chars.
        view = prov.create_view(prov.endpoints_form)
        text = view.default.api_port.value
        assert text == prov.default_endpoint.api_port[0:15]
@pytest.mark.tier(3)
def test_openstack_provider_has_api_version():
    """Check whether the Keystone API version field is present for Openstack."""
    prov = CloudProvider()
    view = navigate_to(prov, 'Add')
    # Selecting OpenStack as the type should reveal the API version selector.
    view.fill({"prov_type": "OpenStack"})
    assert view.api_version.is_displayed, "API version select is not visible"
class TestProvidersRESTAPI(object):
    @pytest.fixture(scope="function")
    def arbitration_profiles(self, request, appliance, cloud_provider):
        """Create two arbitration profiles via the REST API and return them."""
        num_profiles = 2
        response = _arbitration_profiles(
            request, appliance.rest_api, cloud_provider, num=num_profiles)
        assert appliance.rest_api.response.status_code == 200
        assert len(response) == num_profiles
        return response
    @pytest.mark.tier(3)
    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    @pytest.mark.parametrize('from_detail', [True, False], ids=['from_detail', 'from_collection'])
    def test_cloud_networks_query(self, cloud_provider, appliance, from_detail):
        """Tests querying cloud providers and cloud_networks collection for network info.

        Metadata:
            test_flag: rest
        """
        # Query either the provider's own networks or the global collection.
        if from_detail:
            networks = appliance.rest_api.collections.providers.get(
                name=cloud_provider.name).cloud_networks
        else:
            networks = appliance.rest_api.collections.cloud_networks
        assert appliance.rest_api.response.status_code == 200
        assert networks
        assert len(networks) == networks.subcount
        assert len(networks.find_by(enabled=True)) >= 1
        assert 'CloudNetwork' in networks[0].type
    @pytest.mark.tier(3)
    @pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
    def test_security_groups_query(self, cloud_provider, appliance):
        """Tests querying cloud networks subcollection for security groups info.

        Metadata:
            test_flag: rest
        """
        network = appliance.rest_api.collections.providers.get(
            name=cloud_provider.name).cloud_networks[0]
        network.reload(attributes='security_groups')
        security_groups = network.security_groups
        # "security_groups" needs to be present, even if it's just an empty list
        assert isinstance(security_groups, list)
        # if it's not empty, check type
        if security_groups:
            assert 'SecurityGroup' in security_groups[0]['type']
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
def test_create_arbitration_profiles(self, appliance, arbitration_profiles):
"""Tests creation of arbitration profiles.
Metadata:
test_flag: rest
"""
for profile in arbitration_profiles:
record = appliance.rest_api.collections.arbitration_profiles.get(id=profile.id)
assert appliance.rest_api.response.status_code == 200
assert record._data == profile._data
assert 'ArbitrationProfile' in profile.type
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.parametrize('method', ['post', 'delete'])
def test_delete_arbitration_profiles_from_detail(self, appliance, arbitration_profiles, method):
"""Tests delete arbitration profiles from detail.
Metadata:
test_flag: rest
"""
status = 204 if method == 'delete' else 200
for entity in arbitration_profiles:
entity.action.delete(force_method=method)
assert appliance.rest_api.response.status_code == status
with error.expected('ActiveRecord::RecordNotFound'):
entity.action.delete(force_method=method)
assert appliance.rest_api.response.status_code == 404
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
def test_delete_arbitration_profiles_from_collection(self, appliance, arbitration_profiles):
"""Tests delete arbitration profiles from collection.
Metadata:
test_flag: rest
"""
collection = appliance.rest_api.collections.arbitration_profiles
collection.action.delete(*arbitration_profiles)
assert appliance.rest_api.response.status_code == 200
with error.expected('ActiveRecord::RecordNotFound'):
collection.action.delete(*arbitration_profiles)
assert appliance.rest_api.response.status_code == 404
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.parametrize('from_detail', [True, False], ids=['from_detail', 'from_collection'])
def test_edit_arbitration_profiles(self, appliance, arbitration_profiles, from_detail):
"""Tests editing of arbitration profiles.
Metadata:
test_flag: rest
"""
response_len = len(arbitration_profiles)
zone = appliance.rest_api.collections.availability_zones[-1]
locators = [{'id': zone.id}, {'href': zone.href}]
new = [{'availability_zone': locators[i % 2]} for i in range(response_len)]
if from_detail:
edited = []
for i in range(response_len):
edited.append(arbitration_profiles[i].action.edit(**new[i]))
assert appliance.rest_api.response.status_code == 200
else:
for i in range(response_len):
new[i].update(arbitration_profiles[i]._ref_repr())
edited = appliance.rest_api.collections.arbitration_profiles.action.edit(*new)
assert appliance.rest_api.response.status_code == 200
assert len(edited) == response_len
for i in range(response_len):
assert edited[i].availability_zone_id == zone.id
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: version.current_version() < '5.8')
def test_create_arbitration_rules_with_profile(self, request, appliance, arbitration_profiles):
"""Tests creation of arbitration rules referencing arbitration profiles.
Metadata:
test_flag: rest
"""
num_rules = 2
profile = arbitration_profiles[0]
references = [{'id': profile.id}, {'href': profile._href}]
data = []
for index in range(num_rules):
data.append({
'description': 'test admin rule {}'.format(fauxfactory.gen_alphanumeric(5)),
'operation': 'inject',
'arbitration_profile': references[index % 2],
'expression': {'EQUAL': {'field': 'User-userid', 'value': 'admin'}}
})
response = creating_skeleton(request, appliance.rest_api, 'arbitration_rules', data)
assert appliance.rest_api.response.status_code == 200
assert len(response) == num_rules
for rule in response:
record = appliance.rest_api.collections.arbitration_rules.get(id=rule.id)
assert record.arbitration_profile_id == rule.arbitration_profile_id == profile.id
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: version.current_version() < '5.8')
def test_create_arbitration_rule_with_invalid_profile(self, request, appliance):
"""Tests creation of arbitration rule referencing invalid arbitration profile.
Metadata:
test_flag: rest
"""
data = [{
'description': 'test admin rule {}'.format(fauxfactory.gen_alphanumeric(5)),
'operation': 'inject',
'arbitration_profile': 'invalid_value',
'expression': {'EQUAL': {'field': 'User-userid', 'value': 'admin'}}
}]
response = creating_skeleton(request, appliance.rest_api, 'arbitration_rules', data)
# this will fail once BZ 1433477 is fixed - change and expand the test accordingly
assert appliance.rest_api.response.status_code == 200
for rule in response:
assert not hasattr(rule, 'arbitration_profile_id')
|
Soya93/Extract-Refactoring | refs/heads/master | python/testData/intentions/convertLambdaToFunction.py | 83 | newlist = lambda x<caret>, y: (x+y)/y
x = 1 |
pducks32/intergrala | refs/heads/master | python/sympy/sympy/plotting/pygletplot/plot_mode_base.py | 15 | from __future__ import print_function, division
from pyglet.gl import *
from plot_mode import PlotMode
from threading import Thread, Event, RLock
from color_scheme import ColorScheme
from sympy.core import S
from sympy.core.compatibility import is_sequence
from time import sleep
import warnings
class PlotModeBase(PlotMode):
    """
    Intended parent class for plotting
    modes. Provides base functionality
    in conjunction with its parent,
    PlotMode.
    """
    ##
    ## Class-Level Attributes
    ##
    """
    The following attributes are meant
    to be set at the class level, and serve
    as parameters to the plot mode registry
    (in PlotMode). See plot_modes.py for
    concrete examples.
    """
    """
    i_vars
        'x' for Cartesian2D
        'xy' for Cartesian3D
        etc.
    d_vars
        'y' for Cartesian2D
        'r' for Polar
        etc.
    """
    i_vars, d_vars = '', ''
    """
    intervals
        Default intervals for each i_var, and in the
        same order. Specified [min, max, steps].
        No variable can be given (it is bound later).
    """
    intervals = []
    """
    aliases
        A list of strings which can be used to
        access this mode.
        'cartesian' for Cartesian2D and Cartesian3D
        'polar' for Polar
        'cylindrical', 'polar' for Cylindrical
        Note that _init_mode chooses the first alias
        in the list as the mode's primary_alias, which
        will be displayed to the end user in certain
        contexts.
    """
    aliases = []
    """
    is_default
        Whether to set this mode as the default
        for arguments passed to PlotMode() containing
        the same number of d_vars as this mode and
        at most the same number of i_vars.
    """
    is_default = False
    """
    All of the above attributes are defined in PlotMode.
    The following ones are specific to PlotModeBase.
    """
    """
    A list of the render styles. Do not modify.
    """
    # Bitmask values: bit 0 = wireframe pass, bit 1 = solid pass (see draw()).
    styles = {'wireframe': 1, 'solid': 2, 'both': 3}
    """
    style_override
        Always use this style if not blank.
    """
    style_override = ''
    """
    default_wireframe_color
    default_solid_color
        Can be used when color is None or being calculated.
        Used by PlotCurve and PlotSurface, but not anywhere
        in PlotModeBase.
    """
    default_wireframe_color = (0.85, 0.85, 0.85)
    default_solid_color = (0.6, 0.6, 0.9)
    default_rot_preset = 'xy'
    ##
    ## Instance-Level Attributes
    ##
    ## 'Abstract' member functions
    def _get_evaluator(self):
        # Prefer the fast lambda-based evaluator; on any failure fall back to
        # the slower but more robust sympy subs evaluator.
        if self.use_lambda_eval:
            try:
                e = self._get_lambda_evaluator()
                return e
            except Exception:
                warnings.warn("\nWarning: creating lambda evaluator failed. "
                              "Falling back on sympy subs evaluator.")
        return self._get_sympy_evaluator()
    def _get_sympy_evaluator(self):
        raise NotImplementedError()
    def _get_lambda_evaluator(self):
        raise NotImplementedError()
    def _on_calculate_verts(self):
        raise NotImplementedError()
    def _on_calculate_cverts(self):
        raise NotImplementedError()
    ## Base member functions
    def __init__(self, *args, **kwargs):
        # Per-axis bounds are tracked as [min, max, flag] triples, starting at
        # the empty interval [+inf, -inf, 0] so any real point widens them.
        self.verts = []
        self.cverts = []
        self.bounds = [[S.Infinity, -S.Infinity, 0],
                       [S.Infinity, -S.Infinity, 0],
                       [S.Infinity, -S.Infinity, 0]]
        self.cbounds = [[S.Infinity, -S.Infinity, 0],
                        [S.Infinity, -S.Infinity, 0],
                        [S.Infinity, -S.Infinity, 0]]
        self._draw_lock = RLock()
        self._calculating_verts = Event()
        self._calculating_cverts = Event()
        self._calculating_verts_pos = 0.0
        self._calculating_verts_len = 0.0
        self._calculating_cverts_pos = 0.0
        self._calculating_cverts_len = 0.0
        self._max_render_stack_size = 3
        # Render stacks start with a -1 marker element meaning "nothing yet"
        # (see _render_stack_top and the push_* methods).
        self._draw_wireframe = [-1]
        self._draw_solid = [-1]
        self._style = None
        self._color = None
        self.predraw = []
        self.postdraw = []
        self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None
        self.style = self.options.pop('style', '')
        self.color = self.options.pop('color', 'rainbow')
        self.bounds_callback = kwargs.pop('bounds_callback', None)
        self._on_calculate()
    # Decorator defined in the class body: serializes the wrapped method on
    # the instance's _draw_lock (an RLock, so nested decorated calls are safe).
    def synchronized(f):
        def w(self, *args, **kwargs):
            self._draw_lock.acquire()
            try:
                r = f(self, *args, **kwargs)
                return r
            finally:
                self._draw_lock.release()
        return w
    @synchronized
    def push_wireframe(self, function):
        """
        Push a function which performs gl commands
        used to build a display list. (The list is
        built outside of the function)
        """
        assert callable(function)
        self._draw_wireframe.append(function)
        if len(self._draw_wireframe) > self._max_render_stack_size:
            del self._draw_wireframe[1] # leave marker element
    @synchronized
    def push_solid(self, function):
        """
        Push a function which performs gl commands
        used to build a display list. (The list is
        built outside of the function)
        """
        assert callable(function)
        self._draw_solid.append(function)
        if len(self._draw_solid) > self._max_render_stack_size:
            del self._draw_solid[1] # leave marker element
    def _create_display_list(self, function):
        # Compile the GL commands emitted by `function` into a display list.
        dl = glGenLists(1)
        glNewList(dl, GL_COMPILE)
        function()
        glEndList()
        return dl
    def _render_stack_top(self, render_stack):
        # Stack entries are: -1 (marker, nothing to draw), a callable (not yet
        # compiled), or a (display_list, callable) pair. Callables are lazily
        # compiled here and the result cached in place.
        top = render_stack[-1]
        if top == -1:
            return -1 # nothing to display
        elif callable(top):
            dl = self._create_display_list(top)
            render_stack[-1] = (dl, top)
            return dl # display newly added list
        elif len(top) == 2:
            if GL_TRUE == glIsList(top[0]):
                return top[0] # display stored list
            dl = self._create_display_list(top[1])
            render_stack[-1] = (dl, top[1])
            return dl # display regenerated list
    def _draw_solid_display_list(self, dl):
        glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        glCallList(dl)
        glPopAttrib()
    def _draw_wireframe_display_list(self, dl):
        glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT)
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        glEnable(GL_POLYGON_OFFSET_LINE)
        # Offset pushes lines toward the viewer so they are not z-fought by
        # the coincident solid pass.
        glPolygonOffset(-0.005, -50.0)
        glCallList(dl)
        glPopAttrib()
    @synchronized
    def draw(self):
        # Run pre-draw hooks, then the solid and/or wireframe passes selected
        # by the current style bitmask, then post-draw hooks.
        for f in self.predraw:
            if callable(f):
                f()
        if self.style_override:
            style = self.styles[self.style_override]
        else:
            style = self.styles[self._style]
        # Draw solid component if style includes solid
        if style & 2:
            dl = self._render_stack_top(self._draw_solid)
            if dl > 0 and GL_TRUE == glIsList(dl):
                self._draw_solid_display_list(dl)
        # Draw wireframe component if style includes wireframe
        if style & 1:
            dl = self._render_stack_top(self._draw_wireframe)
            if dl > 0 and GL_TRUE == glIsList(dl):
                self._draw_wireframe_display_list(dl)
        for f in self.postdraw:
            if callable(f):
                f()
    def _on_change_color(self, color):
        # Color vertex recalculation happens on a background thread.
        Thread(target=self._calculate_cverts).start()
    def _on_calculate(self):
        Thread(target=self._calculate_all).start()
    def _calculate_all(self):
        self._calculate_verts()
        self._calculate_cverts()
    def _calculate_verts(self):
        if self._calculating_verts.isSet():
            return
        self._calculating_verts.set()
        try:
            self._on_calculate_verts()
        finally:
            self._calculating_verts.clear()
        if callable(self.bounds_callback):
            self.bounds_callback()
    def _calculate_cverts(self):
        # NOTE(review): this first check tests _calculating_verts (not
        # _calculating_cverts) — presumably to avoid coloring while vertices
        # are still being computed; confirm that is the intent.
        if self._calculating_verts.isSet():
            return
        while self._calculating_cverts.isSet():
            sleep(0) # wait for previous calculation
        self._calculating_cverts.set()
        try:
            self._on_calculate_cverts()
        finally:
            self._calculating_cverts.clear()
    def _get_calculating_verts(self):
        return self._calculating_verts.isSet()
    def _get_calculating_verts_pos(self):
        return self._calculating_verts_pos
    def _get_calculating_verts_len(self):
        return self._calculating_verts_len
    def _get_calculating_cverts(self):
        return self._calculating_cverts.isSet()
    def _get_calculating_cverts_pos(self):
        return self._calculating_cverts_pos
    def _get_calculating_cverts_len(self):
        return self._calculating_cverts_len
    ## Property handlers
    def _get_style(self):
        return self._style
    @synchronized
    def _set_style(self, v):
        # v == None: no-op; v == '': pick a default based on interval density
        # (dense plots render solid-only, sparse ones render both passes).
        if v is None:
            return
        if v == '':
            step_max = 0
            for i in self.intervals:
                if i.v_steps is None:
                    continue
                step_max = max([step_max, int(i.v_steps)])
            v = ['both', 'solid'][step_max > 40]
        #try:
        if v not in self.styles:
            raise ValueError("v should be there in self.styles")
        if v == self._style:
            return
        self._style = v
        #except Exception as e:
            #raise RuntimeError(("Style change failed. "
            #                    "Reason: %s is not a valid "
            #                    "style. Use one of %s.") %
            #                   (str(v), ', '.join(self.styles.iterkeys())))
    def _get_color(self):
        return self._color
    @synchronized
    def _set_color(self, v):
        # Accepts None, a ColorScheme constructor argument, or a sequence of
        # such arguments; triggers a background recolor only on actual change.
        try:
            if v is not None:
                if is_sequence(v):
                    v = ColorScheme(*v)
                else:
                    v = ColorScheme(v)
            if repr(v) == repr(self._color):
                return
            self._on_change_color(v)
            self._color = v
        except Exception as e:
            raise RuntimeError(("Color change failed. "
                                "Reason: %s" % (str(e))))
    style = property(_get_style, _set_style)
    color = property(_get_color, _set_color)
    calculating_verts = property(_get_calculating_verts)
    calculating_verts_pos = property(_get_calculating_verts_pos)
    calculating_verts_len = property(_get_calculating_verts_len)
    calculating_cverts = property(_get_calculating_cverts)
    calculating_cverts_pos = property(_get_calculating_cverts_pos)
    calculating_cverts_len = property(_get_calculating_cverts_len)
    ## String representations
    def __str__(self):
        f = ", ".join(str(d) for d in self.d_vars)
        o = "'mode=%s'" % (self.primary_alias)
        return ", ".join([f, o])
    def __repr__(self):
        f = ", ".join(str(d) for d in self.d_vars)
        i = ", ".join(str(i) for i in self.intervals)
        d = [('mode', self.primary_alias),
             ('color', str(self.color)),
             ('style', str(self.style))]
        o = "'%s'" % (("; ".join("%s=%s" % (k, v)
                                for k, v in d if v != 'None')))
        return ", ".join([f, i, o])
|
mbedmicro/pyOCD | refs/heads/master | pyocd/core/session.py | 1 | # pyOCD debugger
# Copyright (c) 2018-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import logging.config
import six
import yaml
import os
import weakref
# inspect.getargspec is deprecated in Python 3.
try:
from inspect import getfullargspec as getargspec
except ImportError:
from inspect import getargspec
from . import exceptions
from .options_manager import OptionsManager
from ..board.board import Board
from ..utility.notification import Notifier
# Module-level logger for this file.
LOG = logging.getLogger(__name__)
## @brief Set of default config filenames to search for.
# Searched in the project directory, in this order (see Session.find_user_file).
_CONFIG_FILE_NAMES = [
    "pyocd.yaml",
    "pyocd.yml",
    ".pyocd.yaml",
    ".pyocd.yml",
    ]
## @brief Set of default user script names to search for.
_USER_SCRIPT_NAMES = [
    "pyocd_user.py",
    ".pyocd_user.py",
    ]
class Session(Notifier):
    """! @brief Top-level object for a debug session.
    This class represents a debug session with a single debug probe. It is the root of the object
    graph, where it owns the debug probe and the board objects.
    Another important function of this class is that it contains a dictionary of session-scope
    options. These would normally be passed in from the command line. Options can also be loaded
    from a config file.
    Precedence for session options:
    1. Keyword arguments to constructor.
    2. _options_ parameter to constructor.
    3. Probe-specific options from a config file.
    4. General options from a config file.
    5. _option_defaults_ parameter to constructor.
    The session also tracks several other objects:
    - @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances created for any cores.
    - @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer".
    - The user script proxy.
    See the @ref pyocd.core.helpers.ConnectHelper "ConnectHelper" class for several methods that
    make it easy to create new sessions, with or without user interaction in the case of multiple
    available debug probes. A common pattern is to combine @ref
    pyocd.core.helpers.ConnectHelper.session_with_chosen_probe()
    "ConnectHelper.session_with_chosen_probe()" and a **with** block.
    A Session instance can be used as a context manager. The session will, by default, be
    automatically opened when the context is entered. And, of course, it will be closed when the
    **with** block is exited (which is harmless if the session was never opened). If you wish to
    disable automatic opening, set the `auto_open` parameter to the constructor to False. If an
    exception is raised while opening a session inside a **with** statement, the session will be
    closed for you to undo any partial initialisation.
    """
    ## @brief Weak reference to the most recently created session.
    _current_session = None
    @classmethod
    def get_current(cls):
        """! @brief Return the most recently created Session instance or a default Session.
        By default this method will return the most recently created Session object that is
        still alive. If no live session exists, a new default session will be created and returned.
        That at least provides access to the user's config file(s).
        Used primarily so code that doesn't have a session reference can access session options. This
        method should only be used to access options that are unlikely to differ between sessions,
        or for debug or other purposes.
        """
        if cls._current_session is not None:
            # Dereference the weakref; it returns None once the session has
            # been garbage collected, in which case fall through and create
            # a fresh default session as documented above.
            session = cls._current_session()
            if session is not None:
                return session
        return Session(None)
    def __init__(self, probe, auto_open=True, options=None, option_defaults=None, **kwargs):
        """! @brief Session constructor.
        Creates a new session using the provided debug probe. Session options are merged from the
        _options_ parameter and any keyword arguments. Normally a board instance is created that can
        either be a generic board or a board associated with the debug probe.
        Note that the 'project_dir' and 'config' options must be set in either keyword arguments or
        the _options_ parameter.
        Passing in a _probe_ that is None is allowed. This is useful to create a session that operates
        only as a container for session options. In this case, the board instance is not created, so the
        #board attribute will be None. Such a Session cannot be opened.
        @param self
        @param probe The @ref pyocd.probe.debug_probe. "DebugProbe" instance. May be None.
        @param auto_open Whether to automatically open the session when used as a context manager.
        @param options Optional session options dictionary.
        @param option_defaults Optional dictionary of session option values. This dictionary has the
            lowest priority in determining final session option values, and is intended to set new
            defaults for option if they are not set through any other method.
        @param kwargs Session options passed as keyword arguments.
        """
        super(Session, self).__init__()
        Session._current_session = weakref.ref(self)
        self._probe = probe
        self._closed = True
        self._inited = False
        self._user_script_namespace = None
        self._user_script_proxy = None
        self._delegate = None
        self._auto_open = auto_open
        self._options = OptionsManager()
        self._gdbservers = {}
        self._probeserver = None
        # Set this session on the probe, if we were given a probe.
        if probe is not None:
            probe.session = self
        # Update options. Keyword args take precedence over the options dict.
        self._options.add_front(kwargs)
        self._options.add_back(options)
        # Init project directory.
        if self.options.get('project_dir') is None:
            self._project_dir = os.getcwd()
        else:
            self._project_dir = os.path.abspath(os.path.expanduser(self.options.get('project_dir')))
        LOG.debug("Project directory: %s", self.project_dir)
        # Apply common configuration settings from the config file.
        config = self._get_config()
        probesConfig = config.pop('probes', None)
        self._options.add_back(config)
        # Pick up any config file options for this board.
        if (probe is not None) and (probesConfig is not None):
            for uid, settings in probesConfig.items():
                if str(uid).lower() in probe.unique_id.lower():
                    LOG.info("Using config settings for probe %s" % (probe.unique_id))
                    self._options.add_back(settings)
        # Merge in lowest priority options.
        self._options.add_back(option_defaults)
        # Logging config.
        self._configure_logging()
        # Bail early if we weren't provided a probe.
        if probe is None:
            self._board = None
            return
        # Load the user script.
        self._load_user_script()
        # Ask the probe if it has an associated board, and if not then we create a generic one.
        self._board = probe.create_associated_board() \
                        or Board(self, self.options.get('target_override'))
    def _get_config(self):
        """! @brief Load and return the YAML config file contents, or an empty dict."""
        # Load config file if one was provided via options, and no_config option was not set.
        if not self.options.get('no_config'):
            configPath = self.find_user_file('config_file', _CONFIG_FILE_NAMES)
            if configPath is not None:
                try:
                    with open(configPath, 'r') as configFile:
                        LOG.debug("Loading config from: %s", configPath)
                        config = yaml.safe_load(configFile)
                        if not isinstance(config, dict):
                            raise exceptions.Error("configuration file %s does not contain a top-level dictionary"
                                    % configPath)
                        return config
                except IOError as err:
                    LOG.warning("Error attempting to access config file '%s': %s", configPath, err)
        return {}
    def find_user_file(self, option_name, filename_list):
        """! @brief Search the project directory for a file.
        @param option_name Name of a session option that may hold an explicit path, or None.
        @param filename_list Default filenames to look for in the project directory.
        @retval None No matching file was found.
        @retval string An absolute path to the requested file.
        """
        if option_name is not None:
            filePath = self.options.get(option_name)
        else:
            filePath = None
        # Look for default filenames if a path wasn't provided.
        if filePath is None:
            for filename in filename_list:
                thisPath = os.path.join(self.project_dir, filename)
                if os.path.isfile(thisPath):
                    filePath = thisPath
                    break
        # Use the path passed in options, which may be absolute, relative to the
        # home directory, or relative to the project directory.
        else:
            filePath = os.path.expanduser(filePath)
            if not os.path.isabs(filePath):
                filePath = os.path.join(self.project_dir, filePath)
        return filePath
    def _configure_logging(self):
        """! @brief Load a logging config dict or file."""
        # Get logging config that could have been loaded from the config file.
        config = self.options.get('logging')
        # Allow logging setting to refer to another file.
        if isinstance(config, six.string_types):
            loggingConfigPath = self.find_user_file(None, [config])
            if loggingConfigPath is not None:
                try:
                    with open(loggingConfigPath, 'r') as configFile:
                        config = yaml.safe_load(configFile)
                        # Log the path, not the parsed dictionary.
                        LOG.debug("Using logging configuration from: %s", loggingConfigPath)
                except IOError as err:
                    LOG.warning("Error attempting to load logging config file '%s': %s", config, err)
                    return
        if config is not None:
            # Stuff a version key if it's missing, to make it easier to use.
            if 'version' not in config:
                config['version'] = 1
            # Set a different default for disabling existing loggers.
            if 'disable_existing_loggers' not in config:
                config['disable_existing_loggers'] = False
            # Remove an empty 'loggers' key.
            if ('loggers' in config) and (config['loggers'] is None):
                del config['loggers']
            try:
                logging.config.dictConfig(config)
            except (ValueError, TypeError, AttributeError, ImportError) as err:
                LOG.warning("Error applying logging configuration: %s", err)
    @property
    def is_open(self):
        """! @brief Boolean of whether the session has been opened."""
        return self._inited and not self._closed
    @property
    def probe(self):
        """! @brief The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" instance."""
        return self._probe
    @property
    def board(self):
        """! @brief The @ref pyocd.board.board.Board "Board" object."""
        return self._board
    @property
    def target(self):
        """! @brief The @ref pyocd.core.target.soc_target "SoCTarget" object representing the SoC.
        This is the @ref pyocd.core.target.soc_target "SoCTarget" instance owned by the board.
        """
        return self.board.target
    @property
    def options(self):
        """! @brief The @ref pyocd.core.options_manager.OptionsManager "OptionsManager" object."""
        return self._options
    @property
    def project_dir(self):
        """! @brief Path to the project directory."""
        return self._project_dir
    @property
    def delegate(self):
        """! @brief An optional delegate object for customizing behaviour."""
        return self._delegate
    @delegate.setter
    def delegate(self, new_delegate):
        """! @brief Setter for the `delegate` property."""
        self._delegate = new_delegate
    @property
    def user_script_proxy(self):
        """! @brief The UserScriptDelegateProxy object for a loaded user script."""
        return self._user_script_proxy
    @property
    def gdbservers(self):
        """! @brief Dictionary of core numbers to @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances."""
        return self._gdbservers
    @property
    def probeserver(self):
        """! @brief A @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer" instance."""
        return self._probeserver
    @probeserver.setter
    def probeserver(self, server):
        """! @brief Setter for the `probeserver` property."""
        self._probeserver = server
    @property
    def log_tracebacks(self):
        """! @brief Quick access to debug.traceback option since it is widely used."""
        return self.options.get('debug.traceback')
    def __enter__(self):
        assert self._probe is not None
        if self._auto_open:
            try:
                self.open()
            except Exception:
                # Close to undo partial initialisation before re-raising.
                self.close()
                raise
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        # Parameters renamed from (type, value, traceback) to avoid shadowing
        # builtins; the runtime passes them positionally so this is safe.
        self.close()
        return False
    def _init_user_script_namespace(self, user_script_path):
        """! @brief Create the namespace dict used for user scripts.
        This initial namespace has only those objects that are available very early in the
        session init process. For instance, the Target instance isn't available yet. The
        _update_user_script_namespace() method is used to add such objects to the namespace
        later on.
        """
        import pyocd
        import pyocd.flash.file_programmer
        self._user_script_namespace = {
            # Modules and classes
            'pyocd': pyocd,
            'exceptions': pyocd.core.exceptions,
            'Error': pyocd.core.exceptions.Error,
            'TransferError': pyocd.core.exceptions.TransferError,
            'TransferFaultError': pyocd.core.exceptions.TransferFaultError,
            'Target': pyocd.core.target.Target,
            'State': pyocd.core.target.Target.State,
            'SecurityState': pyocd.core.target.Target.SecurityState,
            'BreakpointType': pyocd.core.target.Target.BreakpointType,
            'WatchpointType': pyocd.core.target.Target.WatchpointType,
            'VectorCatch': pyocd.core.target.Target.VectorCatch,
            'Event': pyocd.core.target.Target.Event,
            'RunType': pyocd.core.target.Target.RunType,
            'HaltReason': pyocd.core.target.Target.HaltReason,
            'ResetType': pyocd.core.target.Target.ResetType,
            'MemoryType': pyocd.core.memory_map.MemoryType,
            'MemoryMap': pyocd.core.memory_map.MemoryMap,
            'RamRegion': pyocd.core.memory_map.RamRegion,
            'RomRegion': pyocd.core.memory_map.RomRegion,
            'FlashRegion': pyocd.core.memory_map.FlashRegion,
            'DeviceRegion': pyocd.core.memory_map.DeviceRegion,
            'FileProgrammer': pyocd.flash.file_programmer.FileProgrammer,
            'FlashEraser': pyocd.flash.eraser.FlashEraser,
            'FlashLoader': pyocd.flash.loader.FlashLoader,
            # User script info
            '__name__': os.path.splitext(os.path.basename(user_script_path))[0],
            '__file__': user_script_path,
            # Objects
            'session': self,
            'options': self.options,
            'LOG': logging.getLogger('pyocd.user_script'),
            }
    def _update_user_script_namespace(self):
        """! @brief Add objects available only after init to the user script namespace."""
        if self._user_script_namespace is not None:
            self._user_script_namespace.update({
                'probe': self.probe,
                'board': self.board,
                'target': self.target,
                'dp': self.target.dp,
                'aps': self.target.aps,
                })
    def _load_user_script(self):
        """! @brief Locate, compile and execute the user script, installing its proxy."""
        scriptPath = self.find_user_file('user_script', _USER_SCRIPT_NAMES)
        if scriptPath is not None:
            try:
                # Read the script source.
                with open(scriptPath, 'r') as scriptFile:
                    LOG.debug("Loading user script: %s", scriptPath)
                    scriptSource = scriptFile.read()
                self._init_user_script_namespace(scriptPath)
                scriptCode = compile(scriptSource, scriptPath, 'exec')
                # Executing the code will create definitions in the namespace for any
                # functions or classes. A single namespace is shared for both globals and
                # locals so that script-level definitions are available within the
                # script functions.
                six.exec_(scriptCode, self._user_script_namespace, self._user_script_namespace)
                # Create the proxy for the user script. It becomes the delegate unless
                # another delegate was already set.
                self._user_script_proxy = UserScriptDelegateProxy(self._user_script_namespace)
                if self._delegate is None:
                    self._delegate = self._user_script_proxy
            except IOError as err:
                LOG.warning("Error attempting to load user script '%s': %s", scriptPath, err)
    def open(self, init_board=True):
        """! @brief Open the session.
        This method does everything necessary to begin a debug session. It first loads the user
        script, if there is one. The user script will be available via the _user_script_proxy_
        property. Then it opens the debug probe and sets the clock rate from the `frequency` user
        option. Finally, it inits the board (which will init the target, which performs the
        full target init sequence).
        @param self
        @param init_board This parameter lets you prevent the board from being inited, which can
            be useful in board bringup situations. It's also used by pyocd commander's "no init"
            feature.
        """
        if not self._inited:
            assert self._probe is not None, "Cannot open a session without a probe."
            assert self._board is not None, "Must have a board to open a session."
            # Add in the full set of objects for the user script.
            self._update_user_script_namespace()
            self._probe.open()
            self._closed = False
            self._probe.set_clock(self.options.get('frequency'))
            if init_board:
                self._board.init()
                self._inited = True
    def close(self):
        """! @brief Close the session.
        Uninits the board and disconnects then closes the probe.
        Best-effort: errors during teardown are logged, not raised, so that
        all cleanup steps get a chance to run.
        """
        if self._closed:
            return
        self._closed = True
        LOG.debug("uninit session %s", self)
        if self._inited:
            try:
                self.board.uninit()
                self._inited = False
            except Exception:
                # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit propagate.
                LOG.error("exception during board uninit:", exc_info=self.log_tracebacks)
        if self._probe.is_open:
            try:
                self._probe.disconnect()
            except Exception:
                LOG.error("probe exception during disconnect:", exc_info=self.log_tracebacks)
            try:
                self._probe.close()
            except Exception:
                LOG.error("probe exception during close:", exc_info=self.log_tracebacks)
class UserScriptFunctionProxy(object):
    """! @brief Proxy for user script functions.
    Wraps a user script function so callers may pass any superset of the
    keyword arguments the function actually declares; extras are dropped.
    """
    def __init__(self, fn):
        self._fn = fn
        self._spec = getargspec(fn)
    def __call__(self, **kwargs):
        # Forward only those keyword arguments the wrapped function accepts.
        accepted = self._spec.args
        filtered = {name: kwargs[name] for name in accepted if name in kwargs}
        self._fn(**filtered)
class UserScriptDelegateProxy(object):
    """! @brief Delegate proxy for user scripts.
    Attribute access resolves names in the user script namespace, wrapping
    each hit in a UserScriptFunctionProxy.
    """
    def __init__(self, script_namespace):
        super(UserScriptDelegateProxy, self).__init__()
        self._script = script_namespace
    def __getattr__(self, name):
        # Guard clause: unknown names raise AttributeError, as usual.
        if name not in self._script:
            raise AttributeError(name)
        return UserScriptFunctionProxy(self._script[name])
|
qPCR4vir/orange | refs/heads/master | docs/reference/rst/code/testing-example.py | 6 | import Orange
# Load the classic iris data set bundled with Orange.
iris = Orange.data.Table("iris")
# Compare a naive Bayes learner against a majority-class baseline.
learners = [Orange.classification.bayes.NaiveLearner(),
            Orange.classification.majority.MajorityLearner()]
# Evaluate both learners with 5-fold cross-validation.
cv = Orange.evaluation.testing.cross_validation(learners, iris, folds=5)
# Python 2 print statement: one classification-accuracy score per learner.
print ["%.4f" % score for score in Orange.evaluation.scoring.CA(cv)]
|
GovCERT-CZ/dionaea | refs/heads/master | modules/python/scripts/hpfeeds.py | 1 | #*************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2010 Mark Schloesser
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact nepenthesdev@gmail.com
#*
#*******************************************************************************/
from dionaea.core import ihandler, incident, connection
from dionaea.util import sha512file
import os
import logging
import struct
import hashlib
import json
import datetime
from time import gmtime, strftime
try: import pyev
except: pyev = None
logger = logging.getLogger('hpfeeds')
logger.setLevel(logging.DEBUG)

# Debug helper kept for reference: routes logger output to plain print().
#def DEBUGPERF(msg):
#    print(msg)
#logger.debug = DEBUGPERF
#logger.critical = DEBUGPERF

# Chunk size (bytes) used when streaming captured binaries to the broker.
BUFSIZ = 16384

# hpfeeds wire-protocol opcodes.
OP_ERROR = 0
OP_INFO = 1
OP_AUTH = 2
OP_PUBLISH = 3
OP_SUBSCRIBE = 4

# Hard upper bound on any message body.
MAXBUF = 1024**2

# Maximum accepted total message size per opcode (5-byte header included).
SIZES = {
    OP_ERROR: 5+MAXBUF,
    OP_INFO: 5+256+20,
    OP_AUTH: 5+256+20,
    OP_PUBLISH: 5+MAXBUF,
    OP_SUBSCRIBE: 5+256*2,
}

# Broker channel names this handler publishes to.
CAPTURECHAN = 'dionaea.capture'
DCECHAN = 'dionaea.dcerpcrequests'
SCPROFCHAN = 'dionaea.shellcodeprofiles'
UNIQUECHAN = 'mwbinary.dionaea.sensorunique'
class BadClient(Exception):
    """Raised when the peer violates the hpfeeds wire protocol (e.g. a message larger than MAXBUF)."""
    pass
def timestr():
    """Return the current local time with microseconds plus a timezone
    suffix, e.g. ``2024-01-01 12:00:00.000000 UTC +0000``."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    zone = strftime("%Z %z", gmtime())
    return "{0} {1}".format(stamp, zone)
# packs a string with a 1-byte length field
def strpack8(x):
    """Encode *x* as ``<length byte><payload>``; str input is latin1-encoded first."""
    payload = x.encode('latin1') if isinstance(x, str) else x
    # NOTE(review): ``% 0xff`` (255, not 256) mirrors the reference hpfeeds
    # client, so a 255-byte payload yields a length byte of 0. Kept verbatim
    # for wire compatibility — confirm before "fixing".
    return struct.pack('!B', len(payload) % 0xff) + payload
# unpacks a string with a 1-byte length field
def strunpack8(x):
    """Split *x* into ``(payload, remainder)`` using its leading length byte."""
    size = x[0]
    payload, rest = x[1:1 + size], x[1 + size:]
    return payload, rest
def msghdr(op, data):
    """Prepend the 5-byte hpfeeds header (big-endian total length + opcode) to *data*."""
    total = 5 + len(data)
    return struct.pack('!iB', total, op) + data
def msgpublish(ident, chan, data):
    """Build an OP_PUBLISH message carrying *data* on channel *chan* as *ident*."""
    body = strpack8(ident) + strpack8(chan) + data
    return msghdr(OP_PUBLISH, body)
def msgsubscribe(ident, chan):
    """Build an OP_SUBSCRIBE message for channel *chan* as *ident*.

    Note the channel is appended raw (latin1-encoded if str), without a
    length prefix — only the ident is length-prefixed.
    """
    if isinstance(chan, str):
        chan = chan.encode('latin1')
    return msghdr(OP_SUBSCRIBE, strpack8(ident) + chan)
def msgauth(rand, ident, secret):
    """Build an OP_AUTH reply: length-prefixed ident plus SHA-1(nonce + secret).

    *rand* is the random nonce received in the server's OP_INFO message.
    """
    # Renamed local from ``hash`` to avoid shadowing the builtin.
    digest = hashlib.sha1(bytes(rand) + secret).digest()
    return msghdr(OP_AUTH, strpack8(ident) + digest)
class FeedUnpack(object):
    """Incremental decoder for the hpfeeds wire format.

    Bytes are appended with feed(); iterating the instance (or calling
    unpack()) yields complete ``(opcode, body)`` messages. StopIteration
    signals that no full message is buffered yet; BadClient signals an
    oversized message.
    """

    def __init__(self):
        self.buf = bytearray()

    def __iter__(self):
        return self

    def __next__(self):
        return self.unpack()

    def feed(self, data):
        """Append raw bytes received from the socket to the buffer."""
        self.buf.extend(data)

    def unpack(self):
        """Pop and return one complete ``(opcode, body)`` message.

        Raises StopIteration if fewer than header-plus-body bytes are
        buffered, and BadClient if the announced length exceeds the per-opcode
        maximum.
        """
        if len(self.buf) < 5:
            raise StopIteration('No message.')
        total_len, opcode = struct.unpack('!iB', self.buf[:5])
        if total_len > SIZES.get(opcode, MAXBUF):
            raise BadClient('Not respecting MAXBUF.')
        if len(self.buf) < total_len:
            raise StopIteration('No message.')
        body = self.buf[5:total_len]
        del self.buf[:total_len]
        return opcode, body
class hpclient(connection):
    """hpfeeds broker client built on dionaea's asynchronous ``connection``.

    Authenticates with ident/secret when the broker sends OP_INFO, publishes
    JSON events, and can stream captured binary files to the broker. While a
    file upload is in flight, publish messages are queued and incoming broker
    data is left buffered, so the two byte streams never interleave.
    """

    def __init__(self, server, port, ident, secret):
        logger.debug('hpclient init')
        connection.__init__(self, 'tcp')
        self.unpacker = FeedUnpack()
        # Credentials are kept as bytes for the binary wire protocol.
        self.ident, self.secret = ident.encode(
            'latin1'), secret.encode('latin1')

        self.connect(server, port)
        self.timeouts.reconnect = 10.0
        self.sendfiles = []    # file paths queued for streaming
        self.msgqueue = []     # publish messages deferred while a file streams
        self.filehandle = None # open handle of the file currently streaming
        self.connected = False

    def handle_established(self):
        # Connection callback: TCP session to the broker is up.
        self.connected = True
        logger.debug('hpclient established')

    def handle_io_in(self, indata):
        """Feed received broker bytes to the unpacker and dispatch complete
        messages. Always reports all of *indata* as consumed."""
        self.unpacker.feed(indata)

        # if we are currently streaming a file, delay handling incoming
        # messages
        if self.filehandle:
            return len(indata)

        try:
            for opcode, data in self.unpacker:
                logger.debug(
                    'hpclient msg opcode {0} data {1}'.format(opcode, data))
                if opcode == OP_INFO:
                    # Server hello: broker name plus the auth nonce.
                    name, rand = strunpack8(data)
                    logger.debug(
                        'hpclient server name {0} rand {1}'.format(name, rand))
                    self.send(msgauth(rand, self.ident, self.secret))

                elif opcode == OP_PUBLISH:
                    ident, data = strunpack8(data)
                    chan, data = strunpack8(data)
                    logger.debug(
                        'publish to {0} by {1}: {2}'.format(chan, ident, data))

                elif opcode == OP_ERROR:
                    logger.debug('errormessage from server: {0}'.format(data))
                else:
                    logger.debug('unknown opcode message: {0}'.format(opcode))
        except BadClient:
            # Protocol violation — drop the connection.
            logger.critical('unpacker error, disconnecting.')
            self.close()

        return len(indata)

    def handle_io_out(self):
        # Write-ready callback: keep streaming the current file; otherwise
        # flush at most one queued publish message per call.
        if self.filehandle: self.sendfiledata()
        else:
            if self.msgqueue:
                m = self.msgqueue.pop(0)
                self.send(m)

    def publish(self, channel, **kwargs):
        """Publish *kwargs* as a JSON object on *channel*, queuing it if a
        file upload is currently in progress."""
        if self.filehandle: self.msgqueue.append(
            msgpublish(self.ident, channel, json.dumps(kwargs).encode('latin1')))
        else: self.send(msgpublish(self.ident, channel, json.dumps(kwargs).encode('latin1')))

    def sendfile(self, filepath):
        """Stream *filepath* to the broker, or queue it if a stream is active."""
        # does not read complete binary into memory, read and send chunks
        if not self.filehandle:
            self.sendfileheader(filepath)
            self.sendfiledata()
        else: self.sendfiles.append(filepath)

    def sendfileheader(self, filepath):
        """Open *filepath* and send a PUBLISH header whose length field covers
        the whole file; sendfiledata() then streams the body in chunks."""
        self.filehandle = open(filepath, 'rb')
        fsize = os.stat(filepath).st_size
        headc = strpack8(self.ident) + strpack8(UNIQUECHAN)
        headh = struct.pack('!iB', 5+len(headc)+fsize, OP_PUBLISH)
        self.send(headh + headc)

    def sendfiledata(self):
        """Send the next BUFSIZ chunk; on EOF start the next queued file, or
        finish and replay any broker input buffered during the upload."""
        tmp = self.filehandle.read(BUFSIZ)
        if not tmp:
            if self.sendfiles:
                fp = self.sendfiles.pop(0)
                self.sendfileheader(fp)
            else:
                self.filehandle = None
                # process any messages the unpacker buffered meanwhile
                self.handle_io_in(b'')
        else:
            self.send(tmp)

    def handle_timeout_idle(self):
        pass

    def handle_disconnect(self):
        # NOTE(review): the non-zero return presumably asks dionaea to
        # reconnect (see timeouts.reconnect in __init__) — confirm against
        # the connection API.
        logger.info('hpclient disconnect')
        self.connected = False
        return 1

    def handle_error(self, err):
        # logger.warn is a deprecated alias of logger.warning — cleanup candidate.
        logger.warn('hpclient error {0}'.format(err))
        self.connected = False
        return 1
class hpfeedihandler(ihandler):
    """Dionaea incident handler that forwards events to an hpfeeds broker.

    Registers for all incidents ('*') and publishes captures, DCE-RPC
    requests and shellcode profiles through a single hpclient. Optionally
    resolves the sensor's public IP periodically via an HTTP service when
    ``dynip_resolve`` is configured and pyev is available.
    """

    def __init__(self, config):
        logger.debug('hpfeedhandler init')
        self.client = hpclient(
            config['server'], int(config['port']), config['ident'], config['secret'])
        ihandler.__init__(self, '*')

        self.dynip_resolve = config.get('dynip_resolve', '')
        self.dynip_timer = None
        self.ownip = None
        if self.dynip_resolve and 'http' in self.dynip_resolve:
            # ``is None`` would be idiomatic here; code kept as-is.
            if pyev == None:
                logger.debug(
                    'You are missing the python pyev binding in your dionaea installation.')
            else:
                logger.debug('hpfeedihandler will use dynamic IP resolving!')
                self.loop = pyev.default_loop()
                # Resolve after 2s, then every 300s.
                self.dynip_timer = pyev.Timer(
                    2., 300, self.loop, self._dynip_resolve)
                self.dynip_timer.start()

    def stop(self):
        """Stop the periodic dynamic-IP resolution timer, if running."""
        if self.dynip_timer:
            self.dynip_timer.stop()
            self.dynip_timer = None
            self.loop = None

    def _ownip(self, icd):
        """Return the sensor's externally visible IP for incident *icd*.

        Prefers the dynamically resolved address when configured; raises if
        resolution has not completed yet; otherwise falls back to the
        connection's local address.
        """
        if self.dynip_resolve and 'http' in self.dynip_resolve and pyev != None:
            if self.ownip: return self.ownip
            else: raise Exception('Own IP not yet resolved!')
        return icd.con.local.host

    def __del__(self):
        #self.client.close()
        pass

    def handle_incident(self, i):
        # Catch-all handler required by the '*' registration; specific
        # incidents are handled by the named methods below.
        pass

    def handle_incident_dionaea_download_complete_unique(self, i):
        """First-seen binary: publish its hashes, then stream the file itself."""
        self.handle_incident_dionaea_download_complete_again(i)
        if not hasattr(i, 'con') or not self.client.connected: return
        logger.debug(
            'unique complete, publishing md5 {0}, path {1}'.format(i.md5hash, i.file))
        try:
            self.client.sendfile(i.file)
        except Exception as e:
            logger.warn('exception when publishing: {0}'.format(e))

    def handle_incident_dionaea_download_complete_again(self, i):
        """Publish capture metadata (hashes, endpoints, URL) for a download."""
        if not hasattr(i, 'con') or not self.client.connected: return
        logger.debug(
            'hash complete, publishing md5 {0}, path {1}'.format(i.md5hash, i.file))
        try:
            tstamp = timestr()
            sha512 = sha512file(i.file)
            self.client.publish(CAPTURECHAN, time=tstamp,
                saddr=i.con.remote.host,
                sport=str(i.con.remote.port), daddr=self._ownip(i),
                dport=str(i.con.local.port), md5=i.md5hash, sha512=sha512,
                url=i.url
            )
        except Exception as e:
            logger.warn('exception when publishing: {0}'.format(e))

    def handle_incident_dionaea_modules_python_smb_dcerpc_request(self, i):
        """Publish an observed SMB DCE-RPC request (interface UUID + opnum)."""
        if not hasattr(i, 'con') or not self.client.connected: return
        logger.debug(
            'dcerpc request, publishing uuid {0}, opnum {1}'.format(i.uuid, i.opnum))
        try:
            self.client.publish(DCECHAN, uuid=i.uuid, opnum=i.opnum,
                saddr=i.con.remote.host, sport=str(
                    i.con.remote.port),
                daddr=self._ownip(i), dport=str(i.con.local.port),
            )
        except Exception as e:
            logger.warn('exception when publishing: {0}'.format(e))

    def handle_incident_dionaea_module_emu_profile(self, icd):
        """Publish a shellcode emulation profile."""
        if not hasattr(icd, 'con') or not self.client.connected: return
        logger.debug(
            'emu profile, publishing length {0}'.format(len(icd.profile)))
        try:
            self.client.publish(SCPROFCHAN, profile=icd.profile)
        except Exception as e:
            logger.warn('exception when publishing: {0}'.format(e))

    def _dynip_resolve(self, events, data):
        # Timer callback: fetch the external-IP page; the result arrives via
        # the dynipresult incident below.
        i = incident("dionaea.upload.request")
        i._url = self.dynip_resolve
        i._callback = "dionaea.modules.python.hpfeeds.dynipresult"
        i.report()

    def handle_incident_dionaea_modules_python_hpfeeds_dynipresult(self, icd):
        """Store the downloaded external-IP response as this sensor's address."""
        fh = open(icd.path, mode="rb")
        # NOTE(review): kept as raw bytes — downstream json.dumps of daddr
        # would fail on bytes; confirm expected type.
        self.ownip = fh.read().strip()
        logger.debug('resolved own IP to: {0}'.format(self.ownip))
        fh.close()
fh.close()
|
dave-shawley/setupext-janitor | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
import setuptools
import setupext_janitor.janitor
# Package metadata for setupext-janitor, a setuptools plugin that extends
# ``setup.py clean``.
setuptools.setup(
    name='setupext-janitor',
    # NOTE(review): relies on setupext_janitor/__init__.py exposing
    # ``version`` — only the ``janitor`` submodule is imported above; confirm.
    version=setupext_janitor.version,
    author='Dave Shawley',
    author_email='daveshawley@gmail.com',
    url='http://github.com/dave-shawley/setupext-janitor',
    description='Making setup.py clean more useful.',
    long_description=open('README.rst').read(),
    packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
    zip_safe=True,
    platforms='any',
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Framework :: Setuptools Plugin',
        'Development Status :: 5 - Production/Stable',
    ],
    # Registers the custom command for projects that install this package...
    entry_points={
        'distutils.commands': [
            'clean = setupext_janitor.janitor:CleanCommand',
        ],
    },
    # ...and wires it up for this package's own setup.py.
    cmdclass={
        'clean': setupext_janitor.janitor.CleanCommand,
    },
    extras_require={
        'dev': [
            'coverage==4.5.3',
            'flake8==3.7.7',
            'mock==1.0.1; python_version<"3"',
            'nose==1.3.7',
            'sphinx==1.8.5',
            'sphinx-rtd-theme==0.4.3',
            'tox==3.9.0',
        ],
    },
)
|
NicoSantangelo/sublime-text-trello | refs/heads/master | lib/requests/packages/urllib3/request.py | 60 | # urllib3/request.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are encoded
    in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
    _encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Bug fix: this previously did ``raise NotImplemented(...)`` —
        # NotImplemented is a non-callable singleton meant for rich
        # comparisons, so calling it raised TypeError instead of signalling
        # an abstract method. NotImplementedError is the correct exception.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the option
        to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
        payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request signing,
        such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example: ::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will be
        overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if encode_multipart:
            body, content_type = encode_multipart_formdata(fields or {},
                                                           boundary=multipart_boundary)
        else:
            body, content_type = (urlencode(fields or {}),
                                  'application/x-www-form-urlencoded')

        if headers is None:
            headers = self.headers

        headers_ = {'Content-Type': content_type}
        headers_.update(headers)

        return self.urlopen(method, url, body=body, headers=headers_,
                            **urlopen_kw)
|
Brainiq7/Ananse | refs/heads/master | ananse_dl/extractor/myvideo.py | 1 | from __future__ import unicode_literals
import binascii
import base64
import hashlib
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_ord,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class MyVideoIE(InfoExtractor):
    """Extractor for myvideo.de watch pages.

    Tries three strategies in order: a direct ``<source>`` tag, the JSON
    video-config service, and finally the legacy RC4-encrypted XML config.
    """

    _VALID_URL = r'http://(?:www\.)?myvideo\.de/(?:[^/]+/)?watch/(?P<id>[0-9]+)/[^?/]+.*'
    IE_NAME = 'myvideo'
    _TEST = {
        'url': 'http://www.myvideo.de/watch/8229274/bowling_fail_or_win',
        'md5': '2d2753e8130479ba2cb7e0a37002053e',
        'info_dict': {
            'id': '8229274',
            'ext': 'flv',
            'title': 'bowling-fail-or-win',
        }
    }

    # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
    # Released into the Public Domain by Tristan Fischer on 2013-05-19
    # https://github.com/rg3/ananse/pull/842

    def __rc4crypt(self, data, key):
        """Plain RC4 (KSA + PRGA) over *data* with *key*, returned as str.

        NOTE(review): mixes bytes input with a str output via chr() —
        Python-2-era convention; the callers' regexes operate on the str.
        """
        x = 0
        box = list(range(256))
        # Key-scheduling algorithm: permute the 256-entry state box.
        for i in list(range(256)):
            x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
            box[i], box[x] = box[x], box[i]
        x = 0
        y = 0
        out = ''
        # Pseudo-random generation: XOR each input byte with the keystream.
        for char in data:
            x = (x + 1) % 256
            y = (y + box[x]) % 256
            box[x], box[y] = box[y], box[x]
            out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
        return out

    def __md5(self, s):
        """Hex MD5 digest of bytes *s*, returned as bytes."""
        return hashlib.md5(s).hexdigest().encode()

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Double-base64-encoded site key used to derive the RC4 key below.
        GK = (
            b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
            b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
            b'TnpsbA0KTVRkbU1tSTRNdz09'
        )

        # Get video webpage
        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
        webpage = self._download_webpage(webpage_url, video_id)

        # Strategy 1: a direct <source src='...'> tag on the page.
        mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
        if mobj is not None:
            self.report_extraction(video_id)
            video_url = mobj.group(1) + '.flv'

            video_title = self._html_search_regex('<title>([^<]+)</title>',
                webpage, 'title')

            return {
                'id': video_id,
                'url': video_url,
                'title': video_title,
            }

        # Strategy 2: JSON config service (response is base64-encoded JSON).
        mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage)
        if mobj is not None:
            request = compat_urllib_request.Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '')
            response = self._download_webpage(request, video_id,
                                              'Downloading video info')
            info = json.loads(base64.b64decode(response).decode('utf-8'))
            return {
                'id': video_id,
                'title': info['title'],
                'url': info['streaming_url'].replace('rtmpe', 'rtmpt'),
                'play_path': info['filename'],
                'ext': 'flv',
                'thumbnail': info['thumbnail'][0]['url'],
            }

        # Strategy 3 — try encxml: legacy RC4-encrypted XML player config.
        mobj = re.search('var flashvars={(.+?)}', webpage)
        if mobj is None:
            raise ExtractorError('Unable to extract video')

        params = {}
        encxml = ''
        sec = mobj.group(1)
        # Collect flashvars; '_encxml' holds the (urlencoded) config URL.
        for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
            if not a == '_encxml':
                params[a] = b
            else:
                encxml = compat_urllib_parse.unquote(b)
        if not params.get('domain'):
            params['domain'] = 'www.myvideo.de'
        xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
        if 'flash_playertype=MTV' in xmldata_url:
            self._downloader.report_warning('avoiding MTV player')
            xmldata_url = (
                'http://www.myvideo.de/dynamic/get_player_video_xml.php'
                '?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
            ) % video_id

        # get enc data: hex payload after the first '=' in the response.
        enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
        enc_data_b = binascii.unhexlify(enc_data)
        # RC4 key = md5(base64^-2(GK) + md5(video_id)).
        sk = self.__md5(
            base64.b64decode(base64.b64decode(GK)) +
            self.__md5(
                str(video_id).encode('utf-8')
            )
        )
        dec_data = self.__rc4crypt(enc_data_b, sk)

        # extracting infos
        self.report_extraction(video_id)

        video_url = None
        mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
        if mobj:
            video_url = compat_urllib_parse.unquote(mobj.group(1))
            if 'myvideo2flash' in video_url:
                self.report_warning(
                    'Rewriting URL to use unencrypted rtmp:// ...',
                    video_id)
                video_url = video_url.replace('rtmpe://', 'rtmp://')

        if not video_url:
            # extract non rtmp videos
            mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
            if mobj is None:
                raise ExtractorError('unable to extract url')
            video_url = compat_urllib_parse.unquote(mobj.group(1)) + compat_urllib_parse.unquote(mobj.group(2))

        video_file = self._search_regex('source=\'(.*?)\'', dec_data, 'video file')
        video_file = compat_urllib_parse.unquote(video_file)

        # rtmp play_path: '<ext>:<path>' unless the source is an f4m manifest.
        if not video_file.endswith('f4m'):
            ppath, prefix = video_file.split('.')
            video_playpath = '%s:%s' % (prefix, ppath)
        else:
            video_playpath = ''

        video_swfobj = self._search_regex('swfobject.embedSWF\(\'(.+?)\'', webpage, 'swfobj')
        video_swfobj = compat_urllib_parse.unquote(video_swfobj)

        video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
            webpage, 'title')

        return {
            'id': video_id,
            'url': video_url,
            'tc_url': video_url,
            'title': video_title,
            'ext': 'flv',
            'play_path': video_playpath,
            'player_url': video_swfobj,
        }
|
CAPTools/CAPCollector | refs/heads/master | core/migrations/0004_geocodepreviewpolygon.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Creates the GeocodePreviewPolygon model (cached polygon text keyed by a
    string ID)."""

    # Must be applied after the alert 'updated' field migration.
    dependencies = [
        ('core', '0003_alert_updated'),
    ]

    operations = [
        migrations.CreateModel(
            name='GeocodePreviewPolygon',
            fields=[
                ('id', models.CharField(max_length=255, serialize=False, verbose_name=b'ID', primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Creation time')),
                ('last_modified_at', models.DateTimeField(auto_now=True, verbose_name='Last modification time')),
                ('content', models.TextField(verbose_name='Polygons')),
            ],
            options={
                # NOTE(review): plural form equals the singular — possibly an
                # oversight, but changing it would alter admin display.
                'verbose_name': 'Geocode Preview Polygon',
                'verbose_name_plural': 'Geocode Preview Polygon',
            },
        ),
    ]
|
gabrielfalcao/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/tests/regressiontests/forms/localflavor/ar.py | 87 | from django.contrib.localflavor.ar.forms import (ARProvinceSelect,
ARPostalCodeField, ARDNIField, ARCUITField)
from utils import LocalFlavorTestCase
class ARLocalFlavorTests(LocalFlavorTestCase):
    """Tests for the Argentinian (ar) localflavor form fields (Python 2 era)."""

    def test_ARProvinceSelect(self):
        # Renders the province select with 'A' (Salta) preselected.
        f = ARProvinceSelect()
        out = u'''<select name="provincias">
<option value="B">Buenos Aires</option>
<option value="K">Catamarca</option>
<option value="H">Chaco</option>
<option value="U">Chubut</option>
<option value="C">Ciudad Aut\xf3noma de Buenos Aires</option>
<option value="X">C\xf3rdoba</option>
<option value="W">Corrientes</option>
<option value="E">Entre R\xedos</option>
<option value="P">Formosa</option>
<option value="Y">Jujuy</option>
<option value="L">La Pampa</option>
<option value="F">La Rioja</option>
<option value="M">Mendoza</option>
<option value="N">Misiones</option>
<option value="Q">Neuqu\xe9n</option>
<option value="R">R\xedo Negro</option>
<option value="A" selected="selected">Salta</option>
<option value="J">San Juan</option>
<option value="D">San Luis</option>
<option value="Z">Santa Cruz</option>
<option value="S">Santa Fe</option>
<option value="G">Santiago del Estero</option>
<option value="V">Tierra del Fuego, Ant\xe1rtida e Islas del Atl\xe1ntico Sur</option>
<option value="T">Tucum\xe1n</option>
</select>'''
        self.assertEqual(f.render('provincias', 'A'), out)

    def test_ARPostalCodeField(self):
        # Valid: legacy 4-digit codes and the newer CPA format (ANNNNAAA,
        # normalized to upper case).
        error_format = [u'Enter a postal code in the format NNNN or ANNNNAAA.']
        error_atmost = [u'Ensure this value has at most 8 characters (it has 9).']
        error_atleast = [u'Ensure this value has at least 4 characters (it has 3).']
        valid = {
            '5000': '5000',
            'C1064AAB': 'C1064AAB',
            'c1064AAB': 'C1064AAB',
            'C1064aab': 'C1064AAB',
            '4400': '4400',
            # NOTE(review): u'C1064AAB' duplicates the str key above (equal
            # under Python 2), so this entry silently overwrites it — harmless
            # since the value is identical.
            u'C1064AAB': 'C1064AAB',
        }
        invalid = {
            'C1064AABB': error_atmost + error_format,
            'C1064AA': error_format,
            'C1064AB': error_format,
            '106AAB': error_format,
            '500': error_atleast + error_format,
            '5PPP': error_format,
        }
        self.assertFieldOutput(ARPostalCodeField, valid, invalid)

    def test_ARDNIField(self):
        # DNI numbers: 7 or 8 digits, dots stripped on normalization.
        error_length = [u'This field requires 7 or 8 digits.']
        error_digitsonly = [u'This field requires only numbers.']
        valid = {
            '20123456': '20123456',
            '20.123.456': '20123456',
            # NOTE(review): the u'' keys below duplicate the str keys above
            # (equal under Python 2) — redundant but harmless.
            u'20123456': '20123456',
            u'20.123.456': '20123456',
            '20.123456': '20123456',
            '9123456': '9123456',
            '9.123.456': '9123456',
        }
        invalid = {
            '101234566': error_length,
            'W0123456': error_digitsonly,
            '10,123,456': error_digitsonly,
        }
        self.assertFieldOutput(ARDNIField, valid, invalid)

    def test_ARCUITField(self):
        # CUIT tax IDs: normalized to XX-XXXXXXXX-X; final digit is a checksum.
        error_format = [u'Enter a valid CUIT in XX-XXXXXXXX-X or XXXXXXXXXXXX format.']
        error_invalid = [u'Invalid CUIT.']
        valid = {
            '20-10123456-9': '20-10123456-9',
            u'20-10123456-9': '20-10123456-9',
            '27-10345678-4': '27-10345678-4',
            '20101234569': '20-10123456-9',
            '27103456784': '27-10345678-4',
        }
        invalid = {
            # NOTE(review): '2-10123456-9' appears twice in this literal; the
            # second occurrence overwrites the first with the same value.
            '2-10123456-9': error_format,
            '210123456-9': error_format,
            '20-10123456': error_format,
            '20-10123456-': error_format,
            '20-10123456-5': error_invalid,
            '2-10123456-9': error_format,
            '27-10345678-1': error_invalid,
            u'27-10345678-1': error_invalid,
        }
        self.assertFieldOutput(ARCUITField, valid, invalid)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.