gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Collection of builds related views
"""
import json
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.template.loader import render_to_string
import common
from helpers import builds_helper, systems_helper, tags_helper, environs_helper, deploys_helper
import random
import logging
log = logging.getLogger(__name__)
def builds_landing(request):
    """Landing page for the builds section; delegates to get_build_names."""
    return get_build_names(request)
def get_build_names(request):
    """Render a paginated list of build names.

    Query params:
        page_index: 1-based page number (default 1).
        page_size: number of names per page (default DEFAULT_BUILD_SIZE).
    """
    index = int(request.GET.get('page_index', '1'))
    size = int(request.GET.get('page_size', common.DEFAULT_BUILD_SIZE))
    build_names = builds_helper.get_build_names(request, start=index, size=size)
    return render(request, 'builds/build_names.html', {
        'build_names': build_names,
        "pageIndex": index,
        # Report the size actually requested so the pagination controls stay
        # correct when the caller overrides page_size (previously these used
        # DEFAULT_BUILD_SIZE unconditionally).
        "pageSize": size,
        "disablePrevious": index <= 1,
        "disableNext": len(build_names) < size,
    })
def get_build(request, id):
    """Render the details page for a single build.

    ``id`` is the build id from the URL. If the build carries a tag, the
    tag's ``metaInfo`` field (a JSON snapshot of the tagged build) is
    decoded and attached as ``tag["build"]`` for the template.
    """
    info = builds_helper.get_build_and_tag(request, id)
    tag = info.get("tag")
    if tag:
        tag["build"]=json.loads(tag["metaInfo"])
    return render(request, 'builds/build_details.html', {
        "build": info["build"],
        "tag": tag
    })
def list_builds(request, name):
    """Render a paginated list of builds (with tags) for one build name.

    Query params:
        page_index: 1-based page number (default 1).
        page_size: number of builds per page (default DEFAULT_BUILD_SIZE).
    """
    index = int(request.GET.get('page_index', '1'))
    size = int(request.GET.get('page_size', common.DEFAULT_BUILD_SIZE))
    builds = builds_helper.get_builds_and_tags(request, name=name, pageIndex=index, pageSize=size)
    return render(request, 'builds/builds.html', {
        'build_name': name,
        'builds': builds,
        "pageIndex": index,
        # Use the requested size so pagination is correct when page_size is
        # overridden via the query string (was hard-coded DEFAULT_BUILD_SIZE).
        "pageSize": size,
        "disablePrevious": index <= 1,
        "disableNext": len(builds) < size,
    })
def get_all_builds(request):
    """Render the "pick a build" HTML fragment used by deploy dialogs.

    Optional query params: name, branch, page_index, page_size,
    current_build_id (loads the currently deployed build so the template
    can highlight it and resolve the SCM url), override_policy and
    deploy_id (used to surface the deploy's current state).
    """
    name = request.GET.get('name')
    branch = request.GET.get('branch')
    index = int(request.GET.get('page_index', '1'))
    size = int(request.GET.get('page_size', common.DEFAULT_BUILD_SIZE))
    builds = builds_helper.get_builds_and_tags(request, name=name, branch=branch, pageIndex=index,
                                               pageSize=size)
    deploy_state = None
    current_build_id = request.GET.get('current_build_id', None)
    override_policy = request.GET.get('override_policy')
    deploy_id = request.GET.get('deploy_id')
    current_build = None
    scmType = ""
    if current_build_id:
        current_build = builds_helper.get_build_and_tag(request, current_build_id)
        current_build = current_build.get('build')
        scmType = current_build.get('type')
    if deploy_id:
        deploy_config = deploys_helper.get(request, deploy_id)
        if deploy_config:
            deploy_state = deploy_config.get('state', None)
    scm_url = systems_helper.get_scm_url(request, scmType)
    html = render_to_string('builds/pick_a_build.tmpl', {
        "builds": builds,
        "current_build": current_build,
        "scm_url": scm_url,
        "buildName": name,
        "branch": branch,
        "pageIndex": index,
        # Use the requested size so pagination is correct when page_size is
        # overridden via the query string (was hard-coded DEFAULT_BUILD_SIZE).
        "pageSize": size,
        "disablePrevious": index <= 1,
        "disableNext": len(builds) < size,
        "overridePolicy": override_policy,
        "deployState": deploy_state,
    })
    return HttpResponse(html)
# currently we only support search by git commit or SHA, 7 letters or longer
def search_commit(request, commit):
    """Render every build whose commit matches the given SHA."""
    matching_builds = builds_helper.get_builds_and_tags(request, commit=commit)
    context = {
        'commit': commit,
        'builds': matching_builds,
    }
    return render(request, 'builds/builds_by_commit.html', context)
def list_build_branches(request, name):
    """Return the branches known for a build name as a JSON array."""
    branch_list = builds_helper.get_branches(request, name=name)
    payload = json.dumps(branch_list)
    return HttpResponse(payload, content_type="application/json")
def get_more_commits(request):
    """Return the next batch of commit rows, rendered to HTML and wrapped in JSON.

    Used for incremental loading of commit lists; the response carries the
    rendered rows plus the cursor (``new_start_sha``) for the next batch.
    """
    start_sha = request.GET.get('start_sha')
    end_sha = request.GET.get('end_sha')
    repo = request.GET.get('repo')
    scm = request.GET.get('scm')
    commits, truncated, new_start_sha = common.get_commits_batch(
        request, scm, repo, start_sha, end_sha, keep_first=False)
    # show_checkbox arrives as a string flag; only "true" (any case) enables it.
    show_checkbox = request.GET.get('show_checkbox', 'False').lower() == 'true'
    # Random id keeps multiple paginated widgets on one page from colliding.
    pagination_id = random.randint(0, 1000000)
    rows = render_to_string('builds/commit_rows.tmpl', {
        "commits": commits,
        "show_checkbox": show_checkbox,
        "pagination_id": pagination_id,
    })
    payload = {
        'rows': rows,
        'new_start_sha': new_start_sha,
        'truncated': truncated,
    }
    return HttpResponse(json.dumps(payload), content_type="application/json")
def compare_commits(request):
    """Render the commits between two SHAs (first commit included)."""
    start_sha = request.GET.get('start_sha')
    end_sha = request.GET.get('end_sha')
    repo = request.GET.get('repo')
    scm = request.GET.get('scm')
    commits, truncated, new_start_sha = common.get_commits_batch(
        request, scm, repo, start_sha, end_sha, keep_first=True)
    context = {
        "commits": commits,
        "start_sha": new_start_sha,
        "end_sha": end_sha,
        "repo": repo,
        "scm": scm,
        "truncated": truncated,
        "show_checkbox": False,
    }
    return HttpResponse(render_to_string('builds/commits.tmpl', context))
def compare_commits_datatables(request):
    """Render commits between two SHAs for the datatables view.

    Same as compare_commits but fetches a much larger batch (2000) and uses
    the show_commits template.
    """
    start_sha = request.GET.get('start_sha')
    end_sha = request.GET.get('end_sha')
    repo = request.GET.get('repo')
    scm = request.GET.get('scm')
    commits, truncated, new_start_sha = common.get_commits_batch(
        request, scm, repo, start_sha, end_sha, size=2000, keep_first=True)
    context = {
        "commits": commits,
        "start_sha": new_start_sha,
        "end_sha": end_sha,
        "repo": repo,
        "scm": scm,
        "truncated": truncated,
        "show_checkbox": False,
    }
    return HttpResponse(render_to_string('builds/show_commits.tmpl', context))
def tag_build(request, id):
    """Apply a GOOD/BAD tag to a build (POST only).

    A build carries at most one tag: if the build already has a tag whose
    serialized snapshot points at this same build id, that tag is deleted
    before the new one is set. POST params: ``tag_value`` must be "good" or
    "bad" (case-insensitive, otherwise HTTP 400) and ``comments`` is free
    text. Non-POST requests get HTTP 405; success redirects back to the
    build details page.
    """
    if request.method == "POST":
        build_info = builds_helper.get_build_and_tag(request, id)
        current_tag = build_info.get("tag")
        if current_tag:
            # metaInfo holds a JSON snapshot of the tagged build; only remove
            # the existing tag when it actually belongs to this build.
            tagged_build = json.loads(current_tag["metaInfo"])
            if tagged_build["id"] == id:
                log.info("There is already a tag associated with the build. Remove it")
                builds_helper.del_build_tag(request, current_tag["id"])
        tag = {"targetId":id, "targetType":"Build", "comments":request.POST["comments"]}
        value = request.POST["tag_value"]
        if value.lower() == "good":
            tag["value"] = tags_helper.TagValue.GOOD_BUILD
        elif value.lower()=="bad":
            tag["value"] = tags_helper.TagValue.BAD_BUILD
        else:
            return HttpResponse(status=400)
        builds_helper.set_build_tag(request, tag)
        return redirect("/builds/{0}/".format(id))
    else:
        return HttpResponse(status=405)
| |
import sys
import uuid
import gam
from gam.var import *
from gam import controlflow
from gam import display
from gam import gapi
from gam.gapi import directory as gapi_directory
from gam import utils
def delete():
    """Delete the mobile device whose resourceId is given as argv[3]."""
    resource_id = sys.argv[3]
    directory = gapi_directory.build()
    gapi.call(directory.mobiledevices(),
              'delete',
              resourceId=resource_id,
              customerId=GC_Values[GC_CUSTOMER_ID])
def info():
    """Print the full record of one mobile device (resourceId from argv[3])."""
    cd = gapi_directory.build()
    resourceId = sys.argv[3]
    device_info = gapi.call(cd.mobiledevices(),
                            'get',
                            customerId=GC_Values[GC_CUSTOMER_ID],
                            resourceId=resourceId)
    if 'deviceId' in device_info:
        # Escape non-ASCII characters so the device id prints safely.
        device_info['deviceId'] = device_info['deviceId'].encode('unicode-escape').decode(
            UTF8)
    attrib = 'securityPatchLevel'
    # Format the patch level for display; a falsy/zero value is left as-is.
    # NOTE(review): assumes the API returns it as a numeric timestamp string
    # -- confirm against the Directory API schema.
    if attrib in device_info and int(device_info[attrib]):
        device_info[attrib] = utils.formatTimestampYMDHMS(device_info[attrib])
    display.print_json(device_info)
def print_():
    """Write a CSV of mobile devices ('gam print mobile').

    Parses command-line options starting at argv[3], then pages through the
    Directory API mobiledevices list for each query and flattens every
    device into a CSV row.
    """
    cd = gapi_directory.build()
    todrive = False          # upload the CSV to Drive instead of stdout
    titles = []              # CSV column titles, grown as attributes appear
    csvRows = []
    fields = None            # partial-response field mask
    projection = orderBy = sortOrder = None
    queries = [None]         # None means "all devices"
    delimiter = ' '          # joins multi-valued attributes into one cell
    listLimit = 1            # max list items kept (-1 drops list attributes)
    appsLimit = -1           # max apps kept (-1 omits 'applications' entirely)
    i = 3
    # Option parser: each branch consumes its argument(s) and advances i.
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'todrive':
            todrive = True
            i += 1
        elif myarg in ['query', 'queries']:
            queries = gam.getQueries(myarg, sys.argv[i + 1])
            i += 2
        elif myarg == 'delimiter':
            delimiter = sys.argv[i + 1]
            i += 2
        elif myarg == 'listlimit':
            listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=-1)
            i += 2
        elif myarg == 'appslimit':
            appsLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=-1)
            i += 2
        elif myarg == 'fields':
            fields = f'nextPageToken,mobiledevices({sys.argv[i+1]})'
            i += 2
        elif myarg == 'orderby':
            orderBy = sys.argv[i + 1].lower()
            validOrderBy = [
                'deviceid', 'email', 'lastsync', 'model', 'name', 'os',
                'status', 'type'
            ]
            if orderBy not in validOrderBy:
                controlflow.expected_argument_exit('orderby',
                                                   ', '.join(validOrderBy),
                                                   orderBy)
            # Map lowercased choices back to the API's camelCase names.
            if orderBy == 'lastsync':
                orderBy = 'lastSync'
            elif orderBy == 'deviceid':
                orderBy = 'deviceId'
            i += 2
        elif myarg in SORTORDER_CHOICES_MAP:
            sortOrder = SORTORDER_CHOICES_MAP[myarg]
            i += 1
        elif myarg in PROJECTION_CHOICES_MAP:
            projection = PROJECTION_CHOICES_MAP[myarg]
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam print mobile')
    for query in queries:
        gam.printGettingAllItems('Mobile Devices', query)
        page_message = gapi.got_total_items_msg('Mobile Devices', '...\n')
        all_mobile = gapi.get_all_pages(cd.mobiledevices(),
                                        'list',
                                        'mobiledevices',
                                        page_message=page_message,
                                        customerId=GC_Values[GC_CUSTOMER_ID],
                                        query=query,
                                        projection=projection,
                                        fields=fields,
                                        orderBy=orderBy,
                                        sortOrder=sortOrder)
        for mobile in all_mobile:
            row = {}
            for attrib in mobile:
                if attrib in ['kind', 'etag']:
                    continue
                if attrib in ['name', 'email', 'otherAccountsInfo']:
                    # Multi-valued attributes: truncate to listLimit and join.
                    if attrib not in titles:
                        titles.append(attrib)
                    if listLimit > 0:
                        row[attrib] = delimiter.join(
                            mobile[attrib][0:listLimit])
                    elif listLimit == 0:
                        row[attrib] = delimiter.join(mobile[attrib])
                elif attrib == 'applications':
                    # Only included when appsLimit was explicitly set (>= 0).
                    if appsLimit >= 0:
                        if attrib not in titles:
                            titles.append(attrib)
                        applications = []
                        j = 0
                        for app in mobile[attrib]:
                            j += 1
                            if appsLimit and (j > appsLimit):
                                break
                            appDetails = []
                            for field in [
                                    'displayName', 'packageName', 'versionName'
                            ]:
                                appDetails.append(app.get(field, '<None>'))
                            appDetails.append(
                                str(app.get('versionCode', '<None>')))
                            permissions = app.get('permission', [])
                            if permissions:
                                appDetails.append('/'.join(permissions))
                            else:
                                appDetails.append('<None>')
                            applications.append('-'.join(appDetails))
                        row[attrib] = delimiter.join(applications)
                else:
                    # Scalar attributes are copied, with two special cases.
                    if attrib not in titles:
                        titles.append(attrib)
                    if attrib == 'deviceId':
                        row[attrib] = mobile[attrib].encode(
                            'unicode-escape').decode(UTF8)
                    elif attrib == 'securityPatchLevel' and int(mobile[attrib]):
                        row[attrib] = utils.formatTimestampYMDHMS(
                            mobile[attrib])
                    else:
                        row[attrib] = mobile[attrib]
            csvRows.append(row)
    display.sort_csv_titles(
        ['resourceId', 'deviceId', 'serialNumber', 'name', 'email', 'status'],
        titles)
    display.write_csv_file(csvRows, titles, 'Mobile', todrive)
def update():
    """Perform an action (wipe, approve, block, ...) on mobile devices.

    argv[3] is either a single resourceId or "query:<q>" to select devices
    by query. With a query selection, nothing is changed unless the 'doit'
    argument is supplied; a single-device selection implies doit.
    'ifusers'/'matchusers' restricts the action to devices owned by the
    given users.
    """
    cd = gapi_directory.build()
    resourceIds = sys.argv[3]
    match_users = None
    doit = False
    if resourceIds[:6] == 'query:':
        query = resourceIds[6:]
        fields = 'nextPageToken,mobiledevices(resourceId,email)'
        page_message = gapi.got_total_items_msg('Mobile Devices', '...\n')
        devices = gapi.get_all_pages(cd.mobiledevices(),
                                     'list',
                                     page_message=page_message,
                                     customerId=GC_Values[GC_CUSTOMER_ID],
                                     items='mobiledevices',
                                     query=query,
                                     fields=fields)
    else:
        # Explicit single device: act immediately without the doit flag.
        devices = [{'resourceId': resourceIds, 'email': ['not set']}]
        doit = True
    i = 4
    body = {}
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'action':
            body['action'] = sys.argv[i + 1].lower()
            validActions = [
                'wipe', 'wipeaccount', 'accountwipe', 'wipe_account',
                'account_wipe', 'approve', 'block',
                'cancel_remote_wipe_then_activate',
                'cancel_remote_wipe_then_block'
            ]
            if body['action'] not in validActions:
                controlflow.expected_argument_exit('action',
                                                   ', '.join(validActions),
                                                   body['action'])
            # Map the user-facing aliases onto the API's action names.
            if body['action'] == 'wipe':
                body['action'] = 'admin_remote_wipe'
            elif body['action'].replace('_',
                                        '') in ['accountwipe', 'wipeaccount']:
                body['action'] = 'admin_account_wipe'
            i += 2
        elif myarg in ['ifusers', 'matchusers']:
            match_users = gam.getUsersToModify(entity_type=sys.argv[i + 1].lower(),
                                               entity=sys.argv[i + 2])
            i += 3
        elif myarg == 'doit':
            doit = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam update mobile')
    if body:
        if doit:
            print(f'Updating {len(devices)} devices')
            describe_as = 'Performing'
        else:
            print(
                f'Showing {len(devices)} changes that would be made, not actually making changes because doit argument not specified'
            )
            describe_as = 'Would perform'
        for device in devices:
            device_user = device.get('email', [''])[0]
            if match_users and device_user not in match_users:
                print(
                    f'Skipping device for user {device_user} that did not match match_users argument'
                )
            else:
                print(
                    f'{describe_as} {body["action"]} on user {device_user} device {device["resourceId"]}'
                )
                if doit:
                    gapi.call(cd.mobiledevices(),
                              'action',
                              resourceId=device['resourceId'],
                              body=body,
                              customerId=GC_Values[GC_CUSTOMER_ID])
| |
import numpy
import chainer
from chainer.backends import cuda
from chainer.functions.activation import lstm
from chainer.functions.array import reshape
from chainer.functions.array import stack
from chainer.functions.connection import linear
from chainer.functions.connection import n_step_rnn
from chainer.functions.connection.n_step_rnn import get_random_state
from chainer.utils import argument
# Bind the cuDNN handles at module import time, only when Chainer was built
# with CUDA/cuDNN support.
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cuda.cuda.cudnn
def _stack_weight(ws):
    """Stack per-gate arrays along a new second axis, then fold the first
    two axes into one.

    # TODO(unno): Input of the current LSTM implementaiton is shuffled
    """
    stacked = stack.stack(ws, axis=1)
    first, second = stacked.shape[0], stacked.shape[1]
    return reshape.reshape(stacked, (first * second,) + stacked.shape[2:])
class NStepLSTM(n_step_rnn.BaseNStepRNN):
    """cuDNN-backed function node for a uni-directional stacked LSTM."""

    def __init__(self, n_layers, states, lengths):
        n_step_rnn.BaseNStepRNN.__init__(
            self, n_layers, states, lengths,
            rnn_dir='uni', rnn_mode='lstm')
class NStepBiLSTM(n_step_rnn.BaseNStepRNN):
    """cuDNN-backed function node for a bi-directional stacked LSTM."""

    def __init__(self, n_layers, states, lengths):
        n_step_rnn.BaseNStepRNN.__init__(
            self, n_layers, states, lengths,
            rnn_dir='bi', rnn_mode='lstm')
def n_step_lstm(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, **kwargs):
    """n_step_lstm(n_layers, dropout_ratio, hx, cx, ws, bs, xs)

    Stacked Uni-directional Long Short-Term Memory function.

    This function calculates stacked Uni-directional LSTM with sequences.
    This function gets an initial hidden state :math:`h_0`, an initial cell
    state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
    and bias vectors :math:`b`.
    This function calculates hidden states :math:`h_t` and :math:`c_t` for each
    time :math:`t` from input :math:`x_t`.

    .. math::
       i_t &= \\sigma(W_0 x_t + W_4 h_{t-1} + b_0 + b_4) \\\\
       f_t &= \\sigma(W_1 x_t + W_5 h_{t-1} + b_1 + b_5) \\\\
       o_t &= \\sigma(W_2 x_t + W_6 h_{t-1} + b_2 + b_6) \\\\
       a_t &= \\tanh(W_3 x_t + W_7 h_{t-1} + b_3 + b_7) \\\\
       c_t &= f_t \\cdot c_{t-1} + i_t \\cdot a_t \\\\
       h_t &= o_t \\cdot \\tanh(c_t)

    As the function accepts a sequence, it calculates :math:`h_t` for all
    :math:`t` with one call. Eight weight matrices and eight bias vectors are
    required for each layer. So, when :math:`S` layers exist, you need to
    prepare :math:`8S` weight matrices and :math:`8S` bias vectors.

    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except the first layer may have different
    shape from the first layer.

    .. warning::

       ``train`` and ``use_cudnn`` arguments are not supported anymore since
       v2.
       Instead, use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
            is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
            is the dimension of the hidden units.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[i]`` represents the weights for the i-th layer.
            Each ``ws[i]`` is a list containing eight matrices.
            ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(I, N)``-shaped as
            they are multiplied with input variables, where ``I`` is the size
            of the input and ``N`` is the dimension of the hidden units. All
            other matrices are ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents the biases for the i-th layer.
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)`` where ``N`` is the dimension
            of the hidden units.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When sequences has different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.

    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.

        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
          the mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t]``.

    .. note::

       The dimension of hidden units is limited to only one size ``N``. If you
       want to use variable dimension of hidden units, please use
       :class:`chainer.functions.lstm`.

    .. seealso::

       :func:`chainer.functions.lstm`

    .. admonition:: Example

        >>> batchs = [3, 2, 1]  # support variable length sequences
        >>> in_size, out_size, n_layers = 3, 2, 2
        >>> dropout_ratio = 0.0
        >>> xs = [np.ones((b, in_size)).astype(np.float32) for b in batchs]
        >>> [x.shape for x in xs]
        [(3, 3), (2, 3), (1, 3)]
        >>> h_shape = (n_layers, batchs[0], out_size)
        >>> hx = np.ones(h_shape).astype(np.float32)
        >>> cx = np.ones(h_shape).astype(np.float32)
        >>> w_in = lambda i, j: in_size if i == 0 and j < 4 else out_size
        >>> ws = []
        >>> bs = []
        >>> for n in range(n_layers):
        ...     ws.append([np.ones((out_size, w_in(n, i))).astype(np.float32) \
for i in range(8)])
        ...     bs.append([np.ones((out_size,)).astype(np.float32) \
for _ in range(8)])
        ...
        >>> ws[0][0].shape  # ws[0][:4].shape are (out_size, in_size)
        (2, 3)
        >>> ws[1][0].shape  # others are (out_size, out_size)
        (2, 2)
        >>> bs[0][0].shape
        (2,)
        >>> hy, cy, ys = F.n_step_lstm(
        ...     n_layers, dropout_ratio, hx, cx, ws, bs, xs)
        >>> hy.shape
        (2, 3, 2)
        >>> cy.shape
        (2, 3, 2)
        >>> [y.shape for y in ys]
        [(3, 2), (2, 2), (1, 2)]

    """
    return n_step_lstm_base(n_layers, dropout_ratio, hx, cx, ws, bs, xs,
                            use_bi_direction=False, **kwargs)
def n_step_bilstm(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, **kwargs):
    """n_step_bilstm(n_layers, dropout_ratio, hx, cx, ws, bs, xs)

    Stacked Bi-directional Long Short-Term Memory function.

    This function calculates stacked Bi-directional LSTM with sequences.
    This function gets an initial hidden state :math:`h_0`, an initial cell
    state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
    and bias vectors :math:`b`.
    This function calculates hidden states :math:`h_t` and :math:`c_t` for each
    time :math:`t` from input :math:`x_t`.

    .. math::
        i^{f}_t &=& \\sigma(W^{f}_0 x_t + W^{f}_4 h_{t-1} + b^{f}_0 + b^{f}_4),
        \\\\
        f^{f}_t &=& \\sigma(W^{f}_1 x_t + W^{f}_5 h_{t-1} + b^{f}_1 + b^{f}_5),
        \\\\
        o^{f}_t &=& \\sigma(W^{f}_2 x_t + W^{f}_6 h_{t-1} + b^{f}_2 + b^{f}_6),
        \\\\
        a^{f}_t &=& \\tanh(W^{f}_3 x_t + W^{f}_7 h_{t-1} + b^{f}_3 + b^{f}_7),
        \\\\
        c^{f}_t &=& f^{f}_t \\cdot c^{f}_{t-1} + i^{f}_t \\cdot a^{f}_t,
        \\\\
        h^{f}_t &=& o^{f}_t \\cdot \\tanh(c^{f}_t),
        \\\\
        i^{b}_t &=& \\sigma(W^{b}_0 x_t + W^{b}_4 h_{t-1} + b^{b}_0 + b^{b}_4),
        \\\\
        f^{b}_t &=& \\sigma(W^{b}_1 x_t + W^{b}_5 h_{t-1} + b^{b}_1 + b^{b}_5),
        \\\\
        o^{b}_t &=& \\sigma(W^{b}_2 x_t + W^{b}_6 h_{t-1} + b^{b}_2 + b^{b}_6),
        \\\\
        a^{b}_t &=& \\tanh(W^{b}_3 x_t + W^{b}_7 h_{t-1} + b^{b}_3 + b^{b}_7),
        \\\\
        c^{b}_t &=& f^{b}_t \\cdot c^{b}_{t-1} + i^{b}_t \\cdot a^{b}_t, \\\\
        h^{b}_t &=& o^{b}_t \\cdot \\tanh(c^{b}_t), \\\\
        h_t &=& [h^{f}_t; h^{b}_t]

    where :math:`W^{f}` is the weight matrices for forward-LSTM, :math:`W^{b}`
    is weight matrices for backward-LSTM.

    As the function accepts a sequence, it calculates :math:`h_t` for all
    :math:`t` with one call. Eight weight matrices and eight bias vectors are
    required for each layer of each direction. So, when :math:`S` layers
    exist, you need to prepare :math:`16S` weight matrices and :math:`16S`
    bias vectors.

    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except the first layer may have different
    shape from the first layer.

    .. warning::

       ``train`` and ``use_cudnn`` arguments are not supported anymore since
       v2.
       Instead, use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(2S, B, N)`` where ``S`` is the number of layers and
            is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
            is the dimension of the hidden units. Because of bi-direction, the
            first dimension length is ``2S``.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[2 * l + m]`` represents the weights for the l-th layer of
            the m-th direction. (``m == 0`` means the forward direction and
            ``m == 1`` means the backward direction.) Each ``ws[i]`` is a
            list containing eight matrices. ``ws[i][j]`` corresponds to
            :math:`W_j` in the equation. ``ws[0][j]`` and ``ws[1][j]`` where
            ``0 <= j < 4`` are ``(I, N)``-shaped because they are multiplied
            with input variables, where ``I`` is the size of the input.
            ``ws[i][j]`` where ``2 <= i`` and ``0 <= j < 4`` are
            ``(N, 2N)``-shaped because they are multiplied with two hidden
            layers :math:`h_t = [h^{f}_t; h^{b}_t]`. All other matrices are
            ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[2 * l + m]`` represents the weights for the l-th layer of
            m-th direction. (``m == 0`` means the forward direction and
            ``m == 1`` means the backward direction.)
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)``.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When sequences has different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.

    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.

        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, 2N)`` where ``B_t``
          is the mini-batch size for time ``t``, and ``N`` is size of
          hidden units. Note that ``B_t`` is the same value as ``xs[t]``.

    .. admonition:: Example

        >>> batchs = [3, 2, 1]  # support variable length sequences
        >>> in_size, out_size, n_layers = 3, 2, 2
        >>> dropout_ratio = 0.0
        >>> xs = [np.ones((b, in_size)).astype(np.float32) for b in batchs]
        >>> [x.shape for x in xs]
        [(3, 3), (2, 3), (1, 3)]
        >>> h_shape = (n_layers * 2, batchs[0], out_size)
        >>> hx = np.ones(h_shape).astype(np.float32)
        >>> cx = np.ones(h_shape).astype(np.float32)
        >>> def w_in(i, j):
        ...     if i == 0 and j < 4:
        ...         return in_size
        ...     elif i > 0 and j < 4:
        ...         return out_size * 2
        ...     else:
        ...         return out_size
        ...
        >>> ws = []
        >>> bs = []
        >>> for n in range(n_layers):
        ...     for direction in (0, 1):
        ...         ws.append([np.ones((out_size, w_in(n, i))).\
astype(np.float32) for i in range(8)])
        ...         bs.append([np.ones((out_size,)).astype(np.float32) \
for _ in range(8)])
        ...
        >>> ws[0][0].shape  # ws[0:2][:4].shape are (out_size, in_size)
        (2, 3)
        >>> ws[2][0].shape  # ws[2:][:4].shape are (out_size, 2 * out_size)
        (2, 4)
        >>> ws[0][4].shape  # others are (out_size, out_size)
        (2, 2)
        >>> bs[0][0].shape
        (2,)
        >>> hy, cy, ys = F.n_step_bilstm(
        ...     n_layers, dropout_ratio, hx, cx, ws, bs, xs)
        >>> hy.shape
        (4, 3, 2)
        >>> cy.shape
        (4, 3, 2)
        >>> [y.shape for y in ys]
        [(3, 4), (2, 4), (1, 4)]

    """
    return n_step_lstm_base(n_layers, dropout_ratio, hx, cx, ws, bs, xs,
                            use_bi_direction=True, **kwargs)
def n_step_lstm_base(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, use_bi_direction,
        **kwargs):
    """Base function for Stack LSTM/BiLSTM functions.

    This function is used at :func:`chainer.functions.n_step_lstm` and
    :func:`chainer.functions.n_step_bilstm`.
    This function's behavior depends on following arguments,
    ``activation`` and ``use_bi_direction``.

    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
            is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
            is the dimension of the hidden units.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[i]`` represents the weights for the i-th layer.
            Each ``ws[i]`` is a list containing eight matrices.
            ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(I, N)``-shape as they
            are multiplied with input variables, where ``I`` is the size of
            the input and ``N`` is the dimension of the hidden units. All
            other matrices are ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents the biases for the i-th layer.
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)``.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When sequences has different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.
        use_bi_direction (bool): If ``True``, this function uses Bi-directional
            LSTM.

    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.

        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
          the mini-batch size for time ``t``. Note that ``B_t`` is the same
          value as ``xs[t]``.

    .. seealso::

       :func:`chainer.functions.n_step_lstm`
       :func:`chainer.functions.n_step_bilstm`

    """
    # Reject the pre-v2 keyword arguments with an explanatory message.
    if kwargs:
        argument.check_unexpected_kwargs(
            kwargs, train='train argument is not supported anymore. '
            'Use chainer.using_config',
            use_cudnn='use_cudnn argument is not supported anymore. '
            'Use chainer.using_config')
        argument.assert_kwargs_empty(kwargs)
    xp = cuda.get_array_module(hx, hx.data)
    if xp is not numpy and chainer.should_use_cudnn('>=auto', 5000):
        # cuDNN path: flatten the variable-length batch into one array, run
        # the fused RNN kernel, then split the output back per time step.
        states = get_random_state().create_dropout_states(dropout_ratio)
        lengths = [len(x) for x in xs]
        xs = chainer.functions.concat(xs, axis=0)
        w = n_step_rnn.cudnn_rnn_weight_concat(
            n_layers, states, use_bi_direction, 'lstm', ws, bs)
        if use_bi_direction:
            rnn = NStepBiLSTM
        else:
            rnn = NStepLSTM
        hy, cy, ys = rnn(n_layers, states, lengths)(hx, cx, w, xs)
        sections = numpy.cumsum(lengths[:-1])
        ys = chainer.functions.split_axis(ys, sections, 0)
        return hy, cy, ys
    else:
        # Generic fallback built from elementary functions (see _lstm).
        return n_step_rnn.n_step_rnn_impl(
            _lstm, n_layers, dropout_ratio, hx, cx, ws, bs, xs,
            use_bi_direction)
def _lstm(x, h, c, w, b):
    """One LSTM step for the non-cuDNN fallback path.

    ``w``/``b`` hold the eight per-gate weight matrices and bias vectors.
    The [2, 0, 1, 3] / [6, 4, 5, 7] reordering rearranges them into the
    gate layout expected by :func:`chainer.functions.activation.lstm`
    (see the shuffle TODO on ``_stack_weight``).
    Returns ``(h_new, c_new)``.
    """
    xw = _stack_weight([w[2], w[0], w[1], w[3]])
    hw = _stack_weight([w[6], w[4], w[5], w[7]])
    xb = _stack_weight([b[2], b[0], b[1], b[3]])
    hb = _stack_weight([b[6], b[4], b[5], b[7]])
    lstm_in = linear.linear(x, xw, xb) + linear.linear(h, hw, hb)
    c_bar, h_bar = lstm.lstm(c, lstm_in)
    return h_bar, c_bar
| |
"""
Machine learning chunker for CoNLL 2000
"""
__author__ = "Pierre Nugues"
import time
import conll_reader
from sklearn.feature_extraction import DictVectorizer
from sklearn import svm
from sklearn import linear_model
from sklearn import metrics
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.grid_search import GridSearchCV
import pickle
def extract_features(sentences, w_size, feature_names):
    """
    Build the X matrix (list of feature dicts) and y vector (list of
    chunk labels) for a whole corpus.
    :param sentences: list of sentence strings
    :param w_size: half-window size
    :param feature_names: keys used for the feature dictionaries
    :return: (X, y)
    """
    X_all, y_all = [], []
    for sent in sentences:
        X_sent, y_sent = extract_features_sent(sent, w_size, feature_names)
        X_all += X_sent
        y_all += y_sent
    return X_all, y_all
def extract_features_sent(sentence, w_size, feature_names, prediction=False):
    """
    Extract the features from one sentence.
    Returns X (list of feature dicts, keyed by feature_names) and y
    (list of chunk labels). Each row of X holds the lowercased words and
    the POS tags of a 2*w_size+1 window; unless prediction is set, it also
    holds the chunk tags of the w_size words before the center.
    :param sentence: one sentence, one "word POS chunk" row per line
    :param w_size: half-window size
    :param feature_names: dictionary keys, in window order
    :param prediction: if True, omit the preceding-chunk features
    :return: (X, y)
    """
    # Pad the sentence so the window fits at both ends.
    bos = "BOS BOS BOS\n" * w_size
    eos = "\nEOS EOS EOS" * w_size
    rows = [line.split() for line in (bos + sentence + eos).splitlines()]
    X = []
    y = []
    window = 2 * w_size + 1
    for i in range(len(rows) - 2 * w_size):
        # Lowercased words, then POS tags, over the whole window.
        feats = [rows[i + j][0].lower() for j in range(window)]
        feats += [rows[i + j][1] for j in range(window)]
        # Chunk tags up to (but excluding) the center word.
        if not prediction:
            feats += [rows[i + j][2] for j in range(w_size)]
        X.append(dict(zip(feature_names, feats)))
        # The class is the chunk tag of the center word.
        y.append(rows[i + w_size][2])
    return X, y
def encode_classes(y_symbols):
    """
    Map the chunk-tag symbols to integer class indices.

    The classes are numbered in sorted order of their names, e.g.
    0: 'B-ADJP', 1: 'B-ADVP', ..., 21: 'O'.
    :param y_symbols: list of chunk-tag strings
    :return: (y, dict_classes, inv_dict_classes) where y is the numeric
        vector, dict_classes maps number -> name and inv_dict_classes
        maps name -> number
    """
    # Number the distinct chunk names in sorted order
    dict_classes = dict(enumerate(sorted(set(y_symbols))))
    # Inverted lookup: name -> number
    inv_dict_classes = {name: number for number, name in dict_classes.items()}
    # Convert y_symbols into a numerical vector
    y = [inv_dict_classes[symbol] for symbol in y_symbols]
    return y, dict_classes, inv_dict_classes
def predict(test_sentences, feature_names, f_out):
    """
    Tag every sentence with its predicted chunks and save the result.

    Prediction runs left to right, one word at a time, so that the two
    previously predicted chunk tags can be fed back as dynamic features
    (s_n2 and s_n1).  Relies on the module-level w_size, vec, classifier
    and dict_classes created in the main block.
    :param test_sentences: iterable of sentence strings
    :param feature_names: keys for the feature dictionaries
    :param f_out: output file; each row is written with the predicted
        chunk appended as a last column.  The file is closed on return.
    """
    for sentence in test_sentences:
        predicted_tags = ['BOS', 'BOS']
        X_dict, _ = extract_features_sent(sentence, w_size, feature_names, True)
        for features in X_dict:
            # Inject the two previously predicted chunk tags
            features["s_n2"] = predicted_tags[-2]
            features["s_n1"] = predicted_tags[-1]
            # One-hot encode, predict the class number, convert to its name
            class_number = classifier.predict(vec.transform(features))[0]
            predicted_tags.append(dict_classes[class_number])
        # Drop the two BOS seeds and append each tag as a last column
        predicted_tags = predicted_tags[2:]
        lines = sentence.splitlines()
        for i, line in enumerate(lines):
            f_out.write(line + ' ' + predicted_tags[i] + '\n')
        f_out.write('\n')
    f_out.close()
if __name__ == '__main__':
    # NOTE: time.clock() was removed in Python 3.8; perf_counter() is the
    # portable replacement for measuring elapsed time.
    start_time = time.perf_counter()
    train_corpus = './train.txt'
    test_corpus = './test.txt'
    w_size = 2  # The size of the context window to the left and right of the word
    feature_names = ['word_n2', 'word_n1', 'word', 'word_p1', 'word_p2',
                     'pos_n2', 'pos_n1', 'pos', 'pos_p1', 'pos_p2', 's_n1', 's_n2']
    train_sentences = conll_reader.read_sentences(train_corpus)
    print("Extracting the features...")
    X_dict, y_symbols = extract_features(train_sentences, w_size, feature_names)
    print("Encoding the features and classes...")
    # Vectorize the feature matrix and carry out a one-hot encoding
    vec = DictVectorizer(sparse=True)
    X = vec.fit_transform(X_dict)
    # The statement below will swallow a considerable memory
    # X = vec.fit_transform(X_dict).toarray()
    # print(vec.get_feature_names())
    y, dict_classes, inv_dict_classes = encode_classes(y_symbols)
    training_start_time = time.perf_counter()
    print("Training the model...")
    classifier = linear_model.LogisticRegression(penalty='l2', dual=True, solver='liblinear')
    try:
        # Reuse a previously trained model when one has been saved;
        # use context managers so the file handles are always closed.
        with open("clf.sav", "rb") as model_file:
            classifier = pickle.load(model_file)
    except FileNotFoundError:
        classifier.fit(X, y)
        with open("clf.sav", "wb") as model_file:
            pickle.dump(classifier, model_file)
    test_start_time = time.perf_counter()
    # We apply the model to the test set
    test_sentences = list(conll_reader.read_sentences(test_corpus))
    # Here we carry out a chunk tag prediction and we report the per tag error
    # This is done for the whole corpus without regard for the sentence structure
    print("Predicting the chunks in the test set...")
    X_test_dict, y_test_symbols = extract_features(test_sentences, w_size, feature_names)
    # Vectorize the test set and one-hot encoding
    X_test = vec.transform(X_test_dict)  # Possible to add: .toarray()
    # Unseen chunk tags are mapped to class 0
    y_test = [inv_dict_classes[i] if i in y_symbols else 0 for i in y_test_symbols]
    y_test_predicted = classifier.predict(X_test)
    # print("test", y_test)
    print("pred", y_test_predicted)
    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(y_test, y_test_predicted)))
    # Here we tag the test set and we save it.
    # This prediction is redundant with the piece of code above,
    # but we need to predict one sentence at a time to have the same
    # corpus structure
    print("Predicting the test set...")
    f_out = open('out', 'w')
    predict(test_sentences, feature_names, f_out)
    end_time = time.perf_counter()
    print("Training time:", (test_start_time - training_start_time) / 60)
    print("Test time:", (end_time - test_start_time) / 60)
| |
#!/usr/bin/python
##############################################################################
#
# Copyright 2014 Realm Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
# In the lldb shell, load with:
# command script import [Realm path]/plugin/rlm_lldb.py --allow-reload
# To load automatically, add that line to your ~/.lldbinit file (which you will
# have to create if you have not set up any previous lldb scripts), or run this
# file as a Python script outside of Xcode to install it automatically
if __name__ == '__main__':
    # Script is being run directly, so install it
    import errno
    import shutil
    import os
    source = os.path.realpath(__file__)
    destination = os.path.expanduser("~/Library/Application Support/Realm")
    # Copy the file into place
    try:
        # 0o744 is the portable octal literal: the old 0744 spelling is a
        # SyntaxError in Python 3, while 0o744 works in Python 2.6+ and 3.
        os.makedirs(destination, 0o744)
    except os.error as e:
        # It's fine if the directory already exists
        if e.errno != errno.EEXIST:
            raise
    shutil.copy2(source, destination + '/rlm_lldb.py')
    # Add it to ~/.lldbinit
    load_line = 'command script import "~/Library/Application Support/Realm/rlm_lldb.py" --allow-reload\n'
    is_installed = False
    try:
        with open(os.path.expanduser('~/.lldbinit')) as f:
            for line in f:
                if line == load_line:
                    is_installed = True
                    break
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        # File not existing yet is fine
    if not is_installed:
        with open(os.path.expanduser('~/.lldbinit'), 'a') as f:
            f.write('\n' + load_line)
    exit(0)
import lldb
# Map from RLMProperty._type raw values to the C type used when reading a
# property; anything not listed falls back to 'id' (see _get_prop below).
# NOTE(review): the numeric keys presumably mirror the RLMPropertyType enum
# in Realm's headers — confirm against RLMProperty.h.
property_types = {
    0: 'int64_t',
    10: 'double',
    1: 'bool',
    9: 'float',
}
def cache_lookup(cache, key, generator):
    """Return cache[key], computing it with generator(key) on a miss.

    Uses a containment check rather than truthiness so that falsy cached
    values (None, 0, '', empty list) are not recomputed on every call —
    e.g. get_schema below can legitimately return None.
    """
    if key not in cache:
        cache[key] = generator(key)
    return cache[key]
# Cache of "Class.ivar" -> (offset, SBType, size), shared across objects
ivar_cache = {}
def get_ivar_info(obj, ivar):
    """Return (byte offset, SBType, size) for an ivar named 'Class.ivar'.

    Evaluates &((Class *)0)->ivar in the debuggee so the compiler computes
    the field offset; results are memoized in ivar_cache.
    """
    def get_offset(ivar):
        class_name, ivar_name = ivar.split('.')
        frame = obj.GetThread().GetSelectedFrame()
        ptr = frame.EvaluateExpression("&(({} *)0)->{}".format(class_name, ivar_name))
        return (ptr.GetValueAsUnsigned(), ptr.deref.type, ptr.deref.size)
    return cache_lookup(ivar_cache, ivar, get_offset)
def get_ivar(obj, addr, ivar):
    """Read the raw value of the named ivar from the object at addr."""
    offset, _, size = get_ivar_info(obj, ivar)
    if isinstance(addr, lldb.SBAddress):
        # Convert the SBAddress to an integer via its hex string form
        addr = int(str(addr), 16)
    return obj.GetProcess().ReadUnsignedFromMemory(addr + offset, size, lldb.SBError())
# Combined byte offset of RLMObject._row.m_table.m_ptr, computed lazily once
object_table_ptr_offset = None
def is_object_deleted(obj):
    """Return True when the RLMObject's row has been detached (deleted).

    Follows _row -> m_table -> m_ptr; a null table pointer means the
    underlying row is no longer attached to a table.
    """
    addr = int(str(obj.GetAddress()), 16)
    global object_table_ptr_offset
    if not object_table_ptr_offset:
        row, _, _ = get_ivar_info(obj, 'RLMObject._row')
        table, _, _ = get_ivar_info(obj, 'realm::Row.m_table')
        ptr, _, _ = get_ivar_info(obj, 'realm::TableRef.m_ptr')
        object_table_ptr_offset = row + table + ptr
    ptr = obj.GetProcess().ReadUnsignedFromMemory(addr + object_table_ptr_offset,
                                                  obj.target.addr_size, lldb.SBError())
    return ptr == 0
class SyntheticChildrenProvider(object):
    """Shared helpers for the RLM synthetic-children providers below."""

    def _eval(self, expr):
        # Evaluate an expression in the debuggee, in the selected frame
        frame = self.obj.GetThread().GetSelectedFrame()
        return frame.EvaluateExpression(expr)

    def _get_ivar(self, addr, ivar):
        return get_ivar(self.obj, addr, ivar)

    def _to_str(self, val):
        # Read a NUL-terminated C string (up to 1024 bytes) from the debuggee
        return self.obj.GetProcess().ReadCStringFromMemory(val, 1024, lldb.SBError())

    def _value_from_ivar(self, ivar):
        # Create a child SBValue for the RLMObject._<ivar> instance variable
        offset, ivar_type, _ = get_ivar_info(self.obj, 'RLMObject._' + ivar)
        return self.obj.CreateChildAtOffset(ivar, offset, ivar_type)
def RLMObject_SummaryProvider(obj, _):
    """Summary for RLMObject: flag deleted objects, otherwise defer."""
    # Returning None lets lldb fall back to the default summary
    return '[Deleted object]' if is_object_deleted(obj) else None
# Cache of object schema address -> [(property name, getter expression)]
schema_cache = {}
class RLMObject_SyntheticChildrenProvider(SyntheticChildrenProvider):
    """Synthetic children for RLMObject: child 0 is the realm, child 1 the
    objectSchema, children 2.. are the managed properties, each read by
    evaluating a getter expression in the debuggee."""

    def __init__(self, obj, _):
        self.obj = obj
        if not obj.GetAddress() or is_object_deleted(obj):
            # Nothing useful to show for invalid or deleted objects
            self.props = []
            return
        object_schema = self._get_ivar(self.obj.GetAddress(), 'RLMObject._objectSchema')
        self.bool_type = obj.GetTarget().FindFirstType('BOOL')
        self.realm_type = obj.GetTarget().FindFirstType('RLMRealm')
        self.object_schema_type = obj.GetTarget().FindFirstType('RLMObjectSchema')

        def get_schema(object_schema):
            properties = self._get_ivar(object_schema, 'RLMObjectSchema._properties')
            if not properties:
                return None
            count = self._eval("(NSUInteger)[((NSArray *){}) count]".format(properties)).GetValueAsUnsigned()
            return [self._get_prop(properties, i) for i in range(count)]
        self.props = cache_lookup(schema_cache, object_schema, get_schema)

    def num_children(self):
        # realm + objectSchema + one child per managed property
        return len(self.props) + 2

    def has_children(self):
        return not is_object_deleted(self.obj)

    def get_child_index(self, name):
        if name == 'realm':
            return 0
        if name == 'objectSchema':
            return 1
        # Fix: children 0 and 1 are realm/objectSchema, so the property at
        # props index i is child i + 2 (get_child_at_index uses index - 2)
        return next(i + 2 for i, (prop_name, _) in enumerate(self.props) if prop_name == name)

    def get_child_at_index(self, index):
        if index == 0:
            return self._value_from_ivar('realm')
        if index == 1:
            return self._value_from_ivar('objectSchema')
        name, getter = self.props[index - 2]
        value = self._eval(getter)
        return self.obj.CreateValueFromData(name, value.GetData(), value.GetType())

    def update(self):
        pass

    def _get_prop(self, props, i):
        # props is the address of an NSArray of RLMProperty objects
        prop = self._eval("(NSUInteger)[((NSArray *){}) objectAtIndex:{}]".format(props, i)).GetValueAsUnsigned()
        name = self._to_str(self._eval('[(NSString *){} UTF8String]'.format(self._get_ivar(prop, "RLMProperty._name"))).GetValueAsUnsigned())
        type = self._get_ivar(prop, 'RLMProperty._type')
        getter = "({})[(id){} {}]".format(property_types.get(type, 'id'), self.obj.GetAddress(), name)
        return name, getter
# Cache of NSString address -> its UTF-8 contents
class_name_cache = {}
def get_object_class_name(frame, obj, addr, ivar):
    """Read the Objective-C class-name NSString ivar as a Python string."""
    class_name_ptr = get_ivar(obj, addr, ivar)
    def get_class_name(ptr):
        utf8_addr = frame.EvaluateExpression('(const char *)[(NSString *){} UTF8String]'.format(class_name_ptr)).GetValueAsUnsigned()
        return obj.GetProcess().ReadCStringFromMemory(utf8_addr, 1024, lldb.SBError())
    return cache_lookup(class_name_cache, class_name_ptr, get_class_name)
def RLMArray_SummaryProvider(obj, _):
    """Summary for RLMArray: "(ClassName[count])"."""
    frame = obj.GetThread().GetSelectedFrame()
    class_name = get_object_class_name(frame, obj, obj.GetAddress(), 'RLMArray._objectClassName')
    count = frame.EvaluateExpression('(NSUInteger)[(RLMArray *){} count]'.format(obj.GetAddress())).GetValueAsUnsigned()
    return "({}[{}])".format(class_name, count)
def RLMResults_SummaryProvider(obj, _):
    """Summary for RLMResults: "(ClassName[count])", without forcing an
    unevaluated query to run."""
    frame = obj.GetThread().GetSelectedFrame()
    addr = int(str(obj.GetAddress()), 16)
    class_name = get_object_class_name(frame, obj, addr, 'RLMResults._objectClassName')
    view_created = get_ivar(obj, addr, 'RLMResults._viewCreated')
    if not view_created:
        # Calling count here would evaluate the query as a side effect
        return 'Unevaluated query on ' + class_name
    count = frame.EvaluateExpression('(NSUInteger)[(RLMResults *){} count]'.format(obj.GetAddress())).GetValueAsUnsigned()
    return "({}[{}])".format(class_name, count)
class RLMArray_SyntheticChildrenProvider(SyntheticChildrenProvider):
    """Synthetic children for RLMArray/RLMResults: child 0 is the realm,
    children 1..count are the elements."""

    def __init__(self, valobj, _):
        self.obj = valobj
        self.addr = self.obj.GetAddress()
        # Fix: initialize the cached count here as well; lldb normally
        # calls update() before num_children(), but don't rely on that
        # ordering (previously this was an AttributeError).
        self.count = None

    def num_children(self):
        if self.count is None:
            # 'is None' rather than truthiness so an empty array's count
            # of 0 is cached instead of being re-evaluated every call
            self.count = self._eval("(NSUInteger)[(RLMArray *){} count]".format(self.addr)).GetValueAsUnsigned()
        return self.count + 1

    def has_children(self):
        return True

    def get_child_index(self, name):
        if name == 'realm':
            return 0
        if not name.startswith('['):
            return None
        # '[i]' maps to child i + 1 (child 0 is the realm)
        return int(name.lstrip('[').rstrip(']')) + 1

    def get_child_at_index(self, index):
        if index == 0:
            return self._value_from_ivar('realm')
        value = self._eval('(id)[(id){} objectAtIndex:{}]'.format(self.addr, index - 1))
        return self.obj.CreateValueFromData('[' + str(index - 1) + ']', value.GetData(), value.GetType())

    def update(self):
        # Invalidate the cached count; recomputed lazily in num_children()
        self.count = None
def __lldb_init_module(debugger, _):
debugger.HandleCommand('type summary add RLMArray -F rlm_lldb.RLMArray_SummaryProvider')
debugger.HandleCommand('type summary add RLMArrayLinkView -F rlm_lldb.RLMArray_SummaryProvider')
debugger.HandleCommand('type summary add RLMResults -F rlm_lldb.RLMResults_SummaryProvider')
debugger.HandleCommand('type summary add -x RLMAccessor_ -F rlm_lldb.RLMObject_SummaryProvider')
debugger.HandleCommand('type synthetic add RLMArray --python-class rlm_lldb.RLMArray_SyntheticChildrenProvider')
debugger.HandleCommand('type synthetic add RLMArrayLinkView --python-class rlm_lldb.RLMArray_SyntheticChildrenProvider')
debugger.HandleCommand('type synthetic add RLMResults --python-class rlm_lldb.RLMArray_SyntheticChildrenProvider')
debugger.HandleCommand('type synthetic add -x RLMAccessor_.* --python-class rlm_lldb.RLMObject_SyntheticChildrenProvider')
| |
from cattle import ApiError
from common_fixtures import * # NOQA
def _create_virtual_machine(client, sim_context, **kw):
args = {
'imageUuid': sim_context['imageUuid'],
'requestedHostId': sim_context['host'].id,
}
args.update(kw)
return client.create_virtual_machine(**args)
def test_virtual_machine_create_cpu_memory(super_client, sim_context):
    """A VM created with explicit vcpu/memoryMb keeps those values."""
    vm = _create_virtual_machine(super_client, sim_context,
                                 vcpu=2, memoryMb=42)
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    assert (vm.vcpu, vm.memoryMb) == (2, 42)
def test_virtual_machine_create(super_client, sim_context):
    """A VM created without sizing fields leaves vcpu/memoryMb unset."""
    vm = super_client.wait_success(
        _create_virtual_machine(super_client, sim_context))
    assert vm.state == 'running'
    assert vm.vcpu is None
    assert vm.memoryMb is None
def test_virtual_machine_create_null_network_id(super_client, sim_context):
    """A null entry in networkIds must be rejected as NotNullable."""
    try:
        super_client.create_virtual_machine(
            imageUuid=sim_context['imageUuid'], networkIds=[None])
        # Reaching here means the server accepted a null network id
        assert False
    except ApiError as e:
        assert e.error.code == 'NotNullable'
def test_virtual_machine_n_ids_s_ids(super_client, sim_context,
                                     network, subnet):
    """networkIds and subnetIds are mutually exclusive on create."""
    image_uuid = sim_context['imageUuid']
    try:
        super_client.create_virtual_machine(imageUuid=image_uuid,
                                            networkIds=[network.id],
                                            subnetIds=[subnet.id])
        # Fix: without this, the test silently passed when the server
        # failed to reject the request (compare the NotNullable test).
        assert False
    except ApiError as e:
        assert e.error.code == 'NetworkIdsSubnetIdsMutuallyExclusive'
def test_virtual_machine_network(super_client, sim_context, network,
                                 subnet):
    """Creating a VM with networkIds wires up an active nic whose primary
    IP is drawn from the subnet's resource pool."""
    subnet_plain_id = get_plain_id(super_client, subnet)
    vm = _create_virtual_machine(super_client, sim_context,
                                 networkIds=[network.id])
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    # networkIds is consumed at create time and not echoed back
    assert 'networkIds' not in vm
    nics = vm.nics()
    assert len(nics) == 1
    nic = nics[0]
    assert nic.network().id == network.id
    assert nic.state == 'active'
    assert nic.macAddress is not None
    assert nic.macAddress.startswith(network.macPrefix)
    nic_admin = super_client.reload(nic)
    vm_admin = super_client.reload(vm)
    # The nic belongs to the same account as the VM
    assert nic_admin.account().id == vm_admin.accountId
    ips = nic.ipAddresses()
    assert len(ips) == 1
    assert super_client.reload(nic).ipAddressNicMaps()[0].state == 'active'
    ip = ips[0]
    ip_admin = super_client.reload(ip)
    assert ip_admin.account().id == vm_admin.accountId
    assert ip_admin.subnet().id == nic_admin.subnet().id
    assert ip_admin.role == 'primary'
    assert ip.address is not None
    assert ip.address.startswith('192.168.0')
    assert vm.primaryIpAddress is not None
    assert vm.primaryIpAddress == ip.address
    # The allocated address must be accounted for in the subnet's pool
    addresses = super_client.list_resource_pool(poolType='subnet',
                                                poolId=subnet_plain_id)
    assert ip.address in [x.item for x in addresses]
def test_virtual_machine_subnet(super_client, sim_context, subnet, vnet):
    """Creating a VM with subnetIds attaches a nic on the subnet's network
    and allocates the primary IP from that subnet."""
    network = subnet.network()
    vm = _create_virtual_machine(super_client, sim_context,
                                 subnetIds=[subnet.id])
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    # subnetIds is consumed at create time and not echoed back
    assert 'subnetIds' not in vm
    nics = vm.nics()
    assert len(nics) == 1
    nic = nics[0]
    assert nic.subnetId == subnet.id
    assert nic.network().id == network.id
    assert nic.state == 'active'
    ips = nic.ipAddresses()
    assert len(ips) == 1
    ip = ips[0]
    assert ip.address is not None
    assert ip.address.startswith('192.168.0')
    assert vm.primaryIpAddress is not None
    assert vm.primaryIpAddress == ip.address
def test_virtual_machine_no_ip(super_client, sim_context):
    """With a single-address subnet, the first VM gets the address and a
    second VM fails IP allocation and ends up removed."""
    # Subnet with exactly one allocatable address (start == end)
    network = super_client.create_network()
    subnet = super_client.create_subnet(networkAddress='192.168.0.0',
                                        isPublic=True,
                                        cidrSize='16',
                                        networkId=network.id,
                                        startAddress='192.168.0.3',
                                        endAddress='192.168.0.3')
    subnet = super_client.wait_success(subnet)
    assert subnet.state == 'active'
    vm = _create_virtual_machine(super_client, sim_context,
                                 subnetIds=[subnet.id])
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    assert vm.primaryIpAddress == '192.168.0.3'
    # The pool is now exhausted; the next VM cannot get an IP
    vm = _create_virtual_machine(super_client, sim_context,
                                 subnetIds=[subnet.id])
    vm = super_client.wait_transitioning(vm)
    assert vm.state == 'removed'
    assert vm.transitioning == 'error'
    assert vm.transitioningMessage == \
        'Failed to allocate IP from subnet : IP allocation error'
def test_virtual_machine_stop_subnet(super_client, sim_context, subnet, vnet):
    """Stopping a VM deactivates its nic but keeps the IP allocation."""
    vm = _create_virtual_machine(super_client, sim_context,
                                 subnetIds=[subnet.id])
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    assert len(vm.nics()) == 1
    assert len(vm.nics()[0].ipAddresses()) == 1
    assert vm.nics()[0].ipAddresses()[0].address.startswith('192.168')
    vm = super_client.wait_success(vm.stop())
    assert vm.state == 'stopped'
    assert len(vm.nics()) == 1
    assert len(vm.nics()[0].ipAddresses()) == 1
    nic = vm.nics()[0]
    ip_address = nic.ipAddresses()[0]
    # The address remains active and reserved while the VM is stopped
    assert ip_address.state == 'active'
    assert ip_address.address.startswith('192.168')
    assert nic.state == 'inactive'
def test_virtual_machine_remove_subnet(super_client, sim_context,
                                       subnet, vnet):
    """stop(remove=True) removes the nic but keeps the IP allocated."""
    vm = _create_virtual_machine(super_client, sim_context,
                                 subnetIds=[subnet.id])
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    assert len(vm.nics()) == 1
    assert len(vm.nics()[0].ipAddresses()) == 1
    assert vm.nics()[0].ipAddresses()[0].address.startswith('192.168')
    vm = super_client.wait_success(vm.stop(remove=True))
    assert vm.state == 'removed'
    assert len(vm.nics()) == 1
    assert len(vm.nics()[0].ipAddresses()) == 1
    nic = vm.nics()[0]
    ip_address = nic.ipAddresses()[0]
    # The IP stays allocated until the VM/nic is purged
    assert ip_address.state == 'active'
    assert ip_address.address.startswith('192.168')
    assert nic.state == 'removed'
def test_virtual_machine_purge_subnet(super_client, sim_context, subnet, vnet):
    """Purging a removed VM and its nic releases the mac address, the IP
    and the subnet pool allocation."""
    subnet_plain_id = get_plain_id(super_client, subnet)
    vm = _create_virtual_machine(super_client, sim_context,
                                 subnetIds=[subnet.id])
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    addresses = super_client.list_resource_pool(poolType='subnet',
                                                poolId=subnet_plain_id)
    assert vm.primaryIpAddress in [x.item for x in addresses]
    assert len(vm.nics()) == 1
    assert len(vm.nics()[0].ipAddresses()) == 1
    assert vm.nics()[0].ipAddresses()[0].address.startswith('192.168')
    vm = super_client.wait_success(vm.stop(remove=True))
    assert vm.state == 'removed'
    assert len(vm.nics()) == 1
    assert len(vm.nics()[0].ipAddresses()) == 1
    nic = vm.nics()[0]
    ip_address = nic.ipAddresses()[0]
    # Removal alone keeps the IP allocated
    assert ip_address.state == 'active'
    assert ip_address.address.startswith('192.168')
    assert nic.state == 'removed'
    vm = super_client.wait_success(vm.purge())
    assert vm.state == 'purged'
    nics = vm.nics()
    assert len(nics) == 1
    nic = nics[0]
    assert nic.state == 'removed'
    assert nic.macAddress is not None
    nic = super_client.wait_success(nic.purge())
    assert nic.state == 'purged'
    # Purging the nic frees the mac and detaches the IP
    assert nic.macAddress is None
    assert len(nic.ipAddressNicMaps()) == 1
    assert nic.ipAddressNicMaps()[0].state == 'removed'
    assert len(nic.ipAddresses()) == 0
    ip_address = super_client.reload(ip_address)
    assert ip_address.state == 'removed'
    assert ip_address.address is not None
    # And the address is returned to the subnet's resource pool
    addresses = super_client.list_resource_pool(poolType='subnet',
                                                poolId=subnet_plain_id)
    assert vm.primaryIpAddress not in [x.item for x in addresses]
def test_virtual_machine_restore_subnet(super_client, sim_context,
                                        subnet, vnet):
    """A removed VM can be restored and gets back the same IP address."""
    subnet_plain_id = get_plain_id(super_client, subnet)
    vm = _create_virtual_machine(super_client, sim_context,
                                 subnetIds=[subnet.id])
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    addresses = super_client.list_resource_pool(poolType='subnet',
                                                poolId=subnet_plain_id)
    assert vm.primaryIpAddress in [x.item for x in addresses]
    vm = super_client.wait_success(vm.stop())
    assert vm.state == 'stopped'
    vm = super_client.wait_success(super_client.delete(vm))
    # (the original asserted this twice; once is enough)
    assert vm.state == 'removed'
    nic = vm.nics()[0]
    ip_address = nic.ipAddresses()[0]
    # Remember the allocated address to verify it survives the restore
    address = ip_address.address
    assert ip_address.address.startswith('192.168')
    vm = vm.restore()
    assert vm.state == 'restoring'
    vm = super_client.wait_success(vm)
    assert vm.state == 'stopped'
    assert len(vm.nics()) == 1
    nic = vm.nics()[0]
    assert nic.state == 'inactive'
    assert len(nic.ipAddresses()) == 1
    ip_address = nic.ipAddresses()[0]
    assert ip_address.state == 'active'
    vm = super_client.wait_success(vm.start())
    assert vm.state == 'running'
    # The restored VM keeps the exact same address
    assert vm.nics()[0].ipAddresses()[0].address == address
def test_virtual_machine_console(super_client, sim_context):
    """The console capability exposes a console() action with a URL."""
    vm = _create_virtual_machine(super_client, sim_context)
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    # No console action until the capability is granted
    assert 'console' not in vm
    vm = super_client.update(vm, data={
        'fields': {
            'capabilities': ['console']
        }
    })
    # (the original asserted membership twice; the combined form suffices)
    assert 'console' in vm and callable(vm.console)
    console = vm.console()
    assert console is not None
    assert console.kind == 'fake'
    assert console.url == 'http://localhost/console'
def test_virtual_machine_console_visibility(super_client, sim_context):
    """The console action is only visible while the VM is running."""
    vm = _create_virtual_machine(super_client, sim_context)
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    assert 'console' not in vm
    vm = super_client.update(vm, data={
        'fields': {
            'capabilities': ['console']
        }
    })
    # (the original asserted membership twice; the combined form suffices)
    assert 'console' in vm and callable(vm.console)
    vm = super_client.wait_success(vm.stop())
    assert vm.state == 'stopped'
    # The capability-backed action disappears once the VM is stopped
    assert 'console' not in vm
def test_virtual_machine_account_defaults(super_client, sim_context):
    """A VM created under an account inherits that account's default
    credentials and default networks."""
    # Disable account-scoped allocation constraints for this account
    key = 'io.cattle.platform.allocator.constraint.AccountConstraintsProvider'
    data = {key: {'accountScoped': False}}
    account = create_and_activate(super_client, 'account',
                                  data=data,
                                  kind='user')
    cred = create_and_activate(super_client, 'credential',
                               accountId=account.id)
    # Networks deliberately owned by a different account
    network = create_and_activate(super_client, 'network')
    assert network.accountId != account.id
    network2 = create_and_activate(super_client, 'network')
    assert network2.accountId != account.id
    account = super_client.update(account, defaultCredentialIds=[cred.id],
                                  defaultNetworkIds=[network.id, network2.id])
    assert account.state == 'active'
    assert account.defaultCredentialIds == [cred.id]
    assert account.defaultNetworkIds == [network.id, network2.id]
    vm = _create_virtual_machine(super_client, sim_context,
                                 accountId=account.id)
    vm = super_client.wait_success(vm)
    assert vm.state == 'running'
    # The account defaults were applied to the VM
    assert len(vm.credentials()) == 1
    assert vm.credentials()[0].id == cred.id
    network_ids = set([x.networkId for x in vm.nics()])
    assert network_ids == set([network.id, network2.id])
| |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods to upload source to GCS and call Cloud Build service."""
import gzip
import os
import StringIO
import tarfile
from apitools.base.py import encoding
from docker import docker
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.storage import storage_api
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
# Paths that shouldn't be ignored client-side even if the .dockerignore
# lists them, for behavioral parity with github.com/docker/docker-py.
BLACKLISTED_DOCKERIGNORE_PATHS = ['Dockerfile', '.dockerignore']
class UploadFailedError(exceptions.Error):
    """Raised when the source fails to upload to GCS."""
def _CreateTar(source_dir, gen_files, paths, gz):
    """Create tarfile for upload to GCS.

    The third-party code closes the tarfile after creating, which does not
    allow us to write generated files after calling docker.utils.tar
    since gzipped tarfiles can't be opened in append mode.

    Args:
      source_dir: the directory to be archived
      gen_files: dict of filename to (str) contents of generated files to
        write into the tar
      paths: allowed paths in the tarfile
      gz: gzipped tarfile object
    """
    root = os.path.abspath(source_dir)
    t = tarfile.open(mode='w', fileobj=gz)
    for path in sorted(paths):
        full_path = os.path.join(root, path)
        t.add(full_path, arcname=path, recursive=False)
    for name, contents in gen_files.iteritems():
        genfileobj = StringIO.StringIO(contents)
        tar_info = tarfile.TarInfo(name=name)
        # Size the entry from the contents themselves rather than reaching
        # into the StringIO's internal .buf attribute (an implementation
        # detail that cStringIO, for one, does not provide).
        tar_info.size = len(contents)
        t.addfile(tar_info, fileobj=genfileobj)
        genfileobj.close()
    t.close()
def _GetDockerignoreExclusions(source_dir, gen_files):
"""Helper function to read the .dockerignore on disk or in generated files.
Args:
source_dir: the path to the root directory.
gen_files: dict of filename to contents of generated files.
Returns:
Set of exclusion expressions from the dockerignore file.
"""
dockerignore = os.path.join(source_dir, '.dockerignore')
exclude = set()
ignore_contents = None
if os.path.exists(dockerignore):
with open(dockerignore) as f:
ignore_contents = f.read()
else:
ignore_contents = gen_files.get('.dockerignore')
if ignore_contents:
# Read the exclusions from the dockerignore, filtering out blank lines.
exclude = set(filter(bool, ignore_contents.splitlines()))
# Remove paths that shouldn't be excluded on the client.
exclude -= set(BLACKLISTED_DOCKERIGNORE_PATHS)
return exclude
def _GetIncludedPaths(source_dir, exclude, skip_files=None):
    """Filter the paths under source_dir using dockerignore and skip_files.

    We iterate separately to filter on skip_files in order to preserve
    expected behavior (standard deployment skips directories if they
    contain only files ignored by skip_files).

    Args:
      source_dir: the path to the root directory.
      exclude: the .dockerignore file exclusions.
      skip_files: the regex for files to skip. If None, only dockerignore
        is used to filter.

    Returns:
      Set of paths (relative to source_dir) to include.
    """
    # See docker.utils.tar: start from everything not matched by the
    # dockerignore expressions.
    paths = docker.utils.exclude_paths(os.path.abspath(source_dir), exclude)
    if not skip_files:
        return paths
    # Also filter on the ignore regex from the app.yaml.  FileIterator
    # yields '/'-separated paths, so normalize the separators before
    # intersecting with the docker-derived set.
    kept = {p.replace('/', os.path.sep)
            for p in util.FileIterator(source_dir, skip_files)}
    paths.intersection_update(kept)
    return paths
def UploadSource(source_dir, object_ref, gen_files=None, skip_files=None):
    """Upload a gzipped tarball of the source directory to GCS.

    Note: To provide parity with docker's behavior, we must respect
    .dockerignore.

    Args:
      source_dir: the directory to be archived.
      object_ref: storage_util.ObjectReference, the Cloud Storage location to
        upload the source tarball to.
      gen_files: dict of filename to (str) contents of generated config and
        source context files.
      skip_files: optional, a parsed regex for paths and files to skip, from
        the service yaml.

    Raises:
      UploadFailedError: when the source fails to upload to GCS.
    """
    gen_files = gen_files or {}
    dockerignore_contents = _GetDockerignoreExclusions(source_dir, gen_files)
    included_paths = _GetIncludedPaths(source_dir,
                                       dockerignore_contents,
                                       skip_files)
    # We can't use tempfile.NamedTemporaryFile here because ... Windows.
    # See https://bugs.python.org/issue14243. There are small cleanup races
    # during process termination that will leave artifacts on the filesystem.
    # eg, CTRL-C on windows leaves both the directory and the file. Unavoidable.
    # On Posix, `kill -9` has similar behavior, but CTRL-C allows cleanup.
    with files.TemporaryDirectory() as temp_dir:
        f = open(os.path.join(temp_dir, 'src.tgz'), 'w+b')
        with gzip.GzipFile(mode='wb', fileobj=f) as gz:
            _CreateTar(source_dir, gen_files, included_paths, gz)
        # Close before the upload so the gzip trailer is flushed to disk
        f.close()
        storage_client = storage_api.StorageClient()
        storage_client.CopyFileToGCS(object_ref.bucket_ref, f.name, object_ref.name)
def GetServiceTimeoutString(timeout_property_str):
    """Convert a timeout property to the 'Ns' form the Build API expects.

    A bare number is interpreted as seconds; anything else is parsed as a
    duration string. Returns None when no timeout is configured.
    """
    if timeout_property_str is None:
        return None
    try:
        seconds = int(timeout_property_str)
    except ValueError:
        duration = times.ParseDuration(timeout_property_str)
        seconds = int(duration.total_seconds)
    return '{}s'.format(seconds)
class InvalidBuildError(ValueError):
    """Error indicating that ExecuteCloudBuild was given a bad Build message."""

    def __init__(self, field):
        message = ('Field [{}] was provided, but should not have been. '
                   'You may be using an improper Cloud Build pipeline.').format(field)
        super(InvalidBuildError, self).__init__(message)
def _ValidateBuildFields(build, fields):
"""Validates that a Build message doesn't have fields that we populate."""
for field in fields:
if getattr(build, field, None) is not None:
raise InvalidBuildError(field)
def GetDefaultBuild(output_image):
    """Get the default build for this runtime.

    This build just uses the latest docker builder image (location pulled
    from the app/container_builder_image property) to run a `docker build`
    with the given tag.

    Args:
      output_image: GCR location for the output docker image (e.g.
        `gcr.io/test-gae/hardcoded-output-tag`)

    Returns:
      Build, a CloudBuild Build message with the given steps (ready to be
      given to FixUpBuild).
    """
    messages = cloudbuild_util.GetMessagesModule()
    builder = properties.VALUES.app.container_builder_image.Get()
    log.debug('Using builder image: [{0}]'.format(builder))
    # One step: `docker build -t <output_image> .`, then push the image
    return messages.Build(
        steps=[messages.BuildStep(name=builder,
                                  args=['build', '-t', output_image, '.'])],
        images=[output_image])
def FixUpBuild(build, object_ref):
    """Return a modified Build object with run-time values populated.

    Specifically:
    - `source` is pulled from the given object_ref
    - `timeout` comes from the app/cloud_build_timeout property
    - `logsBucket` uses the bucket from object_ref

    Args:
      build: cloudbuild Build message. The Build to modify. Fields 'timeout',
        'source', and 'logsBucket' will be added and may not be given.
      object_ref: storage_util.ObjectReference, the Cloud Storage location of
        the source tarball.

    Returns:
      Build, (copy) of the given Build message with the specified fields
      populated.

    Raises:
      InvalidBuildError: if the Build message had one of the fields this
        function sets pre-populated
    """
    messages = cloudbuild_util.GetMessagesModule()
    # Make a copy, so we don't modify the original
    build = encoding.CopyProtoMessage(build)
    # Check that nothing we're expecting to fill in has been set already
    _ValidateBuildFields(build, ('source', 'timeout', 'logsBucket'))
    build.timeout = GetServiceTimeoutString(
        properties.VALUES.app.cloud_build_timeout.Get())
    build.logsBucket = object_ref.bucket
    build.source = messages.Source(
        storageSource=messages.StorageSource(
            bucket=object_ref.bucket,
            object=object_ref.name,
        ),
    )
    return build
| |
# -*- test-case-name: admin.test.test_release -*-
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Helper utilities for the Flocker release process.
XXX This script is not automatically checked by buildbot. See
https://clusterhq.atlassian.net/browse/FLOC-397
"""
import json
import os
import sys
import tempfile
from datetime import datetime
from subprocess import check_call
from effect import (
Effect, sync_perform, ComposedDispatcher)
from effect.do import do
from characteristic import attributes
from git import GitCommandError, Repo
from pytz import UTC
import requests
from twisted.python.filepath import FilePath
from twisted.python.usage import Options, UsageError
from twisted.python.constants import Names, NamedConstant
from twisted.web import template
import flocker
from flocker.common.version import get_package_key_suffix
from flocker.provision._effect import sequence, dispatcher as base_dispatcher
from flocker.common.version import (
get_doc_version,
get_pre_release,
is_pre_release,
is_release,
is_weekly_release,
target_release,
UnparseableVersion,
)
from flocker.provision._install import ARCHIVE_BUCKET
from .aws import (
boto_dispatcher,
UpdateS3RoutingRule,
UpdateS3ErrorPage,
ListS3Keys,
DeleteS3Keys,
CopyS3Keys,
DownloadS3KeyRecursively,
UploadToS3,
UploadToS3Recursively,
CreateCloudFrontInvalidation,
)
from .yum import (
yum_dispatcher,
CreateRepo,
DownloadPackagesFromRepository,
)
from .vagrant import vagrant_version
from .homebrew import make_recipe
from .packaging import available_distributions, DISTRIBUTION_NAME_MAP
# S3 bucket that buildbot uploads in-development artifacts (e.g. Vagrant
# boxes) to; release scripts copy artifacts out of this bucket.
DEV_ARCHIVE_BUCKET = 'clusterhq-dev-archive'
class NotTagged(Exception):
    """
    Raised if publishing to production and the version being published
    isn't tagged.
    """
class NotARelease(Exception):
    """
    Raised if trying to publish documentation for, or packages for, a version
    that isn't a release.
    """
class DocumentationRelease(Exception):
    """
    Raised if trying to upload packages for a documentation release (a
    version, e.g. 0.3.0.post1, which republishes documentation only).
    """
class Environments(Names):
    """
    The environments that documentation can be published to.
    """
    # The live documentation site (docs.clusterhq.com).
    PRODUCTION = NamedConstant()
    # The staging site (docs.staging.clusterhq.com), used to check
    # documentation before a release.
    STAGING = NamedConstant()
class TagExists(Exception):
    """
    Raised if trying to release a version for which a Git tag already exists.

    Raised by :func:`calculate_base_branch`.
    """
class BranchExists(Exception):
    """
    Raised if trying to release a version for which a release branch already
    exists.

    Raised by :func:`create_release_branch`.
    """
class MissingPreRelease(Exception):
    """
    Raised if trying to release a pre-release for which the previous expected
    pre-release does not exist.

    Raised by :func:`calculate_base_branch`.
    """
class NoPreRelease(Exception):
    """
    Raised if trying to release a marketing release when no pre-release
    exists.

    Raised by :func:`calculate_base_branch`.
    """
class PushFailed(Exception):
    """
    Raised if pushing to Git fails.

    Raised by :func:`publish_homebrew_recipe`.
    """
@attributes([
    'documentation_bucket',
    'cloudfront_cname',
    'dev_bucket',
])
class DocumentationConfiguration(object):
    """
    The configuration for publishing documentation.

    :ivar bytes documentation_bucket: The bucket to publish documentation to.
    :ivar bytes cloudfront_cname: a CNAME associated to the cloudfront
        distribution pointing at the documentation bucket.
    :ivar bytes dev_bucket: The bucket buildbot uploads documentation to.
    """
# Per-environment S3/CloudFront configuration for documentation publishing.
# Both environments read the build output from the same development bucket.
DOCUMENTATION_CONFIGURATIONS = {
    Environments.PRODUCTION:
        DocumentationConfiguration(
            documentation_bucket="clusterhq-docs",
            cloudfront_cname="docs.clusterhq.com",
            dev_bucket="clusterhq-dev-docs"),
    Environments.STAGING:
        DocumentationConfiguration(
            documentation_bucket="clusterhq-staging-docs",
            cloudfront_cname="docs.staging.clusterhq.com",
            dev_bucket="clusterhq-dev-docs"),
}
@do
def publish_docs(flocker_version, doc_version, environment):
    """
    Publish the Flocker documentation. The documentation for each version of
    Flocker is uploaded to a development bucket on S3 by the build server and
    this copies the documentation for a particular ``flocker_version`` and
    publishes it as ``doc_version``. Attempting to publish documentation to a
    staging environment as a documentation version publishes it as the version
    being updated.

    :param bytes flocker_version: The version of Flocker to publish the
        documentation for.
    :param bytes doc_version: The version to publish the documentation as.
    :param Environments environment: The environment to publish the
        documentation to.

    :raises NotARelease: Raised if trying to publish to a version that isn't a
        release.
    :raises NotTagged: Raised if publishing to production and the version
        being published isn't tagged.
    """
    # Only releases, weekly releases and pre-releases may be published.
    if not (is_release(doc_version)
            or is_weekly_release(doc_version)
            or is_pre_release(doc_version)):
        raise NotARelease()
    if environment == Environments.PRODUCTION:
        # Production docs must come from a build of the tagged version
        # being published.
        if get_doc_version(flocker_version) != doc_version:
            raise NotTagged()
    configuration = DOCUMENTATION_CONFIGURATIONS[environment]
    # Buildbot uploads documentation under the full (untruncated) version.
    dev_prefix = '%s/' % (flocker_version,)
    version_prefix = 'en/%s/' % (get_doc_version(doc_version),)
    is_dev = not is_release(doc_version)
    # Marketing releases are aliased under en/latest/, everything else
    # under en/devel/.
    if is_dev:
        stable_prefix = "en/devel/"
    else:
        stable_prefix = "en/latest/"
    # Get the list of keys in the new documentation.
    new_version_keys = yield Effect(
        ListS3Keys(bucket=configuration.dev_bucket,
                   prefix=dev_prefix))
    # Get the list of keys already existing for the given version.
    # This should only be non-empty for documentation releases.
    existing_version_keys = yield Effect(
        ListS3Keys(bucket=configuration.documentation_bucket,
                   prefix=version_prefix))
    # Copy the new documentation to the documentation bucket.
    yield Effect(
        CopyS3Keys(source_bucket=configuration.dev_bucket,
                   source_prefix=dev_prefix,
                   destination_bucket=configuration.documentation_bucket,
                   destination_prefix=version_prefix,
                   keys=new_version_keys))
    # Delete any keys that aren't in the new documentation.
    yield Effect(
        DeleteS3Keys(bucket=configuration.documentation_bucket,
                     prefix=version_prefix,
                     keys=existing_version_keys - new_version_keys))
    # Update the key used for error pages if we're publishing to staging or if
    # we're publishing a marketing release to production.
    if ((environment is Environments.STAGING) or
            (environment is Environments.PRODUCTION and not is_dev)):
        yield Effect(
            UpdateS3ErrorPage(bucket=configuration.documentation_bucket,
                              target_prefix=version_prefix))
    # Update the redirect for the stable URL (en/latest/ or en/devel/)
    # to point to the new version. Returns the old target.
    old_prefix = yield Effect(
        UpdateS3RoutingRule(bucket=configuration.documentation_bucket,
                            prefix=stable_prefix,
                            target_prefix=version_prefix))
    # If we have changed versions, get all the keys from the old version
    if old_prefix:
        previous_version_keys = yield Effect(
            ListS3Keys(bucket=configuration.documentation_bucket,
                       prefix=old_prefix))
    else:
        previous_version_keys = set()
    # The changed keys are the new keys, the keys that were deleted from this
    # version, and the keys for the previous version.
    changed_keys = (new_version_keys |
                    existing_version_keys |
                    previous_version_keys)
    # S3 serves /index.html when given /, so any changed /index.html means
    # that / changed as well.
    # Note that we check for '/index.html' but remove 'index.html'
    changed_keys |= {key_name[:-len('index.html')]
                     for key_name in changed_keys
                     if key_name.endswith('/index.html')}
    # Always update the root.
    changed_keys |= {''}
    # The full paths are all the changed keys under the stable prefix, and
    # the new version prefix. This set is slightly bigger than necessary.
    changed_paths = {prefix + key_name
                     for key_name in changed_keys
                     for prefix in [stable_prefix, version_prefix]}
    # Invalidate all the changed paths in cloudfront.
    yield Effect(
        CreateCloudFrontInvalidation(cname=configuration.cloudfront_cname,
                                     paths=changed_paths))
class PublishDocsOptions(Options):
    """
    Arguments for ``publish-docs`` script.
    """
    optParameters = [
        ["flocker-version", None, flocker.__version__,
         "The version of flocker from which the documentation was built."],
        ["doc-version", None, None,
         "The version to publish the documentation as.\n"
         # The original concatenated these literals without a separator,
         # rendering "uploads.Attempting", and the last sentence lacked
         # its leading "If".
         "This will differ from \"flocker-version\" for staging uploads. "
         "Attempting to publish documentation as a documentation version "
         "publishes it as the version being updated.\n"
         "If ``doc-version`` is set to 0.3.0.post1 the documentation will be "
         "published as 0.3.0.\n"],
    ]

    optFlags = [
        ["production", None, "Publish documentation to production."],
    ]

    # Publish to staging unless --production is passed.
    environment = Environments.STAGING

    def parseArgs(self):
        # Default the documentation version to the release version underlying
        # the build being published (e.g. 0.3.0 for 0.3.0.post1).
        if self['doc-version'] is None:
            self['doc-version'] = get_doc_version(self['flocker-version'])
        if self['production']:
            self.environment = Environments.PRODUCTION
def publish_docs_main(args, base_path, top_level):
    """
    Command-line entry point that publishes the Flocker documentation.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = PublishDocsOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    dispatcher = ComposedDispatcher([boto_dispatcher, base_dispatcher])
    try:
        publish_effect = publish_docs(
            flocker_version=options['flocker-version'],
            doc_version=options['doc-version'],
            environment=options.environment,
        )
        sync_perform(dispatcher=dispatcher, effect=publish_effect)
    except NotARelease:
        sys.stderr.write("%s: Can't publish non-release.\n"
                         % (base_path.basename(),))
        raise SystemExit(1)
    except NotTagged:
        sys.stderr.write(
            "%s: Can't publish non-tagged version to production.\n"
            % (base_path.basename(),))
        raise SystemExit(1)
class UploadOptions(Options):
    """
    Options for uploading artifacts.
    """
    optParameters = [
        ["flocker-version", None, flocker.__version__,
         # The original concatenation rendered "for.Python packages for
         # X.Y.Zwill be uploaded" — two missing spaces.
         "The version of Flocker to upload artifacts for. "
         "Python packages for " + flocker.__version__ +
         " will be uploaded.\n"],
        ["target", None, ARCHIVE_BUCKET,
         "The bucket to upload artifacts to.\n"],
        ["build-server", None,
         b'http://build.clusterhq.com',
         "The URL of the build-server.\n"],
        ["homebrew-tap", None, "git@github.com:ClusterHQ/homebrew-tap.git",
         "The Git repository to add a Homebrew recipe to.\n"],
    ]

    def parseArgs(self):
        version = self['flocker-version']
        # Artifacts may only be uploaded for release-style versions, and
        # documentation releases carry no new artifacts of their own.
        if not (is_release(version)
                or is_weekly_release(version)
                or is_pre_release(version)):
            raise NotARelease()
        if get_doc_version(version) != version:
            raise DocumentationRelease()
# The operating-system packages uploaded for every Flocker release.
FLOCKER_PACKAGES = [
    b'clusterhq-python-flocker',
    b'clusterhq-flocker-cli',
    b'clusterhq-flocker-node',
]
def publish_homebrew_recipe(homebrew_repo_url, version, source_bucket,
                            scratch_directory, top_level):
    """
    Publish a Homebrew recipe to a Git repository.

    :param bytes homebrew_repo_url: URL of the Homebrew tap Git repository.
        This should be an SSH URL so as not to require a username and
        password.
    :param bytes version: Version of Flocker to publish a recipe for.
    :param bytes source_bucket: S3 bucket to get source distribution from.
    :param FilePath scratch_directory: Temporary directory to create a recipe
        in.
    :param FilePath top_level: The top-level of the flocker repository.

    :raises PushFailed: If pushing the new recipe to the repository fails.
    """
    url_template = 'https://{bucket}.s3.amazonaws.com/python/Flocker-{version}.tar.gz'  # noqa
    sdist_url = url_template.format(bucket=source_bucket, version=version)
    requirements_path = top_level.child('requirements.txt')
    content = make_recipe(
        version=version,
        sdist_url=sdist_url,
        requirements_path=requirements_path,
    )
    # Clone the tap into the scratch directory, add the recipe and push it.
    homebrew_repo = Repo.clone_from(
        url=homebrew_repo_url,
        to_path=scratch_directory.path)
    recipe = 'flocker-{version}.rb'.format(version=version)
    FilePath(homebrew_repo.working_dir).child(recipe).setContent(content)
    homebrew_repo.index.add([recipe])
    homebrew_repo.index.commit('Add recipe for Flocker version ' + version)
    # Sometimes this raises an index error, and it seems to be a race
    # condition. There should probably be a loop until push succeeds or
    # whatever condition is necessary for it to succeed is met. FLOC-2043.
    push_info = homebrew_repo.remotes.origin.push(homebrew_repo.head)[0]
    if (push_info.flags & push_info.ERROR) != 0:
        raise PushFailed()
@do
def publish_vagrant_metadata(version, box_url, scratch_directory, box_name,
                             target_bucket):
    """
    Publish Vagrant metadata for a given version of a given box.

    :param bytes version: The version of the Vagrant box to publish metadata
        for.
    :param bytes box_url: The URL of the Vagrant box.
    :param FilePath scratch_directory: A directory to create Vagrant metadata
        files in before uploading.
    :param bytes box_name: The name of the Vagrant box to publish metadata for.
    :param bytes target_bucket: S3 bucket to upload metadata to.
    """
    metadata_filename = '{box_name}.json'.format(box_name=box_name)
    # Download recursively because there may not be a metadata file
    yield Effect(DownloadS3KeyRecursively(
        source_bucket=target_bucket,
        source_prefix='vagrant',
        target_path=scratch_directory,
        filter_extensions=(metadata_filename,)))
    # Start from a fresh metadata skeleton; existing version entries are
    # merged in below.
    metadata = {
        "description": "clusterhq/{box_name} box.".format(box_name=box_name),
        "name": "clusterhq/{box_name}".format(box_name=box_name),
        "versions": [],
    }
    try:
        existing_metadata_file = scratch_directory.children()[0]
    except IndexError:
        # No metadata file published yet for this box; start from scratch.
        pass
    else:
        existing_metadata = json.loads(existing_metadata_file.getContent())
        for version_metadata in existing_metadata['versions']:
            # In the future we may want to have multiple providers for the
            # same version but for now we discard any current providers for
            # the version being added.
            if version_metadata['version'] != vagrant_version(version):
                metadata['versions'].append(version_metadata)
    metadata['versions'].append({
        "version": vagrant_version(version),
        "providers": [
            {
                "url": box_url,
                "name": "virtualbox",
            },
        ],
    })
    # If there is an existing file, overwrite it. Else create a new file.
    new_metadata_file = scratch_directory.child(metadata_filename)
    new_metadata_file.setContent(json.dumps(metadata))
    yield Effect(UploadToS3(
        source_path=scratch_directory,
        target_bucket=target_bucket,
        target_key='vagrant/' + metadata_filename,
        file=new_metadata_file,
        content_type='application/json',
    ))
@do
def update_repo(package_directory, target_bucket, target_key, source_repo,
                packages, flocker_version, distribution):
    """
    Update ``target_bucket`` yum repository with ``packages`` from
    ``source_repo`` repository.

    :param FilePath package_directory: Temporary directory to download
        repository to.
    :param bytes target_bucket: S3 bucket to upload repository to.
    :param bytes target_key: Path within S3 bucket to upload repository to.
    :param bytes source_repo: Repository on the build server to get packages
        from.
    :param list packages: List of bytes, each specifying the name of a package
        to upload to the repository.
    :param bytes flocker_version: The version of flocker to upload packages
        for.
    :param Distribution distribution: The distribution to upload packages for.
    """
    package_directory.createDirectory()
    package_type = distribution.package_type()
    # Mirror the existing repository locally, restricted to package files of
    # this distribution's type.
    yield Effect(DownloadS3KeyRecursively(
        source_bucket=target_bucket,
        source_prefix=target_key,
        target_path=package_directory,
        filter_extensions=('.' + package_type.value,)))
    # Add the newly built packages from the build server.
    downloaded_packages = yield Effect(DownloadPackagesFromRepository(
        source_repo=source_repo,
        target_path=package_directory,
        packages=packages,
        flocker_version=flocker_version,
        distribution=distribution,
    ))
    # Regenerate the repository metadata to cover the new packages.
    new_metadata = yield Effect(CreateRepo(
        repository_path=package_directory,
        distribution=distribution,
    ))
    # Upload only what changed: the new packages plus refreshed metadata.
    yield Effect(UploadToS3Recursively(
        source_path=package_directory,
        target_bucket=target_bucket,
        target_key=target_key,
        files=downloaded_packages | new_metadata,
    ))
@do
def upload_packages(scratch_directory, target_bucket, version, build_server,
                    top_level):
    """
    The ClusterHQ yum and deb repositories contain packages for Flocker, as
    well as the dependencies which aren't available in CentOS 7. It is
    currently hosted on Amazon S3. When doing a release, we want to add the
    new Flocker packages, while preserving the existing packages in the
    repository. To do this, we download the current repository, add the new
    package, update the metadata, and then upload the repository.

    :param FilePath scratch_directory: Temporary directory to download
        repository to.
    :param bytes target_bucket: S3 bucket to upload repository to.
    :param bytes version: Version to download packages for.
    :param bytes build_server: Server to download new packages from.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    distribution_names = available_distributions(
        flocker_source_path=top_level,
    )
    # One repository update per supported distribution.
    for distribution_name in distribution_names:
        distribution = DISTRIBUTION_NAME_MAP[distribution_name]
        architecture = distribution.native_package_architecture()
        yield update_repo(
            package_directory=scratch_directory.child(
                b'{}-{}-{}'.format(
                    distribution.name,
                    distribution.version,
                    architecture)),
            target_bucket=target_bucket,
            # Bucket path: distribution name (plus a version-dependent suffix
            # from get_package_key_suffix) / distro version / architecture.
            target_key=os.path.join(
                distribution.name + get_package_key_suffix(version),
                distribution.version,
                architecture),
            source_repo=os.path.join(
                build_server, b'results/omnibus',
                version,
                b'{}-{}'.format(distribution.name, distribution.version)),
            packages=FLOCKER_PACKAGES,
            flocker_version=version,
            distribution=distribution,
        )
# Twisted Web template for the pip index page; the 'packages' renderer
# emits one link per package file (see PackagesElement below).
packages_template = (
    '<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">\n'
    'This is an index for pip\n'
    '<div t:render="packages"><a>'
    '<t:attr name="href"><t:slot name="package_name" /></t:attr>'
    '<t:slot name="package_name" />'
    '</a><br />\n</div>'
    '</html>'
)
class PackagesElement(template.Element):
    """A Twisted Web template element rendering the pip index file."""

    def __init__(self, packages):
        template.Element.__init__(self, template.XMLString(packages_template))
        self._packages = packages

    @template.renderer
    def packages(self, request, tag):
        # One link per package file; the index page itself is excluded.
        return (tag.clone().fillSlots(package_name=name)
                for name in self._packages
                if name != 'index.html')
def create_pip_index(scratch_directory, packages):
    """
    Create an index file for pip.

    :param FilePath scratch_directory: Temporary directory to create index in.
    :param list packages: List of bytes, filenames of packages to be in the
        index.
    :return FilePath: The newly created ``index.html``.
    """
    index_file = scratch_directory.child('index.html')
    with index_file.open('w') as index:
        # flatten returns a Deferred, but no Deferreds occur while evaluating
        # this template, so it completes synchronously without a reactor.
        # See this cheat described at
        # https://twistedmatrix.com/documents/15.0.0/web/howto/twisted-templates.html
        template.flatten(None, PackagesElement(packages), index.write)
    return index_file
@do
def upload_pip_index(scratch_directory, target_bucket):
    """
    Upload an index file for pip to S3.

    :param FilePath scratch_directory: Temporary directory to create index in.
    :param bytes target_bucket: S3 bucket to upload index to.
    """
    # Every key under python/ is a package file that belongs in the index.
    packages = yield Effect(
        ListS3Keys(bucket=target_bucket,
                   prefix='python/'))
    index_path = create_pip_index(
        scratch_directory=scratch_directory,
        packages=packages)
    yield Effect(
        UploadToS3(
            source_path=scratch_directory,
            target_bucket=target_bucket,
            target_key='python/index.html',
            file=index_path,
        ))
@do
def upload_python_packages(scratch_directory, target_bucket, top_level,
                           output, error):
    """
    The repository contains source distributions and binary distributions
    (wheels) for Flocker. It is currently hosted on Amazon S3.

    :param FilePath scratch_directory: Temporary directory to create packages
        in.
    :param bytes target_bucket: S3 bucket to upload packages to.
    :param FilePath top_level: The top-level of the flocker repository.
    :param output: File-like object that setup.py's stdout is directed to.
    :param error: File-like object that setup.py's stderr is directed to.
    """
    # XXX This has a side effect so it should be an Effect
    # https://clusterhq.atlassian.net/browse/FLOC-1731
    check_call([
        'python', 'setup.py',
        'sdist', '--dist-dir={}'.format(scratch_directory.path),
        'bdist_wheel', '--dist-dir={}'.format(scratch_directory.path)],
        cwd=top_level.path, stdout=output, stderr=error)
    # Set comprehension with a non-shadowing name ('file' shadowed the
    # builtin in the original).
    files = {child.basename() for child in scratch_directory.children()}
    yield Effect(UploadToS3Recursively(
        source_path=scratch_directory,
        target_bucket=target_bucket,
        target_key='python',
        files=files,
    ))
def publish_artifacts_main(args, base_path, top_level):
    """
    Publish release artifacts.

    :param list args: The arguments passed to the scripts.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = UploadOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    except NotARelease:
        sys.stderr.write("%s: Can't publish artifacts for a non-release.\n"
                         % (base_path.basename(),))
        raise SystemExit(1)
    except DocumentationRelease:
        sys.stderr.write("%s: Can't publish artifacts for a documentation "
                         "release.\n" % (base_path.basename(),))
        raise SystemExit(1)
    dispatcher = ComposedDispatcher([boto_dispatcher, yum_dispatcher,
                                     base_dispatcher])
    # One scratch sub-directory per artifact type; removed in the finally
    # block below.
    scratch_directory = FilePath(tempfile.mkdtemp(
        prefix=b'flocker-upload-'))
    scratch_directory.child('packages').createDirectory()
    scratch_directory.child('python').createDirectory()
    scratch_directory.child('pip').createDirectory()
    scratch_directory.child('vagrant').createDirectory()
    scratch_directory.child('homebrew').createDirectory()
    box_type = "flocker-tutorial"
    vagrant_prefix = 'vagrant/tutorial/'
    box_name = "{box_type}-{version}.box".format(
        box_type=box_type,
        version=options['flocker-version'],
    )
    box_url = "https://{bucket}.s3.amazonaws.com/{key}".format(
        bucket=options['target'],
        key=vagrant_prefix + box_name,
    )
    try:
        sync_perform(
            dispatcher=dispatcher,
            effect=sequence([
                upload_packages(
                    scratch_directory=scratch_directory.child('packages'),
                    target_bucket=options['target'],
                    version=options['flocker-version'],
                    build_server=options['build-server'],
                    top_level=top_level,
                ),
                upload_python_packages(
                    scratch_directory=scratch_directory.child('python'),
                    target_bucket=options['target'],
                    top_level=top_level,
                    output=sys.stdout,
                    error=sys.stderr,
                ),
                upload_pip_index(
                    scratch_directory=scratch_directory.child('pip'),
                    target_bucket=options['target'],
                ),
                # Promote the tutorial Vagrant box from the dev bucket.
                Effect(
                    CopyS3Keys(
                        source_bucket=DEV_ARCHIVE_BUCKET,
                        source_prefix=vagrant_prefix,
                        destination_bucket=options['target'],
                        destination_prefix=vagrant_prefix,
                        keys=[box_name],
                    )
                ),
                publish_vagrant_metadata(
                    version=options['flocker-version'],
                    box_url=box_url,
                    scratch_directory=scratch_directory.child('vagrant'),
                    box_name=box_type,
                    target_bucket=options['target'],
                ),
            ]),
        )
        # The Homebrew publish works on git directly rather than through
        # Effects, so it runs outside sync_perform.
        publish_homebrew_recipe(
            homebrew_repo_url=options['homebrew-tap'],
            version=options['flocker-version'],
            source_bucket=options['target'],
            scratch_directory=scratch_directory.child('homebrew'),
            top_level=top_level,
        )
    finally:
        scratch_directory.remove()
def calculate_base_branch(version, path):
    """
    The branch a release branch is created from depends on the release
    type and sometimes which pre-releases have preceeded this.

    :param bytes version: The version of Flocker to get a base branch for.
    :param bytes path: See :func:`git.Repo.init`.
    :returns: The base branch from which the new release branch was created.
    :raises NotARelease: If ``version`` is not a release-style version.
    :raises TagExists: If a tag named ``version`` already exists.
    :raises NoPreRelease: If a marketing release is requested but no
        pre-release of it has been tagged.
    :raises MissingPreRelease: If the expected preceding pre-release does
        not exist.
    """
    if not (is_release(version)
            or is_weekly_release(version)
            or is_pre_release(version)):
        raise NotARelease()
    repo = Repo(path=path, search_parent_directories=True)
    existing_tags = [tag for tag in repo.tags if tag.name == version]
    if existing_tags:
        raise TagExists()
    release_branch_prefix = 'release/flocker-'
    if is_weekly_release(version):
        base_branch_name = 'master'
    elif is_pre_release(version) and get_pre_release(version) == 1:
        # The first pre-release of a version starts from master.
        base_branch_name = 'master'
    elif get_doc_version(version) != version:
        # A documentation release branches from the release branch of the
        # version whose documentation it updates.
        base_branch_name = release_branch_prefix + get_doc_version(version)
    else:
        # Otherwise branch from the latest existing pre-release of the
        # targeted version.
        if is_pre_release(version):
            target_version = target_release(version)
        else:
            target_version = version
        pre_releases = []
        for tag in repo.tags:
            try:
                if (is_pre_release(tag.name) and
                        target_version == target_release(tag.name)):
                    pre_releases.append(tag.name)
            except UnparseableVersion:
                # The Flocker repository contains versions which are not
                # currently considered valid versions.
                pass
        if not pre_releases:
            raise NoPreRelease()
        latest_pre_release = sorted(
            pre_releases,
            key=lambda pre_release: get_pre_release(pre_release))[-1]
        if (is_pre_release(version) and get_pre_release(version) >
                get_pre_release(latest_pre_release) + 1):
            raise MissingPreRelease()
        base_branch_name = release_branch_prefix + latest_pre_release
    # We create a new branch from a branch, not a tag, because a maintenance
    # or documentation change may have been applied to the branch and not the
    # tag.
    # The branch must be available locally for the next step.
    repo.git.checkout(base_branch_name)
    # NOTE: generator .next() is a Python 2 idiom (this module targets
    # Python 2).
    return (
        branch for branch in repo.branches if
        branch.name == base_branch_name).next()
def create_release_branch(version, base_branch):
    """
    Check out a new Git branch on which release changes can be made and
    which will later be tagged as a release.

    :param bytes version: The version of Flocker to create a release branch
        for.
    :param base_branch: See :func:`git.Head`. The branch to create the release
        branch from.
    :raises BranchExists: If the release branch already exists.
    """
    release_branch = b'release/flocker-' + version
    try:
        base_branch.checkout(b=release_branch)
    except GitCommandError:
        # git refuses to create a branch that already exists.
        raise BranchExists()
class CreateReleaseBranchOptions(Options):
    """
    Arguments for ``create-release-branch`` script.
    """
    optParameters = [
        ["flocker-version", None, None,
         "The version of Flocker to create a release branch for."],
    ]

    def parseArgs(self):
        # The version is mandatory; there is no sensible default.
        if self['flocker-version'] is None:
            raise UsageError("`--flocker-version` must be specified.")
def create_release_branch_main(args, base_path, top_level):
    """
    Create a release branch.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = CreateReleaseBranchOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    version = options['flocker-version']
    # Use this file's location to find the enclosing git repository.
    path = FilePath(__file__).path
    try:
        base_branch = calculate_base_branch(version=version, path=path)
        create_release_branch(version=version, base_branch=base_branch)
    except NotARelease:
        sys.stderr.write("%s: Can't create a release branch for non-release.\n"
                         % (base_path.basename(),))
        raise SystemExit(1)
    except TagExists:
        sys.stderr.write("%s: Tag already exists for this release.\n"
                         % (base_path.basename(),))
        raise SystemExit(1)
    except NoPreRelease:
        sys.stderr.write("%s: No (previous) pre-release exists for this "
                         "release.\n" % (base_path.basename(),))
        raise SystemExit(1)
    except BranchExists:
        sys.stderr.write("%s: The release branch already exists.\n"
                         % (base_path.basename(),))
        raise SystemExit(1)
class TestRedirectsOptions(Options):
    """
    Arguments for ``test-redirects`` script.
    """
    optParameters = [
        ["doc-version", None, flocker.__version__,
         "The version which the documentation sites are expected to redirect "
         "to.\n"
         ],
    ]
    optFlags = [
        ["production", None, "Check the production documentation site."],
    ]
    # Check staging unless --production is passed.
    environment = Environments.STAGING

    def parseArgs(self):
        if self['production']:
            self.environment = Environments.PRODUCTION
def get_expected_redirects(flocker_version):
    """
    Get the expected redirects for a given version of Flocker, if that version
    has been published successfully. Documentation versions (e.g. 0.3.0.post2)
    are published to their release version counterparts (e.g. 0.3.0).

    :param bytes flocker_version: The version of Flocker for which to get
        expected redirects.
    :return: Dictionary mapping paths to the path to which they are expected
        to redirect.
    """
    published_version = get_doc_version(flocker_version)
    version_root = '/en/' + published_version + '/'
    if is_release(published_version):
        # Marketing releases are reachable from the site root and the
        # en/latest alias.
        return {
            '/': version_root,
            '/en/': version_root,
            '/en/latest': version_root,
            '/en/latest/faq/index.html': version_root + 'faq/index.html',
        }
    # Development documentation only redirects through the en/devel alias.
    return {
        '/en/devel': version_root,
        '/en/devel/faq/index.html': version_root + 'faq/index.html',
    }
def test_redirects_main(args, base_path, top_level):
    """
    Tests redirects to Flocker documentation.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = TestRedirectsOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    expected_redirects = get_expected_redirects(
        flocker_version=options['doc-version'])
    document_configuration = DOCUMENTATION_CONFIGURATIONS[options.environment]
    base_url = 'https://' + document_configuration.cloudfront_cname
    failed_redirects = []
    # Follow each redirect and compare the final URL with the expectation.
    for path in expected_redirects:
        original_url = base_url + path
        expected_url = base_url + expected_redirects[path]
        final_url = requests.get(original_url).url
        if expected_url != final_url:
            failed_redirects.append(original_url)
            message = (
                "'{original_url}' expected to redirect to '{expected_url}', "
                "instead redirects to '{final_url}'.\n").format(
                    original_url=original_url,
                    expected_url=expected_url,
                    final_url=final_url,
                )
            sys.stderr.write(message)
    if len(failed_redirects):
        raise SystemExit(1)
    else:
        # Python 2 print statement (this module targets Python 2).
        print 'All tested redirects work correctly.'
class PublishDevBoxOptions(Options):
    """
    Options for publishing a Vagrant development box.
    """
    # Defaults mirror UploadOptions: the current version and the release
    # archive bucket.
    optParameters = [
        ["flocker-version", None, flocker.__version__,
         "The version of Flocker to upload a development box for.\n"],
        ["target", None, ARCHIVE_BUCKET,
         "The bucket to upload a development box to.\n"],
    ]
def publish_dev_box_main(args, base_path, top_level):
    """
    Publish a development Vagrant box.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = PublishDevBoxOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    # NOTE(review): unlike publish_artifacts_main, this scratch directory is
    # never removed — looks like an oversight; confirm before relying on it.
    scratch_directory = FilePath(tempfile.mkdtemp(
        prefix=b'flocker-upload-'))
    scratch_directory.child('vagrant').createDirectory()
    box_type = "flocker-dev"
    prefix = 'vagrant/dev/'
    box_name = "{box_type}-{version}.box".format(
        box_type=box_type,
        version=options['flocker-version'],
    )
    box_url = "https://{bucket}.s3.amazonaws.com/{key}".format(
        bucket=options['target'],
        key=prefix + box_name,
    )
    # Copy the box built by buildbot out of the dev bucket, then publish
    # updated Vagrant metadata pointing at it.
    sync_perform(
        dispatcher=ComposedDispatcher([boto_dispatcher, base_dispatcher]),
        effect=sequence([
            Effect(
                CopyS3Keys(
                    source_bucket=DEV_ARCHIVE_BUCKET,
                    source_prefix=prefix,
                    destination_bucket=options['target'],
                    destination_prefix=prefix,
                    keys=[box_name],
                )
            ),
            publish_vagrant_metadata(
                version=options['flocker-version'],
                box_url=box_url,
                scratch_directory=scratch_directory.child('vagrant'),
                box_name=box_type,
                target_bucket=options['target'],
            ),
        ]),
    )
def update_license_file(args, top_level, year=None):
    """
    Update the LICENSE file to include the given copyright year.

    :param list args: The arguments passed to the script.
    :param FilePath top_level: The top-level of the flocker repository.
    :param int year: The copyright year to substitute into the template.
        Defaults to the current UTC year at call time. (The original default,
        ``datetime.now(UTC).year`` in the ``def`` line, was evaluated once at
        import time, so a long-running process would keep a stale year.)
    """
    if year is None:
        year = datetime.now(UTC).year
    license_template = top_level.child('admin').child('LICENSE.template')
    with license_template.open() as input_file:
        with top_level.child('LICENSE').open('w') as output_file:
            output_file.write(input_file.read().format(current_year=year))
| |
"""A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics: documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is the string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
"""
import string, sys
# Public API of this module.
__all__ = ["Cmd"]

# Default interactive prompt and the characters allowed in command words.
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
    """A simple framework for writing line-oriented command interpreters.

    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.
    """
    # Class-level defaults; subclasses commonly override these.
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='
    lastcmd = ''
    intro = None
    doc_leader = ""
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1

    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.

        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key. If completekey is
        not None and the readline module is available, command completion
        is done automatically. The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.
        """
        import sys  # redundant with the module-level import; kept as-is
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []
        self.completekey = completekey

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
        """
        self.preloop()
        if self.use_rawinput and self.completekey:
            try:
                # Install our completer, remembering the previous one so it
                # can be restored when the loop exits.
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is not None:
                # An explicit argument overrides the class/instance intro.
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro)+"\n")
            stop = None
            while not stop:
                if self.cmdqueue:
                    # Queued commands take priority over interactive input.
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = input(self.prompt)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            # readline() returns '' only at EOF.
                            line = 'EOF'
                        else:
                            line = line[:-1] # chop \n
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass

    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.
        """
        return line

    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass

    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.
        """
        pass

    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is shorthand for the help command.
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is shorthand for 'shell', when the subclass provides it.
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        # The command word is the leading run of identifier characters.
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line

    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.

        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.
        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        # Remember the full line so emptyline() can repeat it.
        self.lastcmd = line
        if cmd == '':
            return self.default(line)
        else:
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)

    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.

        If this method is not overridden, it repeats the last nonempty
        command entered.
        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)

    def default(self, line):
        """Called on an input line when the command prefix is not recognized.

        If this method is not overridden, it prints an error message and
        returns.
        """
        self.stdout.write('*** Unknown syntax: %s\n'%line)

    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.

        By default, it returns an empty list.
        """
        return []

    def completenames(self, text, *ignored):
        """Return the command names (do_* methods) starting with *text*."""
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            # First call for this text: (re)compute the match list.
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx>0:
                # Completing an argument: dispatch to complete_<cmd>.
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                # Completing the command word itself.
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None

    def get_names(self):
        """Return attribute names from this class and all its bases."""
        # Inheritance says we have to look in class and
        # base classes; order is not important.
        names = []
        classes = [self.__class__]
        while classes:
            aclass = classes.pop(0)
            if aclass.__bases__:
                classes = classes + list(aclass.__bases__)
            names = names + dir(aclass)
        return names

    def complete_help(self, *args):
        """Complete help topics with the known command names."""
        return self.completenames(*args)

    def do_help(self, arg):
        """With an argument, show help for that topic (help_<arg>() or the
        do_<arg> docstring); with no argument, list all topics grouped into
        documented commands, misc topics, and undocumented commands."""
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                try:
                    # No help_<arg> method; fall back to the do_ docstring.
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n"%str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
                return
            func()
        else:
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n"%str(self.doc_leader))
            self.print_topics(self.doc_header, cmds_doc, 15,80)
            self.print_topics(self.misc_header, list(help.keys()),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)

    def print_topics(self, header, cmds, cmdlen, maxcol):
        """Print *header*, an optional ruler line, and *cmds* in columns."""
        if cmds:
            self.stdout.write("%s\n"%str(header))
            if self.ruler:
                self.stdout.write("%s\n"%str(self.ruler * len(header)))
            self.columnize(cmds, maxcol-1)
            self.stdout.write("\n")

    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.

        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return

        nonstrings = [i for i in range(len(list))
                      if not isinstance(list[i], str)]
        if nonstrings:
            raise TypeError("list[i] not a string for i in %s"
                            % ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n'%str(list[0]))
            return
        # Try every row count from 1 upwards
        for nrows in range(1, len(list)):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            totwidth = -2
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = row + nrows*col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            # Nothing fit: fall back to one item per row.
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            # Drop empty cells at the end of the row.
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                texts[col] = texts[col].ljust(colwidths[col])
            self.stdout.write("%s\n"%str("  ".join(texts)))
| |
#!/usr/bin/env python
#
# WvTest:
# Copyright (C)2007-2012 Versabanq Innovations Inc. and contributors.
# Licensed under the GNU Library General Public License, version 2.
# See the included file named LICENSE for license information.
# You can get wvtest from: http://github.com/apenwarr/wvtest
#
import atexit
import inspect
import os
import re
import sys
import traceback
# NOTE
# Why do we need the "!= main" check? Because if you run
# wvtest.py as a main program and it imports your test files, then
# those test files will try to import the wvtest module recursively.
# That actually *works* fine, because we don't run this main program
# when we're imported as a module. But you end up with two separate
# wvtest modules, the one that gets imported, and the one that's the
# main program. Each of them would have duplicated global variables
# (most importantly, wvtest._registered), and so screwy things could
# happen. Thus, we make the main program module *totally* different
# from the imported module. Then we import wvtest (the module) into
# wvtest (the main program) here and make sure to refer to the right
# versions of global variables.
#
# All this is done just so that wvtest.py can be a single file that's
# easy to import into your own applications.
if __name__ != '__main__':   # we're imported as a module
    _registered = []  # queued (func, innerfunc) pairs added by @wvtest
    _tests = 0        # total number of checks performed
    _fails = 0        # number of checks that did not pass
def wvtest(func, innerfunc=None):
    """ Use this decorator (@wvtest) in front of any function you want to
        run as part of the unit test suite.  Then run:
            python wvtest.py path/to/yourtest.py [other test.py files...]
        to run all the @wvtest functions in the given file(s).
    """
    # Record both the wrapper and the underlying function for reporting.
    target = innerfunc or func
    _registered.append((func, target))
    return func
def _result(msg, tb, code):
    """Record and print a single test-result line.

    msg is the check's description, tb a (filename, line, func, text)
    tuple locating the check, and code one of 'ok', 'FAILED', or
    'EXCEPTION'.  Bumps the module-wide _tests counter, and _fails for
    any non-'ok' code.
    """
    global _tests, _fails
    _tests += 1
    if code != 'ok':
        _fails += 1
    (filename, line, func, text) = tb
    filename = os.path.basename(filename)
    # Collapse whitespace so the message stays on one line.
    msg = re.sub(r'\s+', ' ', str(msg))
    # Flush both streams so result lines interleave correctly with output.
    sys.stderr.flush()
    print '! %-70s %s' % ('%s:%-4d %s' % (filename, line, msg), code)
    sys.stdout.flush()
def _check(cond, msg, xdepth):
    """Record one check: 'ok' if cond is truthy, 'FAILED' otherwise.

    Returns cond so callers can chain on the outcome.  xdepth extends the
    stack lookup used to locate the original WV* call site.
    """
    frame = traceback.extract_stack()[-3 - xdepth]
    _result(msg, frame, 'ok' if cond else 'FAILED')
    return cond
def _code(xdepth):
    """Return the source text of the calling check, stripped of its
    outermost "NAME(...)" wrapper, e.g. "WVPASSEQ(a, b)" -> "a, b".

    xdepth extends the stack lookup when called through extra wrapper
    frames; the -3 skips _code itself, its caller, and the WV* helper.
    """
    (filename, line, func, text) = traceback.extract_stack()[-3 - xdepth]
    text = re.sub(r'^[\w\.]+\((.*)\)(\s*#.*)?$', r'\1', str(text));
    return text
def WVPASS(cond = True, xdepth = 0):
    """Record a passing check when *cond* is truthy, a failure otherwise."""
    source = _code(xdepth)
    return _check(cond, source, xdepth)
def WVFAIL(cond = True, xdepth = 0):
    """Record a passing check when *cond* is falsy, a failure otherwise."""
    label = 'NOT(%s)' % _code(xdepth)
    return _check(not cond, label, xdepth)
def WVPASSIS(a, b, xdepth = 0):
    """Record a failure unless *a* is *b* (identity check)."""
    label = '%s is %s' % (repr(a), repr(b))
    return _check(a is b, label, xdepth)
def WVPASSISNOT(a, b, xdepth = 0):
    """Record a failure unless *a* is not *b* (identity check)."""
    label = '%s is not %s' % (repr(a), repr(b))
    return _check(a is not b, label, xdepth)
def WVPASSEQ(a, b, xdepth = 0):
    """Record a failure unless a == b."""
    label = '%s == %s' % (repr(a), repr(b))
    return _check(a == b, label, xdepth)
def WVPASSNE(a, b, xdepth = 0):
    """Record a failure unless a != b."""
    label = '%s != %s' % (repr(a), repr(b))
    return _check(a != b, label, xdepth)
def WVPASSLT(a, b, xdepth = 0):
    """Record a failure unless a < b."""
    label = '%s < %s' % (repr(a), repr(b))
    return _check(a < b, label, xdepth)
def WVPASSLE(a, b, xdepth = 0):
    """Record a failure unless a <= b."""
    label = '%s <= %s' % (repr(a), repr(b))
    return _check(a <= b, label, xdepth)
def WVPASSGT(a, b, xdepth = 0):
    """Record a failure unless a > b."""
    label = '%s > %s' % (repr(a), repr(b))
    return _check(a > b, label, xdepth)
def WVPASSGE(a, b, xdepth = 0):
    """Record a failure unless a >= b."""
    label = '%s >= %s' % (repr(a), repr(b))
    return _check(a >= b, label, xdepth)
def WVPASSNEAR(a, b, places = 7, delta = None, xdepth = 0):
    """Record a failure unless *a* is approximately equal to *b*.

    With a truthy *delta* the tolerance is |delta|; otherwise both values
    are compared after rounding to *places* decimal places.
    """
    label = '%s ~= %s' % (repr(a), repr(b))
    if delta:
        return _check(abs(a - b) <= abs(delta), label, xdepth)
    return _check(round(a, places) == round(b, places), label, xdepth)
def WVPASSFAR(a, b, places = 7, delta = None, xdepth = 0):
    """Record a failure unless *a* is NOT approximately equal to *b*.

    With a truthy *delta* the values must differ by more than |delta|;
    otherwise they must differ after rounding to *places* decimal places.
    """
    label = '%s ~= %s' % (repr(a), repr(b))
    if delta:
        return _check(abs(a - b) > abs(delta), label, xdepth)
    return _check(round(a, places) != round(b, places), label, xdepth)
def _except_report(cond, code, xdepth):
    """Report the outcome of an exception-expectation check."""
    label = 'EXCEPT(%s)' % code
    return _check(cond, label, xdepth + 1)
class _ExceptWrapper(object):
    """Context-manager form of WVEXCEPT: the check passes iff the 'with'
    body raises exactly the expected exception type."""

    def __init__(self, etype, xdepth):
        self.etype, self.xdepth = etype, xdepth
        self.code = None

    def __enter__(self):
        # Capture the source text of the 'with' statement for reporting.
        self.code = _code(self.xdepth)

    def __exit__(self, etype, value, traceback):
        if etype == self.etype:
            _except_report(True, self.code, self.xdepth)
            return 1  # success: suppress the expected exception
        if etype is None:
            # Body completed without raising: that's a failure.
            _except_report(False, self.code, self.xdepth)
            return 0
        # Wrong exception type: record the failure and let it propagate.
        _except_report(False, self.code, self.xdepth)
def _WVEXCEPT(etype, xdepth, func, *args, **kwargs):
    """Implementation behind WVEXCEPT.

    With *func*: call func(*args, **kwargs) and pass iff it raises
    *etype*; any other exception is counted as a failure and re-raised,
    and no exception at all is a failure.  Without *func*: return a
    context manager that applies the same check to a 'with' body.
    """
    if func:
        code = _code(xdepth + 1)
        try:
            func(*args, **kwargs)
        except etype, e:
            return _except_report(True, code, xdepth + 1)
        except:
            # Unexpected exception type: count the failure, then propagate.
            _except_report(False, code, xdepth + 1)
            raise
        else:
            # No exception was raised at all.
            return _except_report(False, code, xdepth + 1)
    else:
        return _ExceptWrapper(etype, xdepth)
def WVEXCEPT(etype, func=None, *args, **kwargs):
    """ Counts a test failure unless func throws an 'etype' exception.
        You have to spell out the function name and arguments, rather than
        calling the function yourself, so that WVEXCEPT can run before
        your test code throws an exception.
    """
    outcome = _WVEXCEPT(etype, 0, func, *args, **kwargs)
    return outcome
def _check_unfinished():
    """atexit hook: flag tests that were registered but never run (i.e.
    wvtest_main() was not called) and exit nonzero on any failure."""
    if _registered:
        for func, innerfunc in _registered:
            print 'WARNING: not run: %r' % (innerfunc,)
        WVFAIL('wvtest_main() not called')
    if _fails:
        sys.exit(1)

atexit.register(_check_unfinished)
def _run_in_chdir(path, func, *args, **kwargs):
    """Run func(*args, **kwargs) with *path* as the working directory and
    appended (with its parent) to sys.path.

    Both the working directory and sys.path are restored afterwards, even
    if *func* raises.
    """
    oldwd = os.getcwd()
    # Bug fix: copy instead of aliasing.  "sys.path += [...]" mutates the
    # list in place, so "oldpath = sys.path" referred to the already
    # modified list and the restore below was a no-op, leaking entries.
    oldpath = list(sys.path)
    try:
        if path: os.chdir(path)
        sys.path += [path, os.path.split(path)[0]]
        return func(*args, **kwargs)
    finally:
        os.chdir(oldwd)
        sys.path = oldpath
def _runtest(fname, f, innerfunc):
    """Run one registered test function, reporting any uncaught exception
    as an 'EXCEPTION' test result instead of crashing the runner."""
    import wvtest as _wvtestmod
    mod = inspect.getmodule(innerfunc)
    relpath = os.path.relpath(mod.__file__, os.getcwd()).replace('.pyc', '.py')
    print
    print 'Testing "%s" in %s:' % (fname, relpath)
    sys.stdout.flush()
    try:
        # Tests expect to run from their own source directory.
        _run_in_chdir(os.path.split(mod.__file__)[0], f)
    except Exception, e:
        print
        print traceback.format_exc()
        tb = sys.exc_info()[2]
        # Report via the shared module so the global counters are updated.
        _wvtestmod._result(repr(e), traceback.extract_tb(tb)[-1], 'EXCEPTION')
def _run_registered_tests():
    """Pop and run every test queued by the @wvtest decorator."""
    import wvtest as _wvtestmod
    # Drain the shared module's queue so the main-program and imported
    # copies of wvtest stay in sync (see NOTE at the top of the file).
    while _wvtestmod._registered:
        func, innerfunc = _wvtestmod._registered.pop(0)
        _runtest(innerfunc.func_name, func, innerfunc)
    print
def wvtest_main(extra_testfiles=[]):
import wvtest as _wvtestmod
_run_registered_tests()
for modname in extra_testfiles:
if not os.path.exists(modname):
print 'Skipping: %s' % modname
continue
if modname.endswith('.py'):
modname = modname[:-3]
print 'Importing: %s' % modname
path, mod = os.path.split(os.path.abspath(modname))
nicename = modname.replace(os.path.sep, '.')
while nicename.startswith('.'):
nicename = modname[1:]
_run_in_chdir(path, __import__, nicename, None, None, [])
_run_registered_tests()
print
print 'WvTest: %d tests, %d failures.' % (_wvtestmod._tests,
_wvtestmod._fails)
if __name__ == '__main__':
    # Run as a program: register this module under the importable name
    # 'wvtest' so the test files and the main program share one set of
    # globals (see NOTE at the top of the file).
    import wvtest as _wvtestmod
    sys.modules['wvtest'] = _wvtestmod
    sys.modules['wvtest.wvtest'] = _wvtestmod
    wvtest_main(sys.argv[1:])
| |
# Copyright 2013 OpenStack Foundation
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from manila.common import constants
from manila import exception
from manila import network
from manila.network.neutron import api as neutron_api
from manila.network.neutron import constants as neutron_constants
from manila import utils
# Config options consumed by NeutronSingleNetworkPlugin: the one
# network/subnet pair every share server is created on.
neutron_single_network_plugin_opts = [
    cfg.StrOpt(
        'neutron_net_id',
        help="Default Neutron network that will be used for share server "
             "creation. This opt is used only with "
             "class 'NeutronSingleNetworkPlugin'.",
        deprecated_group='DEFAULT'),
    cfg.StrOpt(
        'neutron_subnet_id',
        help="Default Neutron subnet that will be used for share server "
             "creation. Should be assigned to network defined in opt "
             "'neutron_net_id'. This opt is used only with "
             "class 'NeutronSingleNetworkPlugin'.",
        deprecated_group='DEFAULT'),
]

CONF = cfg.CONF
class NeutronNetworkPlugin(network.NetworkBaseAPI):
    """Manila network plugin backed by Neutron.

    Creates one Neutron port per requested allocation and mirrors the
    port data into manila's network_allocation DB records.
    """

    def __init__(self, *args, **kwargs):
        # All args/kwargs except db_driver are forwarded to the lazily
        # constructed Neutron API client (see neutron_api below).
        db_driver = kwargs.pop('db_driver', None)
        super(NeutronNetworkPlugin, self).__init__(db_driver=db_driver)
        self._neutron_api = None
        self._neutron_api_args = args
        self._neutron_api_kwargs = kwargs

    @property
    @utils.synchronized("instantiate_neutron_api")
    def neutron_api(self):
        # Lazy, lock-protected construction so a single client instance is
        # shared by all callers of this property.
        if not self._neutron_api:
            self._neutron_api = neutron_api.API(*self._neutron_api_args,
                                                **self._neutron_api_kwargs)
        return self._neutron_api

    def allocate_network(self, context, share_server, share_network, **kwargs):
        """Allocate network resources using given network information.

        Create neutron ports for a given neutron network and subnet,
        create manila db records for allocated neutron ports.

        :param context: RequestContext object
        :param share_network: share network data
        :param kwargs: allocations parameters given by the back-end
                       driver. Supported params:
                       'count' - how many allocations should be created
                       'device_owner' - set owner for network allocations
        :rtype: list of :class: 'dict'
        """
        if not self._has_provider_network_extension():
            msg = "%s extension required" % neutron_constants.PROVIDER_NW_EXT
            raise exception.NetworkBadConfigurationException(reason=msg)

        # Persist the network/subnet details on the share network record
        # before creating any ports.
        self._save_neutron_network_data(context, share_network)
        self._save_neutron_subnet_data(context, share_network)

        allocation_count = kwargs.get('count', 1)
        device_owner = kwargs.get('device_owner', 'share')

        ports = []
        for __ in range(0, allocation_count):
            ports.append(self._create_port(context, share_server,
                                           share_network, device_owner))

        return ports

    def deallocate_network(self, context, share_server_id):
        """Deallocate neutron network resources for the given share server.

        Delete previously allocated neutron ports, delete manila db
        records for deleted ports.

        :param context: RequestContext object
        :param share_server_id: id of share server
        :rtype: None
        """
        ports = self.db.network_allocations_get_for_share_server(
            context, share_server_id)

        for port in ports:
            self._delete_port(context, port)

    def _create_port(self, context, share_server, share_network, device_owner):
        # Create the Neutron port, then mirror it into a manila
        # network_allocation record.
        port = self.neutron_api.create_port(
            share_network['project_id'],
            network_id=share_network['neutron_net_id'],
            subnet_id=share_network['neutron_subnet_id'],
            device_owner='manila:' + device_owner)
        port_dict = {
            'id': port['id'],
            'share_server_id': share_server['id'],
            'ip_address': port['fixed_ips'][0]['ip_address'],
            'mac_address': port['mac_address'],
            'status': constants.STATUS_ACTIVE,
        }
        return self.db.network_allocation_create(context, port_dict)

    def _delete_port(self, context, port):
        # Delete the Neutron port; on failure mark the DB record errored
        # and propagate, otherwise remove the DB record as well.
        try:
            self.neutron_api.delete_port(port['id'])
        except exception.NetworkException:
            self.db.network_allocation_update(
                context, port['id'], {'status': constants.STATUS_ERROR})
            raise
        else:
            self.db.network_allocation_delete(context, port['id'])

    def _has_provider_network_extension(self):
        # True when Neutron exposes the provider-network API extension.
        extensions = self.neutron_api.list_extensions()
        return neutron_constants.PROVIDER_NW_EXT in extensions

    def _save_neutron_network_data(self, context, share_network):
        # Copy the provider network type/segmentation id onto the share
        # network DB record.
        net_info = self.neutron_api.get_network(
            share_network['neutron_net_id'])
        provider_nw_dict = {
            'network_type': net_info['provider:network_type'],
            'segmentation_id': net_info['provider:segmentation_id']
        }
        self.db.share_network_update(context,
                                     share_network['id'],
                                     provider_nw_dict)

    def _save_neutron_subnet_data(self, context, share_network):
        # Copy the subnet CIDR and IP version onto the share network record.
        subnet_info = self.neutron_api.get_subnet(
            share_network['neutron_subnet_id'])
        subnet_values = {
            'cidr': subnet_info['cidr'],
            'ip_version': subnet_info['ip_version']
        }
        self.db.share_network_update(context,
                                     share_network['id'],
                                     subnet_values)
class NeutronSingleNetworkPlugin(NeutronNetworkPlugin):
    """Neutron plugin that ignores per-share-network net/subnet choices
    and always allocates from the single pair configured via the
    'neutron_net_id' / 'neutron_subnet_id' options.
    """

    def __init__(self, *args, **kwargs):
        super(NeutronSingleNetworkPlugin, self).__init__(*args, **kwargs)
        CONF.register_opts(
            neutron_single_network_plugin_opts,
            group=self.neutron_api.config_group_name)
        self.net = self.neutron_api.configuration.neutron_net_id
        self.subnet = self.neutron_api.configuration.neutron_subnet_id
        # Fail fast on a broken configuration instead of at allocation time.
        self._verify_net_and_subnet()

    def allocate_network(self, context, share_server, share_network, **kwargs):
        """Force the configured net/subnet onto *share_network*, then
        delegate to the base plugin.

        :returns: list of created network allocation dicts
        """
        share_network = self._update_share_network_net_data(
            context, share_network)
        # Bug fix: propagate the parent's return value.  The base class
        # returns the list of created allocations; this override used to
        # drop it and implicitly return None.
        return super(NeutronSingleNetworkPlugin, self).allocate_network(
            context, share_server, share_network, **kwargs)

    def _verify_net_and_subnet(self):
        """Validate that both options are set and the subnet belongs to
        the network.

        :raises: exception.NetworkBadConfigurationException
        """
        data = dict(net=self.net, subnet=self.subnet)
        if self.net and self.subnet:
            net = self.neutron_api.get_network(self.net)
            if not (net.get('subnets') and data['subnet'] in net['subnets']):
                raise exception.NetworkBadConfigurationException(
                    "Subnet '%(subnet)s' does not belong to "
                    "network '%(net)s'." % data)
        else:
            raise exception.NetworkBadConfigurationException(
                "Neutron net and subnet are expected to be both set. "
                "Got: net=%(net)s and subnet=%(subnet)s." % data)

    def _update_share_network_net_data(self, context, share_network):
        """Overwrite the share network's net/subnet ids with the
        configured pair, persisting the change in the DB.

        A conflicting non-None value already set on the share network is
        rejected rather than silently replaced.

        :returns: the (possibly updated) share network dict
        :raises: exception.NetworkBadConfigurationException
        """
        upd = dict()

        if share_network.get('nova_net_id') is not None:
            raise exception.NetworkBadConfigurationException(
                "Share network has nova_net_id set.")

        if not share_network.get('neutron_net_id') == self.net:
            if share_network.get('neutron_net_id') is not None:
                raise exception.NetworkBadConfigurationException(
                    "Using neutron net id different from None or value "
                    "specified in the config is forbidden for "
                    "NeutronSingleNetworkPlugin. Allowed values: (%(net)s, "
                    "None), received value: %(err)s" % {
                        "net": self.net,
                        "err": share_network.get('neutron_net_id')})
            upd['neutron_net_id'] = self.net

        if not share_network.get('neutron_subnet_id') == self.subnet:
            if share_network.get('neutron_subnet_id') is not None:
                raise exception.NetworkBadConfigurationException(
                    "Using neutron subnet id different from None or value "
                    "specified in the config is forbidden for "
                    "NeutronSingleNetworkPlugin. Allowed values: (%(snet)s, "
                    "None), received value: %(err)s" % {
                        "snet": self.subnet,
                        "err": share_network.get('neutron_subnet_id')})
            upd['neutron_subnet_id'] = self.subnet

        if upd:
            share_network = self.db.share_network_update(
                context, share_network['id'], upd)

        return share_network
| |
#!/usr/bin/env python3
import argparse
from datetime import datetime
from dateutil import tz
import json
import sys
import requests
class Updater(object):
def __init__(self, server, year):
    """Create an updater for *year* talking to *server*, then restore
    any previously persisted state via load()."""
    self._server, self._year = server, year
    # Round 0 means regular season; playoff rounds start at 1.
    self._current_round = 0
    self._teams = {}
    self._matchups = {}
    self.load()  # restore persisted state, if any
def run(self):
    """Advance the updater by one step and persist the result.

    In regular-season mode (round 0) this refreshes team standings and
    rebuilds the projected first-round bracket; once every team has
    played a full 82-game season it switches to playoff mode (round 1).
    In playoff mode it only refreshes the live matchup results.
    """
    if self._current_round == 0:
        self._teams = self.get_teams()
        self._matchups = self.create_matchups_tree()
        if len(list(self._teams.values())) > 0:
            standings = self.get_standings(list(self._teams.values()))
            self.create_matchups(standings)
        if self.is_season_finished():
            print('Playoff starting')
            self._current_round = 1
            self.store()
        else:
            print("Storing prelim results")
            self._current_round = 0
            self.store()
    else:
        self.update_matchups()
        self.store()
def is_season_finished(self):
for team in list(self._teams.values()):
remaining = 82 - team['standings']['gamesPlayed']
if remaining > 0:
return False
return True
def create_matchups(self, standings):
    """Seed the first playoff round from current *standings*.

    Division leaders each host a wild-card team: the conference leader
    with the better (lower) conferenceRank draws the second wild card,
    the other leader the first.  Division runners-up (2nd vs 3rd place)
    fill the remaining two series per conference.
    """
    ealeader = standings['Eastern']['Atlantic'][0]
    emleader = standings['Eastern']['Metropolitan'][0]
    wcleader = standings['Western']['Central'][0]
    wpleader = standings['Western']['Pacific'][0]
    # NOTE(review): e1wild/e2wild (and w1wild/w2wild) stay unbound if no
    # team carries wildCardRank 1/2 -- presumably the standings feed
    # always provides them during the season; verify against the API.
    for team in standings['Eastern']['teams']:
        if int(team['standings']['wildCardRank']) == 1:
            e1wild = team
        if int(team['standings']['wildCardRank']) == 2:
            e2wild = team
    for team in standings['Western']['teams']:
        if int(team['standings']['wildCardRank']) == 1:
            w1wild = team
        if int(team['standings']['wildCardRank']) == 2:
            w2wild = team
    a1 = self._matchups['a1']
    m1 = self._matchups['m1']
    self.update_matchup(a1, ealeader['info']['id'])
    self.update_matchup(m1, emleader['info']['id'])
    # Better-ranked Eastern leader faces the second wild card.
    if int(ealeader['standings']['conferenceRank']) < int(emleader['standings']['conferenceRank']):
        self.update_matchup(a1, e2wild['info']['id'])
        self.update_matchup(m1, e1wild['info']['id'])
    else:
        self.update_matchup(a1, e1wild['info']['id'])
        self.update_matchup(m1, e2wild['info']['id'])
    a2 = self._matchups['a2']
    self.update_matchup(a2, standings['Eastern']['Atlantic'][1]['info']['id'], standings['Eastern']['Atlantic'][2]['info']['id'])
    m2 = self._matchups['m2']
    self.update_matchup(m2, standings['Eastern']['Metropolitan'][1]['info']['id'], standings['Eastern']['Metropolitan'][2]['info']['id'])
    c1 = self._matchups['c1']
    p1 = self._matchups['p1']
    self.update_matchup(c1, wcleader['info']['id'])
    self.update_matchup(p1, wpleader['info']['id'])
    # Same wild-card assignment rule for the Western conference.
    if int(wcleader['standings']['conferenceRank']) < int(wpleader['standings']['conferenceRank']):
        self.update_matchup(c1, w2wild['info']['id'])
        self.update_matchup(p1, w1wild['info']['id'])
    else:
        self.update_matchup(c1, w1wild['info']['id'])
        self.update_matchup(p1, w2wild['info']['id'])
    c2 = self._matchups['c2']
    self.update_matchup(c2, standings['Western']['Central'][1]['info']['id'], standings['Western']['Central'][2]['info']['id'])
    p2 = self._matchups['p2']
    self.update_matchup(p2, standings['Western']['Pacific'][1]['info']['id'], standings['Western']['Pacific'][2]['info']['id'])
def set_matchup_childs(self, matchup, right, left):
matchup['left'] = left
matchup['right'] = right
def create_matchup(self, id, round, next):
matchup = {'id': id, 'home': 0, 'away': 0, 'round': round, 'start': '', 'result': {}, 'schedule': [], 'season': {}, 'next': next}
matchup['left'] = None
matchup['right'] = None
matchup['result'] = {'home_win': 0, 'away_win': 0}
return matchup
def create_matchups_tree(self):
matchups = {}
sc = self.create_matchup('sc', 4, None)
matchups[sc['id']] = sc
e = self.create_matchup('e', 3, sc)
w = self.create_matchup('w', 3, sc)
matchups[e['id']] = e
matchups[w['id']] = w
a = self.create_matchup('a', 2, e)
m = self.create_matchup('m', 2, e)
c = self.create_matchup('c', 2, w)
p = self.create_matchup('p', 2, w)
matchups[a['id']] = a
matchups[m['id']] = m
matchups[c['id']] = c
matchups[p['id']] = p
a1 = self.create_matchup('a1', 1, a)
a2 = self.create_matchup('a2', 1, a)
m1 = self.create_matchup('m1', 1, m)
m2 = self.create_matchup('m2', 1, m)
c1 = self.create_matchup('c1', 1, c)
c2 = self.create_matchup('c2', 1, c)
p1 = self.create_matchup('p1', 1, p)
p2 = self.create_matchup('p2', 1, p)
matchups[a1['id']] = a1
matchups[a2['id']] = a2
matchups[m1['id']] = m1
matchups[m2['id']] = m2
matchups[c1['id']] = c1
matchups[c2['id']] = c2
matchups[p1['id']] = p1
matchups[p2['id']] = p2
# build tree
self.set_matchup_childs(sc, e, w)
self.set_matchup_childs(w, p, c)
self.set_matchup_childs(e, m, a)
self.set_matchup_childs(c, c2, c1)
self.set_matchup_childs(p, p2, p1)
self.set_matchup_childs(a, a2, a1)
self.set_matchup_childs(m, m2, m1)
return matchups
def get_live_result(self, link):
    """Fetch the live game feed for a game's relative API *link*."""
    return requests.get('https://statsapi.web.nhl.com' + link).json()
def get_team(self, id):
    """Fetch and return the team record for NHL team *id*."""
    url = 'https://statsapi.web.nhl.com/api/v1/teams/' + str(id)
    payload = requests.get(url).json()
    return payload['teams'][0]
def get_teams(self):
    """Fetch the season's league standings and return a dict mapping
    team id -> {'info', 'standings', 'schedule'} records."""
    season = str(self._year) + str(self._year + 1)
    url = 'https://statsapi.web.nhl.com/api/v1/standings?season=' + season
    standings = requests.get(url).json()
    teams = {}
    for record in standings["records"]:
        for entry in record['teamRecords']:
            team_id = entry['team']['id']
            teams[team_id] = {'info': self.get_team(team_id),
                              'standings': entry,
                              'schedule': []}
    return teams
def get_standings(self, teams):
standings = {'Eastern': {'Atlantic': [], 'Metropolitan': [], 'teams': []},
'Western': {'Central': [], 'Pacific': [], 'teams': []},
'teams': []}
league = sorted(teams, key=lambda k: int(k['standings']['divisionRank']))
for team in league:
standings['teams'].append(team)
standings[team['info']['conference']['name']]['teams'].append(team)
standings[team['info']['conference']['name']][team['info']['division']['name']].append(team)
standings['teams'] = sorted(standings['teams'], key=lambda k: int(k['standings']['leagueRank']))
standings['Eastern']['teams'] = sorted(standings['Eastern']['teams'], key=lambda k: int(k['standings']['conferenceRank']))
standings['Western']['teams'] = sorted(standings['Western']['teams'], key=lambda k: int(k['standings']['conferenceRank']))
standings['Eastern']['Atlantic'] = sorted(standings['Eastern']['Atlantic'], key=lambda k: int(k['standings']['divisionRank']))
standings['Eastern']['Metropolitan'] = sorted(standings['Eastern']['Metropolitan'], key=lambda k: int(k['standings']['divisionRank']))
standings['Western']['Central'] = sorted(standings['Western']['Central'], key=lambda k: int(k['standings']['divisionRank']))
standings['Western']['Pacific'] = sorted(standings['Western']['Pacific'], key=lambda k: int(k['standings']['divisionRank']))
return standings
def parse_time(self, timestamp):
    """Parse an API UTC timestamp ('%Y-%m-%dT%H:%M:%SZ') and convert it
    to US Eastern time."""
    moment = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
    moment = moment.replace(tzinfo=tz.gettz('UTC'))
    return moment.astimezone(tz.gettz('America/New_York'))
def get_matchup_schedule(self, matchup, schedules=None):
home_id = matchup['home']
away_id = matchup['away']
result = []
s = schedules
if schedules is None:
s = self.get_playoff_schedule(int(home_id))
if 'dates' in s:
for date in s['dates']:
game = date['games'][0]
game_home_id = game['teams']['home']['team']['id']
game_away_id = game['teams']['away']['team']['id']
if game['gameType'] == 'P':
if game_home_id == away_id or game_away_id == away_id:
result.append(game)
else:
print('No date in get matchup')
result = sorted(result, key=lambda k: self.parse_time(k['gameDate']))
return result
def get_matchup_start(self, matchup):
if len(matchup['schedule']) == 0:
return ''
return matchup['schedule'][0]['gameDate']
def get_matchup_season_result(self, home, away):
result = {'home_win': 0, 'away_win': 0, 'matchs': []}
schedule = self._teams[home]['schedule']
if len(schedule) == 0:
schedule = self.get_schedule(home)
# self._teams[home]['schedule'] = schedule
if 'dates' in schedule:
for date in schedule['dates']:
game = date['games'][0]
game_home_id = game['teams']['home']['team']['id']
game_away_id = game['teams']['away']['team']['id']
if game_home_id == away:
print(game['gameDate'], game['teams']['away']['score'], game['teams']['home']['score'])
if int(game['teams']['home']['score']) > int(game['teams']['away']['score']):
result['away_win'] = result['away_win'] + 1
elif int(game['teams']['home']['score']) < int(game['teams']['away']['score']):
result['home_win'] = result['home_win'] + 1
result['matchs'].append({'home': int(game['teams']['away']['score']), 'away': int(game['teams']['home']['score'])})
if game_away_id == away:
print(game['gameDate'], game['teams']['home']['score'], game['teams']['away']['score'])
if int(game['teams']['home']['score']) > int(game['teams']['away']['score']):
result['home_win'] = result['home_win'] + 1
elif int(game['teams']['home']['score']) < int(game['teams']['away']['score']):
result['away_win'] = result['away_win'] + 1
result['matchs'].append({'home': int(game['teams']['home']['score']), 'away': int(game['teams']['away']['score'])})
else:
print('No date in get matchup season')
return result
def get_matchup_result(self, matchup):
# statuscode = {}
# statuscode[1] = 'Scheduled'
# statuscode[2] = 'Pre-Game'
# statuscode[3] = 'In Progress'
# statuscode[4] = 'In Progress - Critical'
# statuscode[5] = 'Game Over'
# statuscode[6] = 'Final'
# statuscode[7] = 'Final'
result = {}
# home_id = matchup['home']
away_id = matchup['away']
home_win = 0
away_win = 0
for game in matchup['schedule']:
game_home_id = game['teams']['home']['team']['id']
game_away_id = game['teams']['away']['team']['id']
if game['gameType'] == 'P':
if game_home_id == away_id or game_away_id == away_id:
if game_home_id == away_id: # reverse
away_score = game['teams']['home']['score']
home_score = game['teams']['away']['score']
away_shots = game['linescore']['teams']['home']['shotsOnGoal']
home_shots = game['linescore']['teams']['away']['shotsOnGoal']
else:
away_score = game['teams']['away']['score']
home_score = game['teams']['home']['score']
away_shots = game['linescore']['teams']['away']['shotsOnGoal']
home_shots = game['linescore']['teams']['home']['shotsOnGoal']
if int(game['status']['statusCode']) == 7:
if home_score > away_score:
home_win = home_win + 1
elif home_score < away_score:
away_win = away_win + 1
elif int(game['status']['statusCode']) in [3, 4, 5, 6]:
hi = self._teams[matchup['home']]
ai = self._teams[matchup['away']]
# period = game['linescore']['currentPeriod']
result = self.get_live_result(game['link'])
if game_home_id == away_id:
away_stats = result['liveData']['boxscore']['teams']['home']['teamStats']['teamSkaterStats']
home_stats = result['liveData']['boxscore']['teams']['away']['teamStats']['teamSkaterStats']
else:
away_stats = result['liveData']['boxscore']['teams']['away']['teamStats']['teamSkaterStats']
home_stats = result['liveData']['boxscore']['teams']['home']['teamStats']['teamSkaterStats']
period = game['linescore']['currentPeriodOrdinal']
rtime = game['linescore']['currentPeriodTimeRemaining']
print("Game {status} \033[0;94m{h}\033[0m {hsc}-{asc} \033[0;94m{a}\033[0m - {t} of {p} - Shots:\033[0;94m{h}\033[0m {hsh}-{ash} \033[0;94m{a}\033[0m - Faceoff:\033[0;94m{h}\033[0m {hf}-{af} \033[0;94m{a}\033[0m".format(hf=home_stats['faceOffWinPercentage'], af=away_stats['faceOffWinPercentage'], status=game['status']['detailedState'], h=hi['info']['abbreviation'], hsc=home_score, asc=away_score, a=ai['info']['abbreviation'], p=period, t=rtime, hsh=home_shots, ash=away_shots))
result['home_win'] = home_win
result['away_win'] = away_win
return result
def is_matchup_finished(self, matchup):
return matchup['result']['home_win'] == 4 or matchup['result']['away_win'] == 4
def get_matchup_winner(self, matchup):
if matchup['result']['home_win'] == 4:
return matchup['home']
if matchup['result']['away_win'] == 4:
return matchup['away']
return 0
    def update_matchup(self, matchup, home=0, away=0):
        """Advance one matchup of the bracket, seeding or refreshing it.

        When both sides are known, the schedule/start/result are refreshed
        and, if the series just finished, the winner id is propagated to
        the 'next' matchup by calling this method recursively. Otherwise
        `home`/`away` (a propagated winner id, 0 meaning unknown) fill the
        first free slot.
        """
        if self.is_matchup_finished(matchup):
            return
        if matchup['home'] != 0 and matchup['away'] != 0:
            # update result and maybe pass to next stage
            matchup['schedule'] = self.get_matchup_schedule(matchup)
            if matchup['start'] == '':
                matchup['start'] = self.get_matchup_start(matchup)
            matchup['result'] = self.get_matchup_result(matchup)
            if self.is_matchup_finished(matchup) and matchup['next'] is not None:
                print('Finished', matchup['id'])
                self.update_matchup(matchup['next'], self.get_matchup_winner(matchup))
        else:
            # Seed the bracket: fill the 'home' slot first, then 'away'.
            if matchup['home'] == 0:
                matchup['home'] = home
                matchup['away'] = away
            else:
                matchup['away'] = home
            if matchup['home'] != 0 and matchup['away'] != 0:
                # Begin matchup
                hi = self._teams[matchup['home']]
                ai = self._teams[matchup['away']]
                # Better regular-season rank (lower leagueRank) gets home ice.
                if int(hi['standings']['leagueRank']) > int(ai['standings']['leagueRank']):
                    matchup['home'] = ai['info']['id']
                    matchup['away'] = hi['info']['id']
                    hi = self._teams[matchup['home']]
                    ai = self._teams[matchup['away']]
                matchup['season'] = self.get_matchup_season_result(matchup['home'], matchup['away'])
                matchup['schedule'] = self.get_matchup_schedule(matchup)
                matchup['start'] = self.get_matchup_start(matchup)
def update_matchups(self):
ms = list(self._matchups.values())
ms = sorted(ms, key=lambda k: k['round'])
for matchup in ms:
self.update_matchup(matchup)
def get_schedule(self, team):
# print('Get schedule for ' + str(team))
url = 'https://statsapi.web.nhl.com/api/v1/schedule?startDate=' + str(self._year) + '-10-01&endDate=' + str(self._year + 1) + '-05-29&expand=schedule.teams,schedule.linescore,schedule.broadcasts,schedule.ticket,schedule.game.content.media.epg&leaderCategories=&site=en_nhlCA&teamId=' + str(team)
team_schedule = requests.get(url)
return team_schedule.json()
def get_playoff_schedule(self, team):
url = 'https://statsapi.web.nhl.com/api/v1/schedule?startDate=' + str(self._year + 1) + '-04-01&endDate=' + str(self._year + 1) + '-06-15&expand=schedule.teams,schedule.linescore,&site=en_nhlCA&teamId=' + str(team)
# print(url)
team_schedule = requests.get(url)
return team_schedule.json()
def fetch_data(self):
data = requests.get('http://' + self._server + '/nhlplayoffs/api/v3.0/' + str(self._year) + '/data').json()
return data
def update_data(self, data):
url = 'http://' + self._server + '/nhlplayoffs/api/v3.0/' + str(self._year) + '/data'
headers = {'content-type': 'application/json'}
requests.post(url, data=json.dumps(data), headers=headers)
def build_matchup_tree(self, raw_matchups):
matchups = {}
for matchup_raw in list(raw_matchups.values()):
matchup = matchup_raw.copy()
matchups[matchup['id']] = matchup
for matchup in list(matchups.values()):
next = matchup['next']
right = matchup['right']
left = matchup['left']
if next in raw_matchups:
matchup['next'] = matchups[next]
if right in raw_matchups:
matchup['right'] = matchups[right]
if left in raw_matchups:
matchup['left'] = matchups[left]
return matchups
def store_matchup_tree(self, matchups):
raw_matchups = {}
for matchup in list(matchups.values()):
raw_matchup = matchup.copy()
if matchup['next'] is not None:
raw_matchup['next'] = matchup['next']['id']
if matchup['right'] is not None:
raw_matchup['right'] = matchup['right']['id']
if matchup['left'] is not None:
raw_matchup['left'] = matchup['left']['id']
raw_matchups[raw_matchup['id']] = raw_matchup
return raw_matchups
def load(self):
data = self.fetch_data()
self._teams = {}
for m in data['teams'].items():
self._teams[int(m[0])] = m[1]
self._current_round = data['current_round']
self._matchups = self.build_matchup_tree(data['matchups'])
def store(self):
data = {}
data['teams'] = self._teams
data['current_round'] = self._current_round
data['matchups'] = self.store_matchup_tree(self._matchups)
self.update_data(data)
    def display(self):
        """Print the playoff bracket as a 7x7 text grid on stdout."""
        nb_round = 4
        width = (nb_round * 2) - 1
        heigh = (2**(nb_round - 1)) - 1
        # display[x][y] holds the matchup id to print in that grid cell.
        display = [['' for x in range(width)] for y in range(heigh)]
        def walk_matchup_tree(root, x, y, dx):
            # Recursively place a matchup and its children: one column (dx)
            # per round, rows spread apart by the round number.
            display[x][y] = root['id']
            if root['left'] is not None:
                walk_matchup_tree(root['left'], x + dx, y - (root['round'] - 1), dx)
            if root['right'] is not None:
                walk_matchup_tree(root['right'], x + dx, y + (root['round'] - 1), dx)
        # Stanley Cup final sits in the middle; west bracket grows leftwards,
        # east bracket grows rightwards.
        display[3][2] = 'sc'
        walk_matchup_tree(self._matchups['w'], 2, 3, -1)
        walk_matchup_tree(self._matchups['e'], 4, 3, 1)
        for y in range(7):
            for x in range(7):
                id = display[x][y]
                if id != '':
                    matchup = self._matchups[id]
                    if matchup['home'] == 0:
                        # Matchup not seeded yet: just print its id.
                        sys.stdout.write('{0:15}'.format(id))
                    else:
                        home = self._teams[matchup['home']]['info']['abbreviation']
                        away = '?'
                        if matchup['away'] != 0:
                            away = self._teams[matchup['away']]['info']['abbreviation']
                        sys.stdout.write('\033[0;94m{0:3}\033[0m-{2} vs {3}-\033[0;94m{1:3}\033[0m'.format(home, away, matchup['result']['home_win'], matchup['result']['away_win']))
                else:
                    sys.stdout.write('{0:15}'.format(id))
            sys.stdout.write('\n')
if __name__ == '__main__':
    # CLI entry point: refresh the playoff pool database for one season.
    parser = argparse.ArgumentParser(description='Update the nhlpool database')
    parser.add_argument('-y', '--year', metavar='year', default='2019', nargs='?',
                        help='The year to work with')
    parser.add_argument('-s', '--server', metavar='server', default='debug', nargs='?',
                        help='The server to use')
    args = parser.parse_args()
    # 'prod' targets the public instance; anything else uses a local server.
    if args.server == 'prod':
        print('Using production server')
        server = 'nhlpool.roblab.net/'
    else:
        print('Using debug server')
        server = 'localhost:5000'
    upd = Updater(server, int(args.year))
    upd.run()
    upd.display()
| |
""""
Folium Features Tests
---------------------
"""
import os
import warnings
from branca.element import Element
import folium
from folium import Map, Popup, GeoJson, ClickForMarker
import pytest
@pytest.fixture
def tmpl():
    # Skeleton HTML document an empty folium Figure is expected to render;
    # compared after per-line stripping, so indentation is not significant.
    yield ("""
<!DOCTYPE html>
<head>
    <meta http-equiv="content-type" content="text/html; charset=UTF-8" />
</head>
<body>
</body>
<script>
</script>
""")  # noqa
# Root path variable
# (directory containing this test file; data fixtures are resolved against it)
rootpath = os.path.abspath(os.path.dirname(__file__))
# Figure
def test_figure_creation():
    """A bare Figure is a branca Element with undefined bounds."""
    fig = folium.Figure()
    assert isinstance(fig, Element)
    bounds = fig.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds
def test_figure_rendering():
    """Rendering an empty Figure yields a string and leaves bounds unset."""
    fig = folium.Figure()
    rendered = fig.render()
    assert type(rendered) is str
    bounds = fig.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds
def test_figure_html(tmpl):
    """An empty Figure renders exactly the fixture's skeleton document."""
    def squash(text):
        # Drop blank lines and per-line indentation before comparing.
        return os.linesep.join(s.strip() for s in text.splitlines() if s.strip())

    fig = folium.Figure()
    out = squash(fig.render())
    tmpl = squash(tmpl)
    assert out == tmpl, '\n' + out + '\n' + '-' * 80 + '\n' + tmpl
    bounds = fig.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds
def test_figure_double_rendering():
    """render() must be idempotent: a second call returns identical markup."""
    fig = folium.Figure()
    first = fig.render()
    second = fig.render()
    assert first == second
    bounds = fig.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds
def test_marker_popups():
    """Every marker type accepts both plain strings and Popup objects."""
    m = Map()
    markers = [
        folium.Marker([45, -180], popup='-180'),
        folium.Marker([45, -120], popup=Popup('-120')),
        folium.RegularPolygonMarker([45, -60], popup='-60'),
        folium.RegularPolygonMarker([45, 0], popup=Popup('0')),
        folium.CircleMarker([45, 60], popup='60'),
        folium.CircleMarker([45, 120], popup=Popup('120')),
        folium.CircleMarker([45, 90], popup=Popup('90'), weight=0),
    ]
    for marker in markers:
        marker.add_to(m)
    m._repr_html_()
    bounds = m.get_bounds()
    assert bounds == [[45, -180], [45, 120]], bounds
# DivIcon.
def test_divicon():
    # DivIcon must keep the supplied html verbatim and use the default
    # 'empty' className.
    html = """<svg height="100" width="100">
    <circle cx="50" cy="50" r="40" stroke="black" stroke-width="3" fill="red" />
    </svg>"""  # noqa
    div = folium.DivIcon(html=html)
    assert isinstance(div, Element)
    assert div.options['className'] == 'empty'
    assert div.options['html'] == html
# ColorLine.
def test_color_line():
    """ColorLine built from a coordinate ring and colormap renders cleanly."""
    m = Map([22.5, 22.5], zoom_start=3)
    line = folium.ColorLine(
        [[0, 0], [0, 45], [45, 45], [45, 0], [0, 0]],
        [0, 1, 2, 3],
        colormap=['b', 'g', 'y', 'r'],
        nb_steps=4,
        weight=10,
        opacity=1,
    )
    m.add_child(line)
    m._repr_html_()
def test_get_vegalite_major_version():
    # _get_vegalite_major_versions should read the major version out of the
    # spec's '$schema' URL (as a string) and return None when it is absent.
    # Vega-Lite v2 spec: schema URL pins .../vega-lite/v2.6.0.json.
    spec_v2 = {'$schema': 'https://vega.github.io/schema/vega-lite/v2.6.0.json',
               'config': {'view': {'height': 300, 'width': 400}},
               'data': {'name': 'data-aac17e868e23f98b5e0830d45504be45'},
               'datasets': {'data-aac17e868e23f98b5e0830d45504be45': [{'folium usage': 0,
                                                                      'happiness': 1.0},
                                                                     {'folium usage': 1,
                                                                      'happiness': 2.718281828459045},
                                                                     {'folium usage': 2,
                                                                      'happiness': 7.38905609893065},
                                                                     {'folium usage': 3,
                                                                      'happiness': 20.085536923187668},
                                                                     {'folium usage': 4,
                                                                      'happiness': 54.598150033144236},
                                                                     {'folium usage': 5,
                                                                      'happiness': 148.4131591025766},
                                                                     {'folium usage': 6,
                                                                      'happiness': 403.4287934927351},
                                                                     {'folium usage': 7,
                                                                      'happiness': 1096.6331584284585},
                                                                     {'folium usage': 8,
                                                                      'happiness': 2980.9579870417283},
                                                                     {'folium usage': 9,
                                                                      'happiness': 8103.083927575384}]},
               'encoding': {'x': {'field': 'folium usage', 'type': 'quantitative'},
                            'y': {'field': 'happiness', 'type': 'quantitative'}},
               'mark': 'point'}
    vegalite_v2 = folium.features.VegaLite(spec_v2)
    assert vegalite_v2._get_vegalite_major_versions(spec_v2) == '2'
    # Vega-Lite v1 spec: schema URL pins .../vega-lite/v1.3.1.json.
    spec_v1 = {'$schema': 'https://vega.github.io/schema/vega-lite/v1.3.1.json',
               'data': {'values': [{'folium usage': 0, 'happiness': 1.0},
                                   {'folium usage': 1, 'happiness': 2.718281828459045},
                                   {'folium usage': 2, 'happiness': 7.38905609893065},
                                   {'folium usage': 3, 'happiness': 20.085536923187668},
                                   {'folium usage': 4, 'happiness': 54.598150033144236},
                                   {'folium usage': 5, 'happiness': 148.4131591025766},
                                   {'folium usage': 6, 'happiness': 403.4287934927351},
                                   {'folium usage': 7, 'happiness': 1096.6331584284585},
                                   {'folium usage': 8, 'happiness': 2980.9579870417283},
                                   {'folium usage': 9, 'happiness': 8103.083927575384}]},
               'encoding': {'x': {'field': 'folium usage', 'type': 'quantitative'},
                            'y': {'field': 'happiness', 'type': 'quantitative'}},
               'height': 300,
               'mark': 'point',
               'width': 400}
    vegalite_v1 = folium.features.VegaLite(spec_v1)
    assert vegalite_v1._get_vegalite_major_versions(spec_v1) == '1'
    # No '$schema' key at all: the version cannot be determined.
    spec_no_version = {
        'config': {
            'view': {'height': 300, 'width': 400}},
        'data': {'name': 'data-aac17e868e23f98b5e0830d45504be45'},
        'datasets': {
            'data-aac17e868e23f98b5e0830d45504be45': [
                {'folium usage': 0,
                 'happiness': 1.0},
                {'folium usage': 1,
                 'happiness': 2.718281828459045},
                {'folium usage': 2,
                 'happiness': 7.38905609893065},
                {'folium usage': 3,
                 'happiness': 20.085536923187668},
                {'folium usage': 4,
                 'happiness': 54.598150033144236},
                {'folium usage': 5,
                 'happiness': 148.4131591025766},
                {'folium usage': 6,
                 'happiness': 403.4287934927351},
                {'folium usage': 7,
                 'happiness': 1096.6331584284585},
                {'folium usage': 8,
                 'happiness': 2980.9579870417283},
                {'folium usage': 9,
                 'happiness': 8103.083927575384}]},
        'encoding': {'x': {'field': 'folium usage', 'type': 'quantitative'},
                     'y': {'field': 'happiness', 'type': 'quantitative'}},
        'mark': 'point'
    }
    vegalite_no_version = folium.features.VegaLite(spec_no_version)
    assert vegalite_no_version._get_vegalite_major_versions(spec_no_version) is None
# GeoJsonTooltip GeometryCollection
def test_geojson_tooltip():
    """Rendering a GeoJsonTooltip over a GeometryCollection must emit a
    UserWarning."""
    m = folium.Map([30.5, -97.5], zoom_start=10)
    folium.GeoJson(
        os.path.join(rootpath, 'kuntarajat.geojson'),
        tooltip=folium.GeoJsonTooltip(fields=['code', 'name']),
    ).add_to(m)
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        m._repr_html_()
    assert issubclass(caught[-1].category, UserWarning), 'GeoJsonTooltip GeometryCollection test failed.'
# GeoJsonMarker type validation.
def test_geojson_marker():
    """Passing a non-Marker object as GeoJson's marker must raise TypeError."""
    m = folium.Map([30.4, -97.5], zoom_start=10)
    data = os.path.join(rootpath, 'subwaystations.geojson')
    with pytest.raises(TypeError):
        folium.GeoJson(data, marker=ClickForMarker()).add_to(m)
def test_geojson_find_identifier():
    # find_identifier() should prefer an existing 'id' field, then a property
    # whose values are unique across features; otherwise GeoJson injects
    # sequential string ids itself.

    def _create(*properties):
        # Build a FeatureCollection with one feature per properties dict.
        return {"type": "FeatureCollection", "features": [
            {"type": "Feature", "properties": item}
            for item in properties]}

    def _assert_id_got_added(data):
        # GeoJson should fall back to injecting string ids ('0', '1', ...).
        _geojson = GeoJson(data)
        assert _geojson.find_identifier() == 'feature.id'
        assert _geojson.data['features'][0]['id'] == '0'

    data_with_id = _create(None, None)
    data_with_id['features'][0]['id'] = 'this-is-an-id'
    data_with_id['features'][1]['id'] = 'this-is-another-id'
    geojson = GeoJson(data_with_id)
    assert geojson.find_identifier() == 'feature.id'
    assert geojson.data['features'][0]['id'] == 'this-is-an-id'

    data_with_unique_properties = _create(
        {'property-key': 'some-value'},
        {'property-key': 'another-value'},
    )
    geojson = GeoJson(data_with_unique_properties)
    assert geojson.find_identifier() == 'feature.properties.property-key'

    # Mixed value types are fine as long as the values stay unique.
    data_with_unique_properties = _create(
        {'property-key': 42},
        {'property-key': 43},
        {'property-key': 'or a string'},
    )
    geojson = GeoJson(data_with_unique_properties)
    assert geojson.find_identifier() == 'feature.properties.property-key'

    # The test cases below have no id field or unique property,
    # so an id will be added to the data.
    data_with_identical_ids = _create(None, None)
    data_with_identical_ids['features'][0]['id'] = 'identical-ids'
    data_with_identical_ids['features'][1]['id'] = 'identical-ids'
    _assert_id_got_added(data_with_identical_ids)

    data_with_some_missing_ids = _create(None, None)
    data_with_some_missing_ids['features'][0]['id'] = 'this-is-an-id'
    # the second feature doesn't have an id
    _assert_id_got_added(data_with_some_missing_ids)

    data_with_identical_properties = _create(
        {'property-key': 'identical-value'},
        {'property-key': 'identical-value'},
    )
    _assert_id_got_added(data_with_identical_properties)

    data_bare = _create(None)
    _assert_id_got_added(data_bare)

    data_empty_dict = _create({})
    _assert_id_got_added(data_empty_dict)

    data_without_properties = _create(None)
    del data_without_properties['features'][0]['properties']
    _assert_id_got_added(data_without_properties)

    data_some_without_properties = _create({'key': 'value'}, 'will be deleted')
    # the first feature has properties, but the second doesn't
    del data_some_without_properties['features'][1]['properties']
    _assert_id_got_added(data_some_without_properties)

    # Nested property values are not usable as identifiers.
    data_with_nested_properties = _create({
        "summary": {"distance": 343.2},
        "way_points": [3, 5],
    })
    _assert_id_got_added(data_with_nested_properties)

    data_with_incompatible_properties = _create({
        "summary": {"distances": [0, 6], "durations": None},
        "way_points": [3, 5],
    })
    _assert_id_got_added(data_with_incompatible_properties)

    # A loose geometry (no FeatureCollection wrapper) must first be converted.
    data_loose_geometry = {"type": "LineString", "coordinates": [
        [3.961389, 43.583333], [3.968056, 43.580833], [3.974722, 43.578333],
        [3.986389, 43.575278], [3.998333, 43.5725], [4.163333, 43.530556],
    ]}
    geojson = GeoJson(data_loose_geometry)
    geojson.convert_to_feature_collection()
    assert geojson.find_identifier() == 'feature.id'
    assert geojson.data['features'][0]['id'] == '0'
| |
from warnings import warn
import numpy as np
from numpy import (atleast_2d, ComplexWarning, arange, zeros_like, imag, diag,
iscomplexobj, tril, triu, argsort, empty_like)
from .decomp import _asarray_validated
from .lapack import get_lapack_funcs, _compute_lwork
__all__ = ['ldl']
def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True):
    """ Computes the LDLt or Bunch-Kaufman factorization of a symmetric/
    hermitian matrix.

    This function returns a block diagonal matrix D consisting blocks of size
    at most 2x2 and also a possibly permuted unit lower triangular matrix
    ``L`` such that the factorization ``A = L D L^H`` or ``A = L D L^T``
    holds. If ``lower`` is False then (again possibly permuted) upper
    triangular matrices are returned as outer factors.

    The permutation array can be used to triangularize the outer factors
    simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower
    triangular matrix. This is also equivalent to multiplication with a
    permutation matrix ``P.dot(lu)``, where ``P`` is a column-permuted
    identity matrix ``I[:, perm]``.

    Depending on the value of the boolean ``lower``, only upper or lower
    triangular part of the input array is referenced. Hence, a triangular
    matrix on entry would give the same result as if the full matrix is
    supplied.

    Parameters
    ----------
    a : array_like
        Square input array
    lower : bool, optional
        This switches between the lower and upper triangular outer factors of
        the factorization. Lower triangular (``lower=True``) is the default.
    hermitian : bool, optional
        For complex-valued arrays, this defines whether ``a = a.conj().T`` or
        ``a = a.T`` is assumed. For real-valued arrays, this switch has no
        effect.
    overwrite_a : bool, optional
        Allow overwriting data in ``a`` (may enhance performance). The default
        is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    lu : ndarray
        The (possibly) permuted upper/lower triangular outer factor of the
        factorization.
    d : ndarray
        The block diagonal multiplier of the factorization.
    perm : ndarray
        The row-permutation index array that brings lu into triangular form.

    Raises
    ------
    ValueError
        If input array is not square.
    ComplexWarning
        If a complex-valued array with nonzero imaginary parts on the
        diagonal is given and hermitian is set to True.

    Examples
    --------
    Given an upper triangular array `a` that represents the full symmetric
    array with its entries, obtain `l`, 'd' and the permutation vector `perm`:

    >>> import numpy as np
    >>> from scipy.linalg import ldl
    >>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]])
    >>> lu, d, perm = ldl(a, lower=0) # Use the upper part
    >>> lu
    array([[ 0. ,  0. ,  1. ],
           [ 0. ,  1. , -0.5],
           [ 1. ,  1. ,  1.5]])
    >>> d
    array([[-5. ,  0. ,  0. ],
           [ 0. ,  1.5,  0. ],
           [ 0. ,  0. ,  2. ]])
    >>> perm
    array([2, 1, 0])
    >>> lu[perm, :]
    array([[ 1. ,  1. ,  1.5],
           [ 0. ,  1. , -0.5],
           [ 0. ,  0. ,  1. ]])
    >>> lu.dot(d).dot(lu.T)
    array([[ 2., -1.,  3.],
           [-1.,  2.,  0.],
           [ 3.,  0.,  1.]])

    Notes
    -----
    This function uses ``?SYTRF`` routines for symmetric matrices and
    ``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for
    the algorithm details.

    Depending on the ``lower`` keyword value, only lower or upper triangular
    part of the input array is referenced. Moreover, this keyword also defines
    the structure of the outer factors of the factorization.

    .. versionadded:: 1.1.0

    See also
    --------
    cholesky, lu

    References
    ----------
    .. [1] J.R. Bunch, L. Kaufman, Some stable methods for calculating
       inertia and solving symmetric linear systems, Math. Comput. Vol.31,
       1977. :doi:`10.2307/2005787`

    """
    a = atleast_2d(_asarray_validated(A, check_finite=check_finite))
    if a.shape[0] != a.shape[1]:
        raise ValueError('The input array "a" should be square.')
    # Return empty arrays for empty square input
    if a.size == 0:
        return empty_like(a), empty_like(a), np.array([], dtype=int)

    n = a.shape[0]
    r_or_c = complex if iscomplexobj(a) else float

    # Get the LAPACK routine
    if r_or_c is complex and hermitian:
        s, sl = 'hetrf', 'hetrf_lwork'
        if np.any(imag(diag(a))):
            # BUGFIX: the implicitly-concatenated message previously lacked
            # the separating spaces ("diagonalare ignored", "ofcomplex").
            warn('scipy.linalg.ldl():\nThe imaginary parts of the diagonal'
                 ' are ignored. Use "hermitian=False" for factorization of'
                 ' complex symmetric arrays.', ComplexWarning, stacklevel=2)
    else:
        s, sl = 'sytrf', 'sytrf_lwork'

    solver, solver_lwork = get_lapack_funcs((s, sl), (a,))
    lwork = _compute_lwork(solver_lwork, n, lower=lower)
    ldu, piv, info = solver(a, lwork=lwork, lower=lower,
                            overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('{} exited with the internal error "illegal value '
                         'in argument number {}". See LAPACK documentation '
                         'for the error codes.'.format(s.upper(), -info))

    # Decode LAPACK's ipiv into swaps + pivot-block sizes, then assemble the
    # explicit D and (permuted) triangular factors.
    swap_arr, pivot_arr = _ldl_sanitize_ipiv(piv, lower=lower)
    d, lu = _ldl_get_d_and_l(ldu, pivot_arr, lower=lower, hermitian=hermitian)
    lu, perm = _ldl_construct_tri_factor(lu, swap_arr, pivot_arr, lower=lower)

    return lu, d, perm
def _ldl_sanitize_ipiv(a, lower=True):
"""
This helper function takes the rather strangely encoded permutation array
returned by the LAPACK routines ?(HE/SY)TRF and converts it into
regularized permutation and diagonal pivot size format.
Since FORTRAN uses 1-indexing and LAPACK uses different start points for
upper and lower formats there are certain offsets in the indices used
below.
Let's assume a result where the matrix is 6x6 and there are two 2x2
and two 1x1 blocks reported by the routine. To ease the coding efforts,
we still populate a 6-sized array and fill zeros as the following ::
pivots = [2, 0, 2, 0, 1, 1]
This denotes a diagonal matrix of the form ::
[x x ]
[x x ]
[ x x ]
[ x x ]
[ x ]
[ x]
In other words, we write 2 when the 2x2 block is first encountered and
automatically write 0 to the next entry and skip the next spin of the
loop. Thus, a separate counter or array appends to keep track of block
sizes are avoided. If needed, zeros can be filtered out later without
losing the block structure.
Parameters
----------
a : ndarray
The permutation array ipiv returned by LAPACK
lower : bool, optional
The switch to select whether upper or lower triangle is chosen in
the LAPACK call.
Returns
-------
swap_ : ndarray
The array that defines the row/column swap operations. For example,
if row two is swapped with row four, the result is [0, 3, 2, 3].
pivots : ndarray
The array that defines the block diagonal structure as given above.
"""
n = a.size
swap_ = arange(n)
pivots = zeros_like(swap_, dtype=int)
skip_2x2 = False
# Some upper/lower dependent offset values
# range (s)tart, r(e)nd, r(i)ncrement
x, y, rs, re, ri = (1, 0, 0, n, 1) if lower else (-1, -1, n-1, -1, -1)
for ind in range(rs, re, ri):
# If previous spin belonged already to a 2x2 block
if skip_2x2:
skip_2x2 = False
continue
cur_val = a[ind]
# do we have a 1x1 block or not?
if cur_val > 0:
if cur_val != ind+1:
# Index value != array value --> permutation required
swap_[ind] = swap_[cur_val-1]
pivots[ind] = 1
# Not.
elif cur_val < 0 and cur_val == a[ind+x]:
# first neg entry of 2x2 block identifier
if -cur_val != ind+2:
# Index value != array value --> permutation required
swap_[ind+x] = swap_[-cur_val-1]
pivots[ind+y] = 2
skip_2x2 = True
else: # Doesn't make sense, give up
raise ValueError('While parsing the permutation array '
'in "scipy.linalg.ldl", invalid entries '
'found. The array syntax is invalid.')
return swap_, pivots
def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True):
"""
Helper function to extract the diagonal and triangular matrices for
LDL.T factorization.
Parameters
----------
ldu : ndarray
The compact output returned by the LAPACK routing
pivs : ndarray
The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For
every 2 there is a succeeding 0.
lower : bool, optional
If set to False, upper triangular part is considered.
hermitian : bool, optional
If set to False a symmetric complex array is assumed.
Returns
-------
d : ndarray
The block diagonal matrix.
lu : ndarray
The upper/lower triangular matrix
"""
is_c = iscomplexobj(ldu)
d = diag(diag(ldu))
n = d.shape[0]
blk_i = 0 # block index
# row/column offsets for selecting sub-, super-diagonal
x, y = (1, 0) if lower else (0, 1)
lu = tril(ldu, -1) if lower else triu(ldu, 1)
diag_inds = arange(n)
lu[diag_inds, diag_inds] = 1
for blk in pivs[pivs != 0]:
# increment the block index and check for 2s
# if 2 then copy the off diagonals depending on uplo
inc = blk_i + blk
if blk == 2:
d[blk_i+x, blk_i+y] = ldu[blk_i+x, blk_i+y]
# If Hermitian matrix is factorized, the cross-offdiagonal element
# should be conjugated.
if is_c and hermitian:
d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y].conj()
else:
d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y]
lu[blk_i+x, blk_i+y] = 0.
blk_i = inc
return d, lu
def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True):
"""
Helper function to construct explicit outer factors of LDL factorization.
If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k).
Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See
LAPACK documentation for more details.
Parameters
----------
lu : ndarray
The triangular array that is extracted from LAPACK routine call with
ones on the diagonals.
swap_vec : ndarray
The array that defines the row swapping indices. If the kth entry is m
then rows k,m are swapped. Notice that the mth entry is not necessarily
k to avoid undoing the swapping.
pivs : ndarray
The array that defines the block diagonal structure returned by
_ldl_sanitize_ipiv().
lower : bool, optional
The boolean to switch between lower and upper triangular structure.
Returns
-------
lu : ndarray
The square outer factor which satisfies the L * D * L.T = A
perm : ndarray
The permutation vector that brings the lu to the triangular form
Notes
-----
Note that the original argument "lu" is overwritten.
"""
n = lu.shape[0]
perm = arange(n)
# Setup the reading order of the permutation matrix for upper/lower
rs, re, ri = (n-1, -1, -1) if lower else (0, n, 1)
for ind in range(rs, re, ri):
s_ind = swap_vec[ind]
if s_ind != ind:
# Column start and end positions
col_s = ind if lower else 0
col_e = n if lower else ind+1
# If we stumble upon a 2x2 block include both cols in the perm.
if pivs[ind] == (0 if lower else 2):
col_s += -1 if lower else 0
col_e += 0 if lower else 1
lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e]
perm[[s_ind, ind]] = perm[[ind, s_ind]]
return lu, argsort(perm)
| |
from __future__ import unicode_literals
from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import IntegrityError
from django.db.models import Q
from django.test import SimpleTestCase, TestCase
from django.utils import six
from .models import (
AllowsNullGFK, Animal, Carrot, Comparison, ConcreteRelatedModel,
ForConcreteModelModel, ForProxyModelModel, Gecko, ManualPK, Mineral,
ProxyRelatedModel, Rock, TaggedItem, ValuableRock, ValuableTaggedItem,
Vegetable,
)
class GenericRelationsTests(TestCase):
    def setUp(self):
        """Create two animals, two vegetables, one mineral and their tags."""
        self.lion = Animal.objects.create(
            common_name="Lion", latin_name="Panthera leo")
        self.platypus = Animal.objects.create(
            common_name="Platypus", latin_name="Ornithorhynchus anatinus")
        Vegetable.objects.create(name="Eggplant", is_yucky=True)
        self.bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
        self.quartz = Mineral.objects.create(name="Quartz", hardness=7)

        # Tagging stuff.
        self.bacon.tags.create(tag="fatty")
        self.bacon.tags.create(tag="salty")
        self.lion.tags.create(tag="yellow")
        self.lion.tags.create(tag="hairy")

        # Original list of tags:
        # comp_func maps a TaggedItem to a comparable (tag, model class,
        # object_id) triple for queryset comparisons.
        self.comp_func = lambda obj: (
            obj.tag, obj.content_type.model_class(), obj.object_id
        )
def test_generic_update_or_create_when_created(self):
"""
Should be able to use update_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.update_or_create(tag='stinky')
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_update_or_create_when_updated(self):
"""
Should be able to use update_or_create from the generic related manager
to update a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag='stinky')
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.update_or_create(defaults={'tag': 'juicy'}, id=tag.id)
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
self.assertEqual(tag.tag, 'juicy')
def test_generic_get_or_create_when_created(self):
"""
Should be able to use get_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.get_or_create(tag='stinky')
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_get_or_create_when_exists(self):
"""
Should be able to use get_or_create from the generic related manager
to get a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag="stinky")
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.get_or_create(id=tag.id, defaults={'tag': 'juicy'})
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
# shouldn't had changed the tag
self.assertEqual(tag.tag, 'stinky')
def test_generic_relations_m2m_mimic(self):
"""
Objects with declared GenericRelations can be tagged directly -- the
API mimics the many-to-many API.
"""
self.assertQuerysetEqual(self.lion.tags.all(), [
"<TaggedItem: hairy>",
"<TaggedItem: yellow>"
])
self.assertQuerysetEqual(self.bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>"
])
def test_access_content_object(self):
"""
Test accessing the content object like a foreign key.
"""
tagged_item = TaggedItem.objects.get(tag="salty")
self.assertEqual(tagged_item.content_object, self.bacon)
def test_query_content_object(self):
qs = TaggedItem.objects.filter(
animal__isnull=False).order_by('animal__common_name', 'tag')
self.assertQuerysetEqual(
qs, ["<TaggedItem: hairy>", "<TaggedItem: yellow>"]
)
mpk = ManualPK.objects.create(id=1)
mpk.tags.create(tag='mpk')
qs = TaggedItem.objects.filter(
Q(animal__isnull=False) | Q(manualpk__id=1)).order_by('tag')
self.assertQuerysetEqual(
qs, ["hairy", "mpk", "yellow"], lambda x: x.tag)
def test_exclude_generic_relations(self):
    """
    Test lookups over an object without GenericRelations.
    """
    # Mineral declares no explicit GenericRelation, so TaggedItems are
    # created explicitly and looked up via content_type/object_id.
    TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
    TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
    quartz_ctype = ContentType.objects.get_for_model(self.quartz)
    quartz_tags = TaggedItem.objects.filter(
        content_type__pk=quartz_ctype.id, object_id=self.quartz.id
    )
    self.assertQuerysetEqual(quartz_tags, [
        "<TaggedItem: clearish>",
        "<TaggedItem: shiny>",
    ])
def test_access_via_content_type(self):
    """Lookups can traverse the tag's content type."""
    self.lion.delete()
    self.platypus.tags.create(tag="fatty")
    platypus_ctype = ContentType.objects.get_for_model(self.platypus)
    matching_animals = Animal.objects.filter(tags__content_type=platypus_ctype)
    self.assertQuerysetEqual(matching_animals, ["<Animal: Platypus>"])
def test_set_foreign_key(self):
    """Assigning content_object re-points the generic foreign key."""
    shiny = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
    shiny.content_object = self.platypus
    shiny.save()
    self.assertQuerysetEqual(self.platypus.tags.all(), ["<TaggedItem: shiny>"])
def test_queries_across_generic_relations(self):
    """
    Queries across generic relations respect the content types. Even though
    there are two TaggedItems with a tag of "fatty", this query only pulls
    out the one with the content type related to Animals.
    """
    animals = Animal.objects.order_by('common_name')
    self.assertQuerysetEqual(animals, ["<Animal: Lion>", "<Animal: Platypus>"])
def test_queries_content_type_restriction(self):
    """
    Create another fatty tagged instance with different PK to ensure there
    is a content type restriction in the generated queries below.
    """
    manual = ManualPK.objects.create(id=self.lion.pk)
    manual.tags.create(tag="fatty")
    self.platypus.tags.create(tag="fatty")
    fatty_animals = Animal.objects.filter(tags__tag='fatty')
    other_animals = Animal.objects.exclude(tags__tag='fatty')
    self.assertQuerysetEqual(fatty_animals, ["<Animal: Platypus>"])
    self.assertQuerysetEqual(other_animals, ["<Animal: Lion>"])
def test_object_deletion_with_generic_relation(self):
    """
    If you delete an object with an explicit Generic relation, the related
    objects are deleted when the source object is deleted.
    """
    all_tags = [
        ('fatty', Vegetable, self.bacon.pk),
        ('hairy', Animal, self.lion.pk),
        ('salty', Vegetable, self.bacon.pk),
        ('yellow', Animal, self.lion.pk),
    ]
    self.assertQuerysetEqual(TaggedItem.objects.all(), all_tags, self.comp_func)
    self.lion.delete()
    # The lion's tags were cascade-deleted along with it.
    remaining_tags = [
        ('fatty', Vegetable, self.bacon.pk),
        ('salty', Vegetable, self.bacon.pk),
    ]
    self.assertQuerysetEqual(TaggedItem.objects.all(), remaining_tags, self.comp_func)
def test_object_deletion_without_generic_relation(self):
    """
    If Generic Relation is not explicitly defined, any related objects
    remain after deletion of the source object.
    """
    TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
    quartz_pk = self.quartz.pk
    self.quartz.delete()
    # The quartz tag survives because Mineral declares no GenericRelation.
    remaining = [
        ('clearish', Mineral, quartz_pk),
        ('fatty', Vegetable, self.bacon.pk),
        ('hairy', Animal, self.lion.pk),
        ('salty', Vegetable, self.bacon.pk),
        ('yellow', Animal, self.lion.pk),
    ]
    self.assertQuerysetEqual(TaggedItem.objects.all(), remaining, self.comp_func)
def test_tag_deletion_related_objects_unaffected(self):
    """
    If you delete a tag, the objects using the tag are unaffected (other
    than losing a tag).
    """
    lion_ctype = ContentType.objects.get_for_model(self.lion)
    hairy_tag = TaggedItem.objects.get(
        content_type__pk=lion_ctype.id, object_id=self.lion.id, tag="hairy")
    hairy_tag.delete()
    self.assertQuerysetEqual(self.lion.tags.all(), ["<TaggedItem: yellow>"])
    expected = [
        ('fatty', Vegetable, self.bacon.pk),
        ('salty', Vegetable, self.bacon.pk),
        ('yellow', Animal, self.lion.pk),
    ]
    self.assertQuerysetEqual(TaggedItem.objects.all(), expected, self.comp_func)
def test_add_bulk(self):
    """add() with the default bulk=True issues a single UPDATE query."""
    veg = Vegetable.objects.create(name="Bacon", is_yucky=False)
    shiny = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
    clearish = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
    with self.assertNumQueries(1):
        veg.tags.add(shiny, clearish)
    self.assertEqual(shiny.content_object, veg)
    self.assertEqual(clearish.content_object, veg)
def test_add_bulk_false(self):
    """add() with bulk=False saves each object individually."""
    veg = Vegetable.objects.create(name="Bacon", is_yucky=False)
    shiny = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
    clearish = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
    # One save() per object -> two queries.
    with self.assertNumQueries(2):
        veg.tags.add(shiny, clearish, bulk=False)
    self.assertEqual(shiny.content_object, veg)
    self.assertEqual(clearish.content_object, veg)
def test_add_rejects_unsaved_objects(self):
    """add() refuses unsaved objects unless bulk=False is used."""
    unsaved = TaggedItem(content_object=self.quartz, tag="shiny")
    msg = "<TaggedItem: shiny> instance isn't saved. Use bulk=False or save the object first."
    with self.assertRaisesMessage(ValueError, msg):
        self.bacon.tags.add(unsaved)
def test_set(self):
    """set() replaces the related set, honouring bulk/clear options."""
    veg = Vegetable.objects.create(name="Bacon", is_yucky=False)
    fatty = veg.tags.create(tag="fatty")
    salty = veg.tags.create(tag="salty")
    both = ["<TaggedItem: fatty>", "<TaggedItem: salty>"]
    only_fatty = ["<TaggedItem: fatty>"]
    # Default behaviour (bulk update).
    veg.tags.set([fatty, salty])
    self.assertQuerysetEqual(veg.tags.all(), both)
    veg.tags.set([fatty])
    self.assertQuerysetEqual(veg.tags.all(), only_fatty)
    veg.tags.set([])
    self.assertQuerysetEqual(veg.tags.all(), [])
    # Per-object saves with an explicit clear.
    veg.tags.set([fatty, salty], bulk=False, clear=True)
    self.assertQuerysetEqual(veg.tags.all(), both)
    veg.tags.set([fatty], bulk=False, clear=True)
    self.assertQuerysetEqual(veg.tags.all(), only_fatty)
    veg.tags.set([], clear=True)
    self.assertQuerysetEqual(veg.tags.all(), [])
def test_assign(self):
    """Plain assignment semantics of set() on the generic manager."""
    veg = Vegetable.objects.create(name="Bacon", is_yucky=False)
    fatty = veg.tags.create(tag="fatty")
    salty = veg.tags.create(tag="salty")
    veg.tags.set([fatty, salty])
    self.assertQuerysetEqual(veg.tags.all(), [
        "<TaggedItem: fatty>",
        "<TaggedItem: salty>",
    ])
    veg.tags.set([fatty])
    self.assertQuerysetEqual(veg.tags.all(), ["<TaggedItem: fatty>"])
    veg.tags.set([])
    self.assertQuerysetEqual(veg.tags.all(), [])
def test_assign_with_queryset(self):
    """
    Querysets used in reverse GFK assignments are pre-evaluated so their
    value isn't affected by the clearing operation in
    ManyRelatedManager.set() (#19816).
    """
    veg = Vegetable.objects.create(name="Bacon", is_yucky=False)
    veg.tags.create(tag="fatty")
    veg.tags.create(tag="salty")
    self.assertEqual(2, veg.tags.count())
    fatty_qs = veg.tags.filter(tag="fatty")
    veg.tags.set(fatty_qs)
    self.assertEqual(1, veg.tags.count())
    # The queryset itself still yields one row after the set().
    self.assertEqual(1, fatty_qs.count())
def test_generic_relation_related_name_default(self):
    """By default a GenericRelation is not usable from the reverse side."""
    with self.assertRaises(FieldError):
        TaggedItem.objects.filter(vegetable__isnull=True)
def test_multiple_gfk(self):
    """
    A model with several GenericForeignKeys (Comparison) supports direct
    creation, creation through a GenericRelation, filtering, and cascade
    deletion through the declared relation only.
    """
    # Simple tests for multiple GenericForeignKeys
    # only uses one model, since the above tests should be sufficient.
    tiger = Animal.objects.create(common_name="tiger")
    cheetah = Animal.objects.create(common_name="cheetah")
    bear = Animal.objects.create(common_name="bear")
    # Create directly
    Comparison.objects.create(
        first_obj=cheetah, other_obj=tiger, comparative="faster"
    )
    Comparison.objects.create(
        first_obj=tiger, other_obj=cheetah, comparative="cooler"
    )
    # Create using GenericRelation
    tiger.comparisons.create(other_obj=bear, comparative="cooler")
    tiger.comparisons.create(other_obj=cheetah, comparative="stronger")
    self.assertQuerysetEqual(cheetah.comparisons.all(), [
        "<Comparison: cheetah is faster than tiger>"
    ])
    # Filtering works
    self.assertQuerysetEqual(tiger.comparisons.filter(comparative="cooler"), [
        "<Comparison: tiger is cooler than cheetah>",
        "<Comparison: tiger is cooler than bear>",
    ], ordered=False)
    # Filtering and deleting works
    subjective = ["cooler"]
    tiger.comparisons.filter(comparative__in=subjective).delete()
    self.assertQuerysetEqual(Comparison.objects.all(), [
        "<Comparison: cheetah is faster than tiger>",
        "<Comparison: tiger is stronger than cheetah>"
    ], ordered=False)
    # If we delete cheetah, Comparisons with cheetah as 'first_obj' will be
    # deleted since Animal has an explicit GenericRelation to Comparison
    # through first_obj. Comparisons with cheetah as 'other_obj' will not
    # be deleted.
    cheetah.delete()
    self.assertQuerysetEqual(Comparison.objects.all(), [
        "<Comparison: tiger is stronger than None>"
    ])
def test_gfk_subclasses(self):
    """GenericForeignKey works on model subclasses (see #8309)."""
    quartz = Mineral.objects.create(name="Quartz", hardness=7)
    shiny_tag = ValuableTaggedItem.objects.create(
        content_object=quartz, tag="shiny", value=10,
    )
    self.assertEqual(shiny_tag.content_object, quartz)
def test_generic_relation_to_inherited_child(self):
    """GenericRelations to models using multi-table inheritance work."""
    granite = ValuableRock.objects.create(name='granite', hardness=5)
    ValuableTaggedItem.objects.create(content_object=granite, tag="countertop", value=1)
    # Deleting the rock must cascade to its tag.
    granite.delete()
    self.assertEqual(0, ValuableTaggedItem.objects.count())
def test_generic_inline_formsets(self):
    """
    Render generic inline formsets for (a) no instance, (b) an unsaved
    instance, (c) a saved instance with one existing tag, and (d) a custom
    prefix, comparing the produced markup with assertHTMLEqual (which is
    whitespace-insensitive).
    """
    GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
    formset = GenericFormSet()
    # One empty extra form with auto-generated field names/ids.
    self.assertHTMLEqual(
        ''.join(form.as_p() for form in formset.forms),
        """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>"""
    )
    # Binding to an unsaved instance renders the same empty extra form.
    formset = GenericFormSet(instance=Animal())
    self.assertHTMLEqual(
        ''.join(form.as_p() for form in formset.forms),
        """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag"
type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>"""
    )
    # A saved instance with one tag renders that tag plus one extra form.
    platypus = Animal.objects.create(
        common_name="Platypus", latin_name="Ornithorhynchus anatinus"
    )
    platypus.tags.create(tag="shiny")
    GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
    formset = GenericFormSet(instance=platypus)
    tagged_item_id = TaggedItem.objects.get(
        tag='shiny', object_id=platypus.id
    ).id
    self.assertHTMLEqual(
        ''.join(form.as_p() for form in formset.forms),
        """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE" />
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id"
id="id_generic_relations-taggeditem-content_type-object_id-1-id" /></p>""" % tagged_item_id
    )
    # A custom prefix replaces the auto-generated field-name prefix.
    lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
    formset = GenericFormSet(instance=lion, prefix='x')
    self.assertHTMLEqual(
        ''.join(form.as_p() for form in formset.forms),
        """<p><label for="id_x-0-tag">Tag:</label>
<input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50" /></p>
<p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE" />
<input type="hidden" name="x-0-id" id="id_x-0-id" /></p>"""
    )
def test_gfk_manager(self):
    """
    GenericForeignKey should not use the default manager (which may
    filter objects). Refs #16048.
    """
    tailless_gecko = Gecko.objects.create(has_tail=False)
    lizard_tag = TaggedItem.objects.create(content_object=tailless_gecko, tag="lizard")
    self.assertEqual(lizard_tag.content_object, tailless_gecko)
def test_subclasses_with_gen_rel(self):
    """
    Concrete model subclasses with generic relations work correctly
    (ticket 11263).
    """
    granite = Rock.objects.create(name='granite', hardness=5)
    TaggedItem.objects.create(content_object=granite, tag="countertop")
    self.assertEqual(Rock.objects.get(tags__tag="countertop"), granite)
def test_subclasses_with_parent_gen_rel(self):
    """
    Generic relations on a base class (Vegetable) work correctly in
    subclasses (Carrot).
    """
    # Renamed the misleading local `bear` -> `carrot`.
    carrot = Carrot.objects.create(name='carrot')
    TaggedItem.objects.create(content_object=carrot, tag='orange')
    self.assertEqual(Carrot.objects.get(tags__tag='orange'), carrot)
def test_generic_inline_formsets_initial(self):
    """
    Test for #17927 Initial values support for BaseGenericInlineFormSet.
    """
    quartz = Mineral.objects.create(name="Quartz", hardness=7)
    formset_cls = generic_inlineformset_factory(TaggedItem, extra=1)
    quartz_ctype = ContentType.objects.get_for_model(quartz)
    initial_data = [{
        'tag': 'lizard',
        'content_type': quartz_ctype.pk,
        'object_id': quartz.pk,
    }]
    formset = formset_cls(initial=initial_data)
    self.assertEqual(formset.forms[0].initial, initial_data[0])
def test_get_or_create(self):
    """get_or_create works with virtual fields (content_object)."""
    quartz = Mineral.objects.create(name="Quartz", hardness=7)
    tag, created = TaggedItem.objects.get_or_create(
        tag="shiny", defaults={'content_object': quartz})
    self.assertTrue(created)
    self.assertEqual(tag.tag, "shiny")
    self.assertEqual(tag.content_object.id, quartz.id)
def test_update_or_create_defaults(self):
    """update_or_create works with virtual fields (content_object)."""
    quartz = Mineral.objects.create(name="Quartz", hardness=7)
    diamond = Mineral.objects.create(name="Diamond", hardness=7)
    tag, created = TaggedItem.objects.update_or_create(
        tag="shiny", defaults={'content_object': quartz})
    self.assertTrue(created)
    self.assertEqual(tag.content_object.id, quartz.id)
    # A second call matches the existing tag and re-points the GFK.
    tag, created = TaggedItem.objects.update_or_create(
        tag="shiny", defaults={'content_object': diamond})
    self.assertFalse(created)
    self.assertEqual(tag.content_object.id, diamond.id)
def test_query_content_type(self):
    """Filtering directly on a GenericForeignKey is unsupported."""
    msg = "Field 'content_object' does not generate an automatic reverse relation"
    with self.assertRaisesMessage(FieldError, msg):
        TaggedItem.objects.get(content_object='')
def test_unsaved_instance_on_generic_foreign_key(self):
    """
    Assigning an unsaved object to GenericForeignKey should raise an
    exception on model.save().
    """
    unsaved_quartz = Mineral(name="Quartz", hardness=7)
    with self.assertRaises(IntegrityError):
        TaggedItem.objects.create(tag="shiny", content_object=unsaved_quartz)
class CustomWidget(forms.TextInput):
    """Plain TextInput subclass used to verify Meta.widgets overrides."""
class TaggedItemForm(forms.ModelForm):
    """ModelForm for TaggedItem that overrides the 'tag' widget via Meta."""
    class Meta:
        model = TaggedItem
        fields = '__all__'
        widgets = {'tag': CustomWidget}
class GenericInlineFormsetTest(TestCase):
    """Behaviour of generic_inlineformset_factory with custom forms/options."""

    @staticmethod
    def _single_form_data():
        # POST payload describing exactly one new inline form.
        return {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
        }

    def test_generic_inlineformset_factory(self):
        """
        Regression for #14572: Using base forms with widgets
        defined in Meta should not raise errors.
        """
        formset_cls = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
        first_form = formset_cls().forms[0]
        self.assertIsInstance(first_form['tag'].field.widget, CustomWidget)

    def test_save_new_uses_form_save(self):
        """
        Regression for #16260: save_new should call form.save()
        """
        class SaveTestForm(forms.ModelForm):
            def save(self, *args, **kwargs):
                self.instance.saved_by = "custom method"
                return super(SaveTestForm, self).save(*args, **kwargs)

        formset_cls = generic_inlineformset_factory(
            ForProxyModelModel, fields='__all__', form=SaveTestForm)
        instance = ProxyRelatedModel.objects.create()
        formset = formset_cls(self._single_form_data(), instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        saved_obj = formset.save()[0]
        self.assertEqual(saved_obj.saved_by, "custom method")

    def test_save_new_for_proxy(self):
        """With for_concrete_model=False the proxy instance is linked."""
        formset_cls = generic_inlineformset_factory(
            ForProxyModelModel, fields='__all__', for_concrete_model=False)
        instance = ProxyRelatedModel.objects.create()
        formset = formset_cls(self._single_form_data(), instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        new_obj, = formset.save()
        self.assertEqual(new_obj.obj, instance)

    def test_save_new_for_concrete(self):
        """With for_concrete_model=True the concrete model is linked."""
        formset_cls = generic_inlineformset_factory(
            ForProxyModelModel, fields='__all__', for_concrete_model=True)
        instance = ProxyRelatedModel.objects.create()
        formset = formset_cls(self._single_form_data(), instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        new_obj, = formset.save()
        self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
class ProxyRelatedModelTest(TestCase):
    """GenericForeignKey behaviour around proxy models / for_concrete_model."""

    @staticmethod
    def _saved_base(model_cls, related):
        # Build and persist a model_cls instance whose GFK points at `related`.
        base = model_cls()
        base.obj = related
        base.save()
        return base

    def test_default_behavior(self):
        """
        The default for for_concrete_model should be True
        """
        rel = ProxyRelatedModel.objects.create()
        base = self._saved_base(ForConcreteModelModel, rel)
        base = ForConcreteModelModel.objects.get(pk=base.pk)
        # The GFK resolves against the concrete model, not the proxy.
        rel = ConcreteRelatedModel.objects.get(pk=rel.pk)
        self.assertEqual(base.obj, rel)

    def test_works_normally(self):
        """
        When for_concrete_model is False, we should still be able to get
        an instance of the concrete class.
        """
        rel = ConcreteRelatedModel.objects.create()
        base = self._saved_base(ForProxyModelModel, rel)
        base = ForProxyModelModel.objects.get(pk=base.pk)
        self.assertEqual(base.obj, rel)

    def test_proxy_is_returned(self):
        """
        Instances of the proxy should be returned when
        for_concrete_model is False.
        """
        base = self._saved_base(ForProxyModelModel, ProxyRelatedModel.objects.create())
        base = ForProxyModelModel.objects.get(pk=base.pk)
        self.assertIsInstance(base.obj, ProxyRelatedModel)

    def test_query(self):
        """Reverse lookup through the concrete related model works."""
        rel = ConcreteRelatedModel.objects.create()
        base = self._saved_base(ForProxyModelModel, rel)
        self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id))

    def test_query_proxy(self):
        """Reverse lookup through the proxy related model works."""
        rel = ProxyRelatedModel.objects.create()
        base = self._saved_base(ForProxyModelModel, rel)
        self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id))

    def test_generic_relation(self):
        """The GenericRelation on the proxy resolves back to the base."""
        base = self._saved_base(ForProxyModelModel, ProxyRelatedModel.objects.create())
        base = ForProxyModelModel.objects.get(pk=base.pk)
        rel = ProxyRelatedModel.objects.get(pk=base.obj.pk)
        self.assertEqual(base, rel.bases.get())

    def test_generic_relation_set(self):
        """set() on the reverse generic relation re-points the GFK."""
        base = self._saved_base(ForProxyModelModel, ConcreteRelatedModel.objects.create())
        newrel = ConcreteRelatedModel.objects.create()
        newrel.bases.set([base])
        newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk)
        self.assertEqual(base, newrel.bases.get())
class TestInitWithNoneArgument(SimpleTestCase):
    """Constructing models with content_object=None."""

    def test_none_not_allowed(self):
        # TaggedItem requires a content_type, so initializing the GFK with
        # None must raise a ValueError.
        with six.assertRaisesRegex(
                self, ValueError,
                'Cannot assign None: "TaggedItem.content_type" does not allow null values'):
            TaggedItem(content_object=None)

    def test_none_allowed(self):
        # AllowsNullGFK doesn't require a content_type, so a None argument
        # is accepted.
        AllowsNullGFK(content_object=None)
| |
import os
import sys
import numpy as np
import ConfigParser
from errno import ENOENT
from numpy import loadtxt
from pandas import read_excel, read_csv
from collections import OrderedDict
from scipy.interpolate import interp1d
from distutils.util import strtobool
from astropy.io import fits as astropyfits
# Function to create folders
def make_folder(folder_path):
    """Create folder_path (including parents); a pre-existing directory is fine."""
    # TODO: on Python 3 this collapses to os.makedirs(folder_path, exist_ok=True)
    try:
        os.makedirs(folder_path)
    except OSError:
        # Re-raise unless the path already exists as a directory.
        if not os.path.isdir(folder_path):
            raise
    return
# Function to delete files
def silent_remove(filename_list):
    """Delete each file in filename_list; missing files are silently ignored."""
    for file_path in filename_list:
        try:
            os.remove(file_path)
        except OSError as err:
            # Only "no such file or directory" is tolerated.
            if err.errno != ENOENT:
                raise
# Sample data for FIT3D compilation
def example_data(data_folder):
    """Return the bundled FIT3D example run arguments as an OrderedDict.

    The insertion order matches the positional command-line argument order
    (the numbered comments). `data_folder` is accepted for interface
    compatibility with the command-line loader but is not used here.
    """
    return OrderedDict([
        ('script', 'auto_ssp_elines_rnd.py'),        # 0
        ('input_spec', 'NGC5947.spec_5.txt'),        # 1
        ('SSPs_lib', 'ssp_lib.fits,ssp_lib.fits'),   # 2
        ('output_file', 'auto_ssp.NGC5947.cen.only.out'),  # 3
        ('mask_file', 'mask_elines.txt'),            # 4
        ('conf_file', 'auto_ssp_V500_several_Hb.config'),  # 5
        ('plot_tag', 1),                             # 6
        ('min', -1),                                 # 7
        ('max', 40),                                 # 8
        ('wmin', '3850'),                            # 9
        ('wmax', '6800'),                            # 10
        ('z_elines_mask', 'emission_lines.txt'),     # 11
        ('input_z', 0.02),                           # 12
        ('delta_z', 0.001),                          # 13
        ('min_z', 0.015),                            # 14
        ('max_z', 0.025),                            # 15
        ('input_sigma', 2.0),                        # 16
        ('delta_sigma', 0.5),                        # 17
        ('min_sigma', 1),                            # 18
        ('max_sigma', 9),                            # 19
        ('input_Av', 0.5),                           # 20
        ('delta_Av', 0.1),                           # 21
        ('min_Av', 0.0),                             # 22
        ('max_Av', 1.6),                             # 23
    ])
# Function to check for nan entries
def check_missing_flux_values(flux):
    """Print a stdout warning if the flux array contains any NaN entries."""
    # Evaluate the nan array
    nan_idcs = np.isnan(flux)
    nan_count = np.sum(nan_idcs)
    # Directly save if not nan
    if nan_count > 0:
        print '--WARNING: missing flux entries'
    return
# Function to import configuration data
def parseObjData(file_address, sectionName, objData):
    """Write the objData mapping into sectionName of an ini-style file.

    List/array values are stored comma-joined, None becomes an empty
    string, everything else is str()-converted. Existing file content is
    preserved and re-written (Python 2 ConfigParser).
    """
    parser = ConfigParser.SafeConfigParser()
    parser.optionxform = str  # keep option names case-sensitive
    if os.path.isfile(file_address):
        parser.read(file_address)
    if not parser.has_section(sectionName):
        parser.add_section(sectionName)
    for key, value in objData.items():
        if value is None:
            text = ''
        elif isinstance(value, (list, np.ndarray)):
            text = ','.join(str(x) for x in value)
        else:
            text = str(value)
        parser.set(sectionName, key, text)
    with open(file_address, 'w') as f:
        parser.write(f)
    return
# Function to save data to configuration file section
# Function to save data to configuration file section
def parseDataFile(file_address, section, data, type_data=None, key_suffix=''):
    """Save the `data` mapping into `section` of an existing ini file.

    Parameters
    ----------
    file_address : path of the configuration file; it must already exist,
        otherwise the program exits with a warning.
    section : section name; created if absent.
    data : mapping of option -> value to store.
    type_data : if 'lists', each value is converted to a comma-joined list
        before being stored.
    key_suffix : string appended to every option name.
    """
    # Check if file exists
    if os.path.isfile(file_address):
        cfg = ConfigParser.ConfigParser()
        cfg.optionxform = str  # keep option names case-sensitive
        cfg.read(file_address)
    else:
        exit('--WARNING: Default configuration could not be found exiting program\n-Missing file: {}'.format(file_address))
    # Check section is in conf.ini else create it
    if not cfg.has_section(section):
        cfg.add_section(section)
    # Convert each value to its storage format
    for key in data:
        value = data[key]
        if type_data is not None:
            # TODO add a protocol to infer best format to save data
            # BUG FIX: was "type_data is 'lists'" -- identity comparison on a
            # string literal only works by accident of interning; use equality.
            if type_data == 'lists':
                value = ','.join(str(x) for x in list(value))
        cfg.set(section, key + key_suffix, value)
    with open(file_address, 'w') as f:
        cfg.write(f)
    return
# Class with tools to import SSPs libraries
class SspSynthesisImporter:
def __init__(self):
    """Define the static FIT3D parameter tables used by the importer.

    Only data definitions live here: the expected command-line argument
    names, the layout of the configuration-file rows, the emission-line
    mask column headers and two fitting constants.
    """
    # ------------Configuration of Fit3D
    # Positional command-line arguments, in the order FIT3D expects them.
    self.sspSyn_commands_params = [
        'script',  # 0 python script name
        'input_spec',  # 1 input galactic spectrum name
        'SSPs_lib',  # 2 fits-table to use with python
        'output_file',  # 3 Reference name for output files
        'mask_file',  # 4 File with the spectrum region masks
        'conf_file',  # 5 Configuration file for the masks
        'plot_tag',  # 6 tag to launch the plotting
        'min',  # 7 Min flux for plotting
        'max',  # 8 Max flux for plotting
        'wmin',  # 9 Minimum wavelength for plotting
        'wmax',  # 10 Maximum wavelength for plotting
        'z_elines_mask',  # 11 Emission lines file
        'input_z',  # 12 Input redshift
        'delta_z',  # 13 Increments for redshift
        'min_z',  # 14 Minimum redshift
        'max_z',  # 15 Maximum redshift
        'input_sigma',  # 16 Input velocity dispersion
        'delta_sigma',  # 17 Increments for velocity dispersion
        'min_sigma',  # 18 Minimum velocity dispersion
        'max_sigma',  # 19 Maximum velocity dispersion
        'input_Av',  # 20 Input reddening
        'delta_Av',  # 21 Increments for reddening
        'min_Av',  # 22 Minimum reddening
        'max_Av',  # 23 Maximum reddening
    ]
    # The first 4 lines in the configuration file describe the input
    self.sspSyn_config_params = [['input_z', 'delta_z', 'min_z', 'max_z', 'DV', 'RV', 'DS', 'RS', 'MIN_W', 'MAX_W'],
                                 # 12-16
                                 ['input_sigma', 'delta_sigma', 'min_sigma', 'max_sigma'],
                                 # 17-20
                                 ['input_Av', 'delta_Av', 'min_Av', 'max_Av'],
                                 # 21-24
                                 ['N_Systems'],  # Number of SSP bases
                                 ['START_W', 'END_W', 'MASK_FILE', 'CONFIG_FILE', 'NPOLY', 'MASK_FILE_POLY',
                                  'N_MIN_E', 'N_MAX_E'],  # Bases config
                                 ['MIN_DELTA_CHISQ', 'MAX_NITER', 'CUT_MEDIAN_FLUX'],
                                 ['start_w_peak', 'end_w_peak'],
                                 ['wavelength_to_norm', 'width_AA', 'new_back_templates.fits']]
    # Columns of each bases row that hold numeric values (float indices)
    self.idcs_floats = np.array([0, 1, 4, 6, 7])
    # Emission lines mask column headers
    self.eline_mask_header = ['start_wave', 'end_wave', 'mask_file', 'mask_config_file', 'n_poly', 'mask_file_poly',
                              'n_min_e', 'n_max_e']
    # Number of montecarlo iterations
    self.n_mc = 30
    # Initial value for the chiSq_min
    self.chiSq_min = 1e12
    return
def load_FIT3D_data(self, conf_file, data_folder=None):
    """Assemble the full FIT3D fit configuration dictionary.

    Merges the configuration file with the command-line arguments (the
    latter take precedence), attaches the observational data, derives the
    per-product output file names and removes outputs from previous runs.
    """
    if data_folder is None:
        # Default to executing from the current working directory.
        data_folder = os.getcwd() + '/'
    command_dict = self.load_FIT3D_command_params(data_folder=data_folder)
    config_dict = self.load_FIT3D_config_file(conf_file)
    # Command-line values override the configuration file.
    config_dict.update(command_dict)
    # Load observational data and masks.
    config_dict = self.load_FIT3D_observational_fits(data_folder, config_dict)
    # Derive the output file names from the output root (name sans extension).
    output_root = config_dict['output_file'][:config_dict['output_file'].rfind('.')]
    for conf_key, file_code in (('single_output_file', 'single'),
                                ('coeffs_output_file', 'coeffs'),
                                ('spectrum_output_file', 'spec'),
                                ('em_lines_output_file', 'elines')):
        config_dict[conf_key] = '{rootname}_{file_code}.{ext}'.format(
            rootname=output_root, file_code=file_code, ext='txt')
    # Clear any outputs left behind by a previous run.
    silent_remove([config_dict['output_file'], config_dict['single_output_file'],
                   config_dict['coeffs_output_file'], config_dict['spectrum_output_file'],
                   config_dict['em_lines_output_file']])
    # Record where the data and configuration live.
    config_dict['data_folder'] = data_folder
    config_dict['conf_file'] = conf_file
    config_dict['data_type'] = 'FIT3D'
    return config_dict
def load_FIT3D_command_params(self, data_folder):
    """Read the FIT3D arguments from sys.argv into an OrderedDict.

    Falls back to the bundled example configuration when fewer than the
    seven required positional arguments were supplied.
    """
    # Empty dictionary to store the data from the commands from the command line
    command_dict = OrderedDict()
    # Extract line command arguments
    self.args_list = sys.argv
    # Check if the minimum parameters have been introduced (WARNING: Need to convert these to the right units)
    if len(self.args_list) > 7:
        command_dict = OrderedDict(zip(self.sspSyn_commands_params[:len(self.args_list)], self.args_list))
    else:
        print '--Error: The input command must include all these arguments:'
        print ', '.join(self.sspSyn_commands_params[:7])
        # Currently runs the test example when not enough data is provided
        print '---Using example data'
        command_dict = example_data(data_folder=data_folder)
    return command_dict
def load_FIT3D_config_file(self, config_file_address):
    """Parse a FIT3D configuration text file into a dictionary."""
    fit_conf_dict = {}
    with open(config_file_address) as conf_file:
        conf_lines = conf_file.readlines()
    # Rows 0-2: redshift, velocity-dispersion and reddening fit parameters.
    for row in range(3):
        row_values = np.array(conf_lines[row].split(), dtype=float)
        fit_conf_dict.update(zip(self.sspSyn_config_params[row], row_values))
    # Row 3: number of bases/mask rows that follow.
    nLineMasks = int(conf_lines[3])
    fit_conf_dict['nLineMasks'] = nLineMasks
    # Bases rows: 'START_W_n', 'END_W_n', 'MASK_FILE_n' ...
    for row in range(4, 4 + nLineMasks):
        row_entries = np.array(conf_lines[row].split())
        # Numeric columns (stored back into the string array, as before).
        for col in (0, 1, 4, 6, 7):
            row_entries[col] = float(row_entries[col])
        fit_conf_dict['base_{}'.format(row - 4)] = row_entries
    # ChiSq convergence row (converted to float).
    chi_values = np.array(conf_lines[4 + nLineMasks].split(), dtype=float)
    fit_conf_dict.update(zip(self.sspSyn_config_params[5], chi_values))
    # Peak wavelength row (converted to float).
    peak_values = np.array(conf_lines[5 + nLineMasks].split(), dtype=float)
    fit_conf_dict.update(zip(self.sspSyn_config_params[6], peak_values))
    # Optional normalization row (converted to float).
    if len(conf_lines) == 7 + nLineMasks:
        norm_values = np.array(conf_lines[6 + nLineMasks].split(), dtype=float)
        fit_conf_dict.update(zip(self.sspSyn_config_params[7], norm_values))
    else:
        fit_conf_dict['wave_norm'] = None
        fit_conf_dict['w_wave_norm'] = None
        fit_conf_dict['new_back_file'] = None
    return fit_conf_dict
def load_FIT3D_mask(self, config_dict, obs_flux_resam):
    """Build the boolean pixel mask for the observed spectrum.

    Combines four masks: non-zero flux pixels, the user spectrum-region
    masks, an artificial emission-line mask (+/- 4 sigma around each
    redshifted line) and the wmin/wmax wavelength limits. Returns the
    combined boolean array (True = pixel kept).
    """
    obs_wave = config_dict['obs_wave']
    # --------------Generating spectrum mask
    # Load spectrum masks
    mask_xmin, mask_xmax = loadtxt(config_dict['data_folder'] + config_dict['mask_file'], unpack=True)
    # Load emission lines reference to generate artificial mask
    emLine_wave = loadtxt(config_dict['data_folder'] + config_dict['z_elines_mask'], usecols=([0]), unpack=True)
    emLine_mask_xmin = emLine_wave * (1 + config_dict['input_z']) - 4.0 * config_dict['input_sigma']
    emLine_mask_xmax = emLine_wave * (1 + config_dict['input_z']) + 4.0 * config_dict['input_sigma']
    # First check non zero entries
    idx_mask_zero = (obs_flux_resam != 0)
    # Pixels within the spectrum mask
    idx_spec_mask = np.ones(len(obs_wave), dtype=bool)
    for i in range(len(mask_xmin)):
        idx_cur_spec_mask = (obs_wave > mask_xmin[i]) & (obs_wave < mask_xmax[i])
        idx_spec_mask = idx_spec_mask & ~idx_cur_spec_mask
    # Pixels within the emline mask
    idx_emline_mask = np.ones(len(obs_wave), dtype=bool)
    for i in range(len(emLine_wave)):
        idx_cur_emline_mask = (obs_wave > emLine_mask_xmin[i]) & (obs_wave < emLine_mask_xmax[i])
        idx_emline_mask = idx_emline_mask & ~idx_cur_emline_mask
    # Recover wavelength limits for the masks
    # NOTE(review): 'wmin'/'wmax' may be a plain number or a comma-separated
    # pair; only the first element of a pair is used — confirm intended.
    wmin_str, wmax_str = config_dict['wmin'].split(','), config_dict['wmax'].split(',')
    wmin = float(wmin_str[0]) if len(wmin_str) == 2 else float(config_dict['wmin'])
    wmax = float(wmax_str[0]) if len(wmax_str) == 2 else float(config_dict['wmax'])
    idx_mask_wmin, idx_mask_wmax = (obs_wave > wmin), (obs_wave < wmax)
    # Combine the individual indices into a global mask
    print idx_mask_zero.shape
    print idx_spec_mask.shape
    print idx_emline_mask.shape
    print idx_mask_wmax.shape
    total_masks = idx_mask_zero & idx_spec_mask & idx_emline_mask & idx_mask_wmin & idx_mask_wmax
    return total_masks
def load_FIT3D_observational_fits(self, data_folder, config_dict):
    """Load the observed spectrum text file and store it in *config_dict*.

    Expects columns: index, wavelength, flux and (optionally) flux variance.
    Stores the wavelength/flux arrays, the flux error (from the variance
    column when present, estimated from the flux otherwise) and a clipped
    error array where entries above 1.5x the median are capped.
    """
    # --------------Read observational data
    obs_data = loadtxt(data_folder + config_dict['input_spec'])
    obs_wave = obs_data[:, 1]
    obs_flux = obs_data[:, 2]
    # BUG FIX: the variance column is optional; indexing obs_data[:, 3]
    # unconditionally would raise an IndexError when it is absent, so the
    # later "is not None" branch could never be reached. Check column count.
    obs_fluxVar = obs_data[:, 3] if obs_data.shape[1] > 3 else None
    # Issues with spectra: nan entries
    check_missing_flux_values(obs_flux)
    # Get the error from the variance column when available
    if obs_fluxVar is not None:
        obs_flux_err = np.sqrt(abs(obs_fluxVar))
    # Else estimate it from the spectrum itself
    else:
        obs_flux_err = np.sqrt(abs(obs_flux) / 10)
    # Cap unusually large error entries at 1.5x the median error
    median_err = np.median(obs_flux_err)
    idx_big_err = (obs_flux_err > 1.5 * median_err)
    obs_fluxErrAdj = np.copy(obs_flux_err)
    obs_fluxErrAdj[idx_big_err] = 1.5 * median_err
    # --------------Store data
    config_dict['obs_wave'] = obs_wave
    config_dict['obs_flux'] = obs_flux
    config_dict['obs_flux_err'] = obs_flux_err
    config_dict['obs_fluxErrAdj'] = obs_fluxErrAdj
    config_dict['nObsPix'] = len(obs_flux)
    return config_dict
def import_Fit3D_ssplibrary(self, ssp_file_address):
    """Load a FIT3D SSP bases library from a fits file.

    Returns a dict with the bases fluxes, the wavelength grid rebuilt from
    the fits WCS keywords (CRPIX1/CDELT1/CRVAL1) and the age/metallicity of
    each base parsed from the NAME<i> header entries.
    """
    # Dictionary to store the data
    ssp_lib_dict = {}
    fluxBases, hdrBases = astropyfits.getdata(ssp_file_address, 0, header=True)
    fluxBases = np.asfortranarray(fluxBases)
    nBases, nPixelsBases = fluxBases.shape
    crpix, cdelt, crval = hdrBases['CRPIX1'], hdrBases['CDELT1'], hdrBases['CRVAL1']
    pixArray = np.arange(0, nPixelsBases) # WARNING should this arange start at one?
    basesWavelength = (crval + cdelt * (pixArray + 1 - crpix))
    # Extract age and metallicity from the bases names.
    # NOTE(review): the slicing assumes header names shaped like
    # '<9-char prefix><age>[Myr|Gyr]_z<metallicity>.<ext>' — confirm against
    # the library actually in use before relying on these vectors.
    Z_vector, age_vector = np.empty(nBases), np.empty(nBases)
    for i in range(nBases):
        header_code = 'NAME{}'.format(i)
        # Read metallicity and age from the headers list
        base_keyname = hdrBases[header_code]
        age_str = base_keyname[9:base_keyname.find('_z')]
        metal_str = base_keyname[base_keyname.find('_z') + 2:base_keyname.rfind('.')]
        # ages tagged 'Myr' are divided by 1000 so all entries share one unit
        age_factor = 1000.0 if 'Myr' in age_str else 1
        age_vector[i] = float(age_str[:-3]) / age_factor
        Z_vector[i] = float('0.' + metal_str)
    # Store library data in a dictionary
    ssp_lib_dict['crpix_bases'] = crpix
    ssp_lib_dict['cdelt_bases'] = cdelt
    ssp_lib_dict['crval_bases'] = crval
    ssp_lib_dict['basesWave'] = basesWavelength
    ssp_lib_dict['nBases'] = nBases
    ssp_lib_dict['nPixBases_max'] = nPixelsBases
    ssp_lib_dict['fluxBases'] = fluxBases
    ssp_lib_dict['hdrBases'] = hdrBases
    ssp_lib_dict['ageBases'] = age_vector
    ssp_lib_dict['zBases'] = Z_vector
    # ssp_lib_dict['bases_one_array'] = ones(nBases)
    return ssp_lib_dict
def import_STARLIGHT_ssplibrary(self, bases_folder, libraries_file_list):
    """Load a STARLIGHT SSP bases library.

    *libraries_file_list* is the STARLIGHT bases list (first line skipped);
    each listed file in *bases_folder* holds one base as two columns
    (wavelength, flux). Bases may have different wavelength grids, so the
    fluxes/wavelengths are returned as lists of arrays. Returns a dict with
    the bases, their ages/metallicities and the largest base pixel count.
    """
    print('\n--Importing STARLIGHT library')
    print('---Bases file: {}'.format(libraries_file_list))
    print('---Bases folder: {}'.format(bases_folder))
    # Dictionary to store the data
    ssp_lib_dict = {}
    columns_names = ['file_name', 'age_yr', 'z_star', 'bases_nickname', 'f_star', 'YAV_flag', 'alpha/Fe']
    # sep=r'\s+' is the modern equivalent of the deprecated delim_whitespace=True
    bases_df = read_csv(libraries_file_list, sep=r'\s+', names=columns_names, skiprows=1)
    nBases = len(bases_df.index)
    max_nPixelsBases = 0
    # Empty containers to store the data
    waveBases_orig = []
    fluxBases_orig = []
    Z_vector, age_vector = np.empty(nBases), np.empty(nBases)
    for i in range(nBases):
        bases_file = bases_folder + bases_df.iloc[i]['file_name']
        wave_base_i, flux_base_i = loadtxt(bases_file, unpack=True)
        # Original wavelength range and fluxes from the bases. They may have
        # different wavelength ranges, hence lists instead of a 2D array.
        waveBases_orig.append(wave_base_i)
        fluxBases_orig.append(flux_base_i)
        # BUG FIX: track the largest base size; previously max_nPixelsBases
        # was never updated, so 'nPixBases_max' was always 0
        max_nPixelsBases = max(max_nPixelsBases, len(wave_base_i))
        age_vector[i] = bases_df.iloc[i]['age_yr']
        Z_vector[i] = bases_df.iloc[i]['z_star']
    # Store library data in a dictionary
    ssp_lib_dict['basesWave'] = waveBases_orig
    ssp_lib_dict['nBases'] = nBases
    ssp_lib_dict['nPixBases_max'] = max_nPixelsBases
    ssp_lib_dict['fluxBases'] = fluxBases_orig
    ssp_lib_dict['ageBases'] = age_vector
    ssp_lib_dict['zBases'] = Z_vector
    # ssp_lib_dict['bases_one_array'] = ones(nBases)
    print('--Library imported')
    return ssp_lib_dict
# Class with SpecSyzer dataloading tools
class ImportModelData(SspSynthesisImporter):
    """Tools to load the SpecSyzer configuration, observations and SSP libraries."""

    def __init__(self, confFolder):
        """Load config.ini from *confFolder* and resolve the default data folders."""
        # Class with tools to import starlight bases
        SspSynthesisImporter.__init__(self)
        # Load default configuration file
        self.config = self.load_confFile(confFolder, 'config.ini')
        # Define default folders
        self.dataFolder = os.path.join(os.path.expanduser('~'), self.config['inference_folder'])
        self.inputsFolder = os.path.join(self.dataFolder, self.config['input_data_folder'])
        self.outputsFolder = os.path.join(self.dataFolder, self.config['output_data_folder'])
        self.externalDataFolder = os.path.join(confFolder, self.config['external_data_folder']) # TODO this declaration is not universal with operative system try pathlib
        # NOTE(review): linesFormatDf points at the same folder as
        # externalDataFolder — confirm this duplication is intentional
        self.linesFormatDf = os.path.join(confFolder, self.config['external_data_folder'])
        self.configFolder = os.path.join(confFolder, 'config.ini')
        self.linesDb = read_excel(os.path.join(self.externalDataFolder, self.config['linesData_file']), sheet_name=0, header=0, index_col=0)

    def load_confFile(self, root_folder, confFile):
        """Parse the .ini configuration into a flat dict, casting each option.

        Casting rules (by option name): 'string_conf' entries and names
        containing '_folder'/'_file' stay strings; '_check' entries become
        booleans; list-like entries are split on ',' into float arrays when
        possible (string arrays otherwise); everything else is read as float.
        """
        # Configuration file address
        file_address = '{}/{}'.format(root_folder, confFile)
        # Check if file exists
        if os.path.isfile(file_address):
            cfg = ConfigParser.ConfigParser()
            cfg.optionxform = str
            cfg.read(file_address)
        else:
            exit('--WARNING: Default configuration could not be found exiting program')
        # Loop through configuration file sections and merge into a dictionary
        confDict = dict(cfg.items('conf_entries'))
        confDict['sections'] = cfg.sections()
        for i in range(1, len(cfg.sections())):
            section = cfg.sections()[i]
            confDict[section] = cfg.options(section)
            for option in cfg.options(section):
                if (option in confDict['string_conf']) or ('_folder' in option) or ('_file' in option):
                    confDict[option] = cfg.get(section, option)
                elif '_check' in option:
                    confDict[option] = cfg.getboolean(section, option)
                elif (option in confDict['list_conf']) or ('_parameters' in option) or ('_prior' in option) or ('_list' in option) or ('_coeffs' in option):
                    raw_list = cfg.get(section, option)
                    # Special entry: 'input_lines' may be the literal keyword 'all'.
                    # BUG FIX: these comparisons used "is", which tests object
                    # identity and is unreliable for strings read at runtime.
                    if option == 'input_lines':
                        if raw_list == 'all':
                            confDict[option] = raw_list
                        else:
                            confDict[option] = np.array(map(str, raw_list.split(',')))
                    # By default try to read as a list of floats else strings
                    else:
                        try:
                            confDict[option] = np.array(map(float, raw_list.split(',')))
                        except ValueError:  # narrowed from a bare except
                            confDict[option] = np.array(map(str, raw_list.split(',')))
                # By default read as a float
                else:
                    confDict[option] = cfg.getfloat(section, option)
        # Include configuration file address in the dictionary
        confDict['confAddress'] = file_address
        return confDict

    def load_obsData(self, obsFile=None, objName=None):
        """Load one object's observation from *obsFile* (section *objName*).

        Returns a dict whose values are cast by key name: list-like keys into
        numpy arrays (items equal to 'None' become nan), boolean keys via
        strtobool, known string keys kept verbatim, everything else to float.
        Also loads the observed spectrum from 'address_spectrum'.
        """
        # TODO this should go into the master configuration
        list_parameters = ['input_lines', 'Av_prefit','sigma_star_prefit', 'coeffsPop_prefit', 'coeffsPopErr_prefit', 'wavelengh_limits', 'norm_interval'] #also all 'param_prior'
        boolean_parameters = ['Normalized_by_Hbeta']
        string_parameters = ['address_lines_log', 'address_spectrum', 'address_obs_mask', 'obsFile', 'objName']
        # ----Load the obj data
        if obsFile is not None:
            cfg = ConfigParser.SafeConfigParser()
            cfg.optionxform = str
            cfg.read(obsFile)
            # If no section is provided we assume the file only has one and it gives us the properties of the observation
            # NOTE(review): cfg.options(...) returns the option-name list;
            # cfg.sections()[0] looks like the intended value — confirm callers
            # before changing this behavior
            if objName is None:
                objName = cfg.options(cfg.sections()[0])
            # Dictionary with the observation data
            obj_data = dict(cfg.items(objName))
            obj_data['obsFile'] = obsFile
            obj_data['objName'] = objName
            # Recover data from previous fits
            results_section = objName + '_results'
            if cfg.has_section(results_section):
                prefit_data = dict(cfg.items(results_section))
                obj_data.update(prefit_data)
        else:
            # Dictionary with the observation data # TODO This does not work so well
            obj_data = locals()
        # Convert to the right format # TODO Add security warnings for wrong data
        for key in obj_data.keys():
            # Empty variable
            if obj_data[key] == '':
                obj_data[key] = None
            # None variable
            elif obj_data[key] is None:
                obj_data[key] = None
            # Arrays (the trailing ',' test makes any comma-containing value a list)
            elif ',' in obj_data[key]:
                if (key in list_parameters) or ('_prior' in key) or ('_true' in key) or (',' in obj_data[key]):
                    if key in ['input_lines']:
                        if obj_data[key] == 'all':
                            obj_data[key] = 'all'
                        else:
                            obj_data[key] = np.array(map(str, obj_data[key].split(',')))
                    else:
                        # floats with 'None' entries mapped to nan
                        newArray = []
                        textArrays = obj_data[key].split(',')
                        for item in textArrays:
                            convertValue = float(item) if item != 'None' else np.nan
                            newArray.append(convertValue)
                        obj_data[key] = np.array(newArray)
            # Boolean
            elif (key in boolean_parameters) or ('_check' in key):
                obj_data[key] = strtobool(obj_data[key]) == 1
            # Remaining are either strings (rest floats)
            elif key not in string_parameters:
                obj_data[key] = float(obj_data[key])
        # ----Load the obj spectrum, #TODO read this one using pandas and that way you can chek if there is a third column for the error
        obj_data['obs_wavelength'], obj_data['obs_flux'] = loadtxt(obj_data['address_spectrum'], usecols=(0, 1), unpack=True)
        # ----Load obj lines log # TODO update code to use address_lines_log
        obj_data['obj_lines_file'] = obj_data['address_lines_log']
        return obj_data

    def import_optical_depth_coeff_table(self, file_address):
        """Read a whitespace-separated coefficients table; return {column: values array}."""
        # (removed an unused OrderedDict local from the original)
        opticalDepthCoeffs_df = read_csv(file_address, delim_whitespace=True, header=0)
        opticalDepthCoeffs = {}
        for column in opticalDepthCoeffs_df.columns:
            opticalDepthCoeffs[column] = opticalDepthCoeffs_df[column].values
        return opticalDepthCoeffs

    def load_ssp_library(self, ssp_lib_type, data_folder=None, data_file=None, wavelengh_limits=None, resample_inc=None, norm_interval=None):
        """Import an SSP library ('FIT3D' or 'starlight'), optionally trimming/resampling/normalizing it."""
        # TODO In here we need to add a test sample library
        # Store stellar base type
        sspLib_dict = {'data_type': ssp_lib_type}
        # Import the base type
        if ssp_lib_type == 'FIT3D':
            # Check if more files are being introduced
            if ',' in data_file:
                ssp_lib1, ssp_lib2 = data_file.split(',')  # Currently we are only using the first one (the big)
            else:
                ssp_lib1 = data_file
            sspLib_dict = self.import_Fit3D_ssplibrary(data_folder + ssp_lib1)
        elif ssp_lib_type == 'starlight':
            sspLib_dict = self.import_STARLIGHT_ssplibrary(data_folder, data_file)
        # Store stellar base type
        sspLib_dict['data_type'] = ssp_lib_type
        # Trim, resample and normalize the ssp library if required
        if wavelengh_limits or resample_inc or norm_interval:
            self.treat_input_spectrum(sspLib_dict, sspLib_dict['basesWave'], sspLib_dict['fluxBases'], wavelengh_limits,
                                      resample_inc, norm_interval)
        return sspLib_dict

    def treat_input_spectrum(self, output_dict, spec_wave, spec_flux, wavelengh_limits=None, resample_inc=None, norm_interval=None):
        """Trim, resample and/or normalize a spectrum (or list of bases) into *output_dict*.

        *wavelengh_limits* accepts the sentinels 0 (auto start) and -1 (auto
        end), which are replaced in place (this mutates the caller's list).
        NOTE(review): resample_inc without wavelengh_limits, or norm_interval
        on list input without resample_inc, references undefined names — same
        as the original implementation; confirm the intended call patterns.
        """
        # TODO we should remove the nBases requirement by some style which can just read the number of dimensions
        # Store input values
        output_dict['wavelengh_limits'] = wavelengh_limits
        output_dict['resample_inc'] = resample_inc
        output_dict['norm_interval'] = norm_interval
        # Special case using 0, -1 indexing
        if wavelengh_limits is not None:
            inputWaveLimits = wavelengh_limits
            # BUG FIX: the -1 end sentinel is now honored independently of the
            # start value; previously it was only replaced when the start was
            # itself a sentinel (0 or -1)
            if wavelengh_limits[0] == 0:
                inputWaveLimits[0] = int(np.ceil(spec_wave[0]) + 1)
            if wavelengh_limits[-1] == -1:
                inputWaveLimits[-1] = int(np.floor(spec_wave[-1]) - 1)
        # Resampling the spectra
        if resample_inc is not None:
            wave_resam = np.arange(inputWaveLimits[0], inputWaveLimits[-1], resample_inc, dtype=float)
            # Loop through the fluxes (bases may have different wavelength ranges)
            if isinstance(spec_flux, list):
                flux_resam = np.empty((output_dict['nBases'], len(wave_resam)))
                for i in range(output_dict['nBases']):
                    flux_resam[i, :] = interp1d(spec_wave[i], spec_flux[i], bounds_error=True)(wave_resam)
            # In case only one dimension
            elif spec_flux.ndim == 1:
                flux_resam = interp1d(spec_wave, spec_flux, bounds_error=True)(wave_resam)
            output_dict['wave_resam'] = wave_resam
            output_dict['flux_resam'] = flux_resam
        else:
            output_dict['wave_resam'] = spec_wave
            output_dict['flux_resam'] = spec_flux
        # Normalizing the spectra
        if norm_interval is not None:
            # Loop through the fluxes (bases may have different wavelength ranges)
            if isinstance(spec_flux, list):
                normFlux_coeff = np.empty(output_dict['nBases'])
                flux_norm = np.empty((output_dict['nBases'], len(wave_resam)))
                for i in range(output_dict['nBases']):
                    idx_Wavenorm_min, idx_Wavenorm_max = np.searchsorted(spec_wave[i], norm_interval)
                    normFlux_coeff[i] = np.mean(spec_flux[i][idx_Wavenorm_min:idx_Wavenorm_max])
                    flux_norm[i] = output_dict['flux_resam'][i] / normFlux_coeff[i]
            elif spec_flux.ndim == 1:
                idx_Wavenorm_min, idx_Wavenorm_max = np.searchsorted(spec_wave, norm_interval)
                normFlux_coeff = np.mean(spec_flux[idx_Wavenorm_min:idx_Wavenorm_max])
                flux_norm = output_dict['flux_resam'] / normFlux_coeff
            output_dict['flux_norm'] = flux_norm
            output_dict['normFlux_coeff'] = normFlux_coeff
        else:
            output_dict['flux_norm'] = output_dict['flux_resam']
            output_dict['normFlux_coeff'] = 1.0
        return

    def generate_object_mask(self, linesDf, wavelength, linelabels):
        """Build boolean masks from a lines log: per-line masks, a combined
        integration mask and an object mask, stored on self.

        Lines in *linelabels* fill self.boolean_matrix (one row per line) and
        are removed from self.int_mask; the remaining log entries are removed
        from both self.int_mask and self.object_mask.
        """
        # TODO This will not work for a redshifted lines log
        idcs_lineMasks = linesDf.index.isin(linelabels)
        idcs_spectrumMasks = ~linesDf.index.isin(linelabels)
        # Matrix mask for integrating the emission lines
        n_lineMasks = idcs_lineMasks.sum()
        self.boolean_matrix = np.zeros((n_lineMasks, wavelength.size), dtype=bool)
        # Array with line wavelength resolution which we fill with a default value (there are lines beyond the continuum range)
        self.lineRes = np.ones(n_lineMasks) * (wavelength[1] - wavelength[0])
        # Total mask for valid regions in the spectrum
        n_objMasks = idcs_spectrumMasks.sum()
        self.int_mask = np.ones(wavelength.size, dtype=bool)
        self.object_mask = np.ones(wavelength.size, dtype=bool)
        # Loop through the emission lines
        wmin, wmax = linesDf['w3'].loc[idcs_lineMasks].values, linesDf['w4'].loc[idcs_lineMasks].values
        idxMin, idxMax = np.searchsorted(wavelength, [wmin, wmax])
        for i in range(n_lineMasks):
            if not np.isnan(wmin[i]) and not np.isnan(wmax[i]) and (wmax[i] < wavelength[-1]):  # We need this for lines beyond the continuum range #TODO propose better
                w2, w3 = wavelength[idxMin[i]], wavelength[idxMax[i]]
                idx_currentMask = (wavelength >= w2) & (wavelength <= w3)
                self.boolean_matrix[i, :] = idx_currentMask
                self.int_mask = self.int_mask & ~idx_currentMask
                self.lineRes[i] = wavelength[idxMax[i]] - wavelength[idxMax[i] - 1]
        # Loop through the object masks
        wmin, wmax = linesDf['w3'].loc[idcs_spectrumMasks].values, linesDf['w4'].loc[idcs_spectrumMasks].values
        idxMin, idxMax = np.searchsorted(wavelength, [wmin, wmax])
        for i in range(n_objMasks):
            if not np.isnan(wmin[i]) and not np.isnan(wmax[i]) and (wmax[i] < wavelength[-1]):
                w2, w3 = wavelength[idxMin[i]], wavelength[idxMax[i]]
                idx_currentMask = (wavelength >= w2) & (wavelength <= w3)
                self.int_mask = self.int_mask & ~idx_currentMask
                self.object_mask = self.object_mask & ~idx_currentMask
        return
| |
"""
==================================
Wolfcamp Example - Single las file
==================================
This example shows the full petrophysical workflow available in PetroPy
for a single wolfcamp las file courtesy of University Lands Texas.
The workflow progresses in these 11 steps
1. Read las file and create a :class:`petropy.Log` object
2. Load tops from a csv file using :meth:`petropy.Log.tops_from_csv`
3. Create a :class:`petropy.LogViewer` show in edit_mode to fix data
4. Define formations for calculations.
5. Calculate fluid properties by
1. Loading parameters via :meth:`petropy.Log.fluid_properties_parameters_from_csv`
2. Calculating over formations via :meth:`petropy.Log.formation_fluid_properties`
6. Calculate multimineral properties by
1. Loading parameters via :meth:`petropy.Log.multimineral_parameters_from_csv`
2. Calculating over formations via :meth:`petropy.Log.formation_multimineral_model`
7. Curve summations via :meth:`petropy.Log.summations`
8. Adding pay flags via :meth:`petropy.Log.add_pay_flag`
9. Clustering intervals into Electrofacies via :meth:`petropy.electrofacies`
10. Exporting log statistics via :meth:`petropy.Log.statistics`
11. Saving LogViewer to png and Log to las
To bulk process a folder of las files at once, use the `bulk example`_ .
Downloading the script at the bottom of this webpage will not download the required las
file or PetroPy logo. To download all files, view the `examples folder`_ on GitHub.
.. _bulk example: wolfcamp_bulk.html
.. _examples folder: https://github.com/toddheitmann/PetroPy/tree/master/examples
"""
import petropy as ptr
# import pyplot to add logo to figure
import matplotlib.pyplot as plt
### 1. Read las file ###
# create a Log object by reading a file path #
las_file_path = '42303347740000.las'
log = ptr.Log(las_file_path)
### 2. load tops ###
# tops.csv pairs formation names with depths for this well #
tops_file_path = 'tops.csv'
log.tops_from_csv(tops_file_path)
### 3. graphically edit log ###
# use manual mode for fixing borehole washout #
# and other changes requiring redrawing data #
# use bulk shift mode to linearly adjust all #
# curve data #
# close both windows to continue program #
viewer = ptr.LogViewer(log, top = 6950, height = 100)
viewer.show(edit_mode = True)
# overwrite log variable with updated log #
# from LogViewer edits #
log = viewer.log
### 4. define formations ###
# formations (loaded in step 2) processed by all later steps #
f = ['WFMPA', 'WFMPB', 'WFMPC']
### 5. fluid properties ###
# load fluid properties from a csv file #
# since path is not specified, load default #
# csv file included with petropy #
log.fluid_properties_parameters_from_csv()
# calculate fluid properties over defined #
# formations with parameter WFMP from #
# previously loaded csv #
log.formation_fluid_properties(f, parameter = 'WFMP')
### 6. multimineral model ###
# load multimineral parameters from csv file #
# since path is not specified, load default #
# csv file included with petropy #
log.multimineral_parameters_from_csv()
# calculate multimineral model over defined #
# formations with parameter WFMP from #
# previously loaded csv #
log.formation_multimineral_model(f, parameter = 'WFMP')
### 7. summations ###
# define curves to calculate cumulative values #
c = ['OIP', 'BVH', 'PHIE']
# calculate cumulative values over formations #
log.summations(f, curves = c)
### 8. pay flags ###
# define pay flags as list of tuples for #
# (curve, value) #
flag_1_gtoe = [('PHIE', 0.03)]
flag_2_gtoe = [('PAY_FLAG_1', 1), ('BVH', 0.02)]
flag_3_gtoe = [('PAY_FLAG_2', 1)]
flag_3_ltoe = [('SW', 0.2)]
# add pay flags over defined formations #
log.add_pay_flag(f, greater_than_or_equal = flag_1_gtoe)
log.add_pay_flag(f, greater_than_or_equal = flag_2_gtoe)
log.add_pay_flag(f, greater_than_or_equal = flag_3_gtoe,
                 less_than_or_equal = flag_3_ltoe)
### 9. electrofacies ###
# define curves to use in electrofacies module #
electro_logs = ['GR_N', 'RESDEEP_N', 'NPHI_N', 'RHOB_N', 'PE_N']
# make a list of Log objects as input #
logs = [log]
# calculate electrofacies for the defined logs#
# over the specified formations #
# finding 6 clusters of electrofacies #
# with RESDEEP_N logarithmically scaled #
logs = ptr.electrofacies(logs, f, electro_logs, 6,
                         log_scale = ['RESDEEP_N'])
# unpack log object from returned list #
log = logs[0]
### 10. statistics ###
# define list of curves to find statistics #
stats_curves = ['OIP', 'BVH', 'PHIE', 'SW', 'VCLAY', 'TOC']
# BUG FIX: pay_flags and facies_curves were referenced below but never
# defined, so this step raised a NameError. The pay flag curves come from
# the three add_pay_flag calls in step 8; electrofacies (step 9) adds a
# FACIES curve. TODO confirm the exact curve names against petropy docs.
pay_flags = ['PAY_FLAG_1', 'PAY_FLAG_2', 'PAY_FLAG_3']
facies_curves = ['FACIES']
# calculate stats over specified formation and#
# save to csv file wfmp_statistics.csv #
# update the line if the well, formation is #
# already included in the csv file #
log.statistics_to_csv('wfmp_statistics.csv', replace = True,
                      formations = f, curves = stats_curves,
                      pay_flags = pay_flags, facies = facies_curves)
### 11. export data ###
# find way to name well, looking for well name#
# or UWI or API #
if len(log.well['WELL'].value) > 0:
    well_name = log.well['WELL'].value
elif len(str(log.well['UWI'].value)) > 0:
    well_name = str(log.well['UWI'].value)
# NOTE(review): unlike the UWI branch, the API value is not wrapped in
# str() before len() — confirm API is always a string here
elif len(log.well['API'].value) > 0:
    well_name = str(log.well['API'].value)
else:
    well_name = 'UNKNOWN'
well_name = well_name.replace('.', '')
# scale height of viewer to top and bottom #
# of calculated values #
wfmpa_top = log.tops['WFMPA']
wfmpc_base = log.next_formation_depth('WFMPC')
top = wfmpa_top
height = wfmpc_base - wfmpa_top
# create LogViewer with the default full_oil #
# template included in petropy #
viewer = ptr.LogViewer(log, top = top, height = height,
                       template_defaults = 'full_oil')
# set viewer to 17x11 inches size for use in #
# PowerPoint or printing to larger paper #
viewer.fig.set_size_inches(17, 11)
# add well_name to title of LogViewer #
viewer.fig.suptitle(well_name, fontweight = 'bold', fontsize = 30)
# add logo to top left corner #
logo_im = plt.imread('company_logo.png')
logo_ax = viewer.fig.add_axes([0, 0.85, 0.2, 0.2])
logo_ax.imshow(logo_im)
logo_ax.axis('off')
# add text to top right corner #
if len(str(log.well['UWI'].value)) > 0:
    label = 'UWI: ' + str(log.well['UWI'].value) + '\n'
elif len(log.well['API'].value) > 0:
    label = 'API: ' + str(log.well['API'].value) + '\n'
else:
    label = ''
label += 'County: Reagan\nCreated By: Todd Heitmann\n'
label += 'Creation Date: October 23, 2017'
viewer.axes[0].annotate(label, xy = (0.99,0.99),
                        xycoords = 'figure fraction',
                        horizontalalignment = 'right',
                        verticalalignment = 'top',
                        fontsize = 14)
# save figure and log #
viewer_file_name=r'%s_processed.png' % well_name
las_file_name = r'%s_processed.las' % well_name
viewer.fig.savefig(viewer_file_name)
viewer.log.write(las_file_name)
| |
from ipaddress import IPv4Address, IPv6Address, IPv6Network
from contextlib import contextmanager
import tempfile
import os
import random
import unittest
import requests
import iprir
from iprir.record import RIRRecord, ip_to_int
from iprir.parser import parse_file, parse_string
from iprir.database import DB
from iprir.ipset import IpSet
import iprir.updater
# Minimal RIR "delegated" file fixture: version header, three summary lines
# and five records (1 asn, 2 ipv4, 2 ipv6) — the two ipv6 blocks are adjacent.
SAMPLE_TEXT_DB_CONTENT = '''
#
2|apnic|20170120|50186|19830613|20170119|+1000
apnic|*|asn|*|7517|summary
apnic|*|ipv4|*|36581|summary
apnic|*|ipv6|*|6088|summary
apnic|NZ|asn|681|1|20020801|allocated
apnic|AU|ipv4|1.0.0.0|256|20110811|assigned
apnic|CN|ipv4|1.0.1.0|256|20110414|allocated
apnic|CN|ipv6|2001:250::|35|20000426|allocated
apnic|CN|ipv6|2001:250:2000::|35|20020726|allocated
'''
# Populated by setUpModule() with records parsed from the real downloaded DBs
REAL_RECORDS = None
# noinspection PyPep8Naming
def setUpModule():
    """Initialize the local RIR databases and parse every text DB once."""
    global REAL_RECORDS
    iprir.updater.initialize()
    REAL_RECORDS = [record
                    for path in iprir.TEXT_DB_PATH.values()
                    for record in parse_file(path)]
@contextmanager
def patch(obj, key, value):
    """Temporarily set ``obj.key = value``, restoring the original on exit."""
    saved = getattr(obj, key)
    setattr(obj, key, value)
    try:
        yield
    finally:
        setattr(obj, key, saved)
@contextmanager
def patch_db_path():
    """Point iprir at throw-away text/sqlite DB files for the duration.

    Yields ``(text_db_path, sql_db_path)``. The temporary files are removed
    on a clean exit but intentionally left behind when the body raises, so
    they can be inspected (their paths are printed above).
    """
    fd, text_db_path = tempfile.mkstemp(prefix='iprir_test_', suffix='.txt')
    os.close(fd)
    fd, sql_db_path = tempfile.mkstemp(prefix='iprir_test_', suffix='.sqlite')
    os.close(fd)
    print('text_db_path', text_db_path)
    print('sql_db_path', sql_db_path)
    with patch(iprir, 'TEXT_DB_PATH', dict(test=text_db_path)), \
            patch(iprir, 'TEXT_DB_URLS', dict(test='https://dummy/')), \
            patch(iprir, 'SQL_DB_PATH', sql_db_path):
        try:
            yield text_db_path, sql_db_path
        except Exception:
            raise
        else:
            os.remove(text_db_path)
            os.remove(sql_db_path)
def write_string_to_file(filename: str, string: str):
    """Overwrite *filename* with *string* (text mode)."""
    with open(filename, 'wt') as target:
        target.write(string)
def test_record_ipv4():
    """An ipv4 record exposes size, network and integer forms consistently."""
    rec = RIRRecord('CN', 'ipv4', '1.0.1.0', '256', 'assigned')
    # a /24 assignment: 256 addresses starting at 1.0.1.0
    assert rec.length == 256
    assert rec.ipv4.exploded == '1.0.1.0'
    assert rec.ipv4_network.network_address == rec.ipv4
    assert rec.ipv4_network.prefixlen == 24
    assert rec.ipv4 == IPv4Address(rec.as_int)
def test_record_ipv6():
    """An ipv6 record's value field is a prefix length, not an address count."""
    rec = RIRRecord('CN', 'ipv6', '2001:250::', '35', 'allocated')
    assert rec.length == 2 ** (128 - 35)
    assert rec.ipv6.compressed == '2001:250::'
    assert rec.ipv6_network.network_address == rec.ipv6
    assert rec.ipv6_network.prefixlen == 35
    assert rec.ipv6 == IPv6Address(rec.as_int)
def test_parse():
    """Header and summary lines are skipped; records are parsed in file order."""
    parsed = parse_string(SAMPLE_TEXT_DB_CONTENT)
    assert len(parsed) == 5
    last = parsed[-1]
    expected = (
        'CN',
        IPv6Address('2001:250:2000::'),
        IPv6Network('2001:250:2000::/35'),
        'allocated',
    )
    assert (last.country, last.ipv6, last.ipv6_network, last.status) == expected
def test_ip_overlap():
    """Filtered ranges in the real DB must not overlap, per address family."""
    def verify(lst):
        # sort by range start; each range must end at or before the next start
        lst.sort(key=lambda x: x[0])
        for i in range(1, len(lst)):
            prev_start, prev_len = lst[i - 1]
            assert prev_start + prev_len <= lst[i][0]
    lst4 = []
    lst6 = []
    for r in REAL_RECORDS:
        if r.country == 'AP':  # asia/pacific
            # XXX: conflicts
            # apnic|AP|ipv4|159.117.192.0|2048|19920409|allocated|A928972C
            # ripencc|NL|ipv4|159.117.192.0|2048|19920409|assigned|
            continue
        if not DB.filter_record(r):
            continue
        if r.type == 'ipv4':
            lst4.append((r.as_int, r.length))
        elif r.type == 'ipv6':
            lst6.append((r.as_int, r.length))
    verify(lst4)
    verify(lst6)
def test_db():
    """Round-trip the sample records through the sqlite DB and query them back."""
    with patch_db_path() as pathes:
        text_db_path, sql_db_path = pathes
        write_string_to_file(text_db_path, SAMPLE_TEXT_DB_CONTENT)
        records = parse_file(text_db_path)
        db = DB()
        try:
            ret = db.reset_table()
            assert ret
            ret = db.add_records(records)
            assert ret
            # lookups by country (records[2] is the CN ipv4 block)
            cn4 = db.by_country('ipv4', 'CN')
            assert len(cn4) == 1
            assert cn4[0] == records[2]
            cn6 = db.by_country('ipv6', 'CN')
            assert len(cn6) == 2
            assert cn6 == records[3:5]
            # lookups by address: first, last and one-past-the-end of a block
            r = db.by_ip(IPv4Address('1.0.1.0'))
            assert r == records[2]
            r = db.by_ip(IPv4Address('1.0.1.255'))
            assert r == records[2]
            r = db.by_ip(IPv4Address('1.0.2.0'))
            assert r is None
            r = db.by_ip(IPv6Address('2001:250::'))
            assert r == records[3]
            # the address just past records[3]'s block starts records[4]'s
            net = records[3].ipv6_network
            r = db.by_ip(net.network_address + net.num_addresses)
            assert r == records[4]
            net = records[4].ipv6_network
            r = db.by_ip(net.network_address + net.num_addresses)
            assert r is None
        finally:
            db.close()
def test_update():
    """update() fetches the (mocked) text DBs and rebuilds the sqlite DB."""
    def fake_get(*args, **kwargs):
        # minimal stand-in for requests.get: only the .text attribute is used
        class Obj:
            pass
        o = Obj()
        o.text = SAMPLE_TEXT_DB_CONTENT
        return o
    with patch(requests, 'get', fake_get):
        with patch_db_path():
            iprir.updater.update()
            db = DB()
            try:
                records = parse_string(SAMPLE_TEXT_DB_CONTENT)
                # only address records are expected back, not asn ones
                records = list(filter(lambda r: r.type in ('ipv4', 'ipv6'), records))
                assert db.all() == records
            finally:
                db.close()
def test_ipset():
    """IpSet merges adjacent ranges and supports fast membership tests."""
    def to_int(ips):
        return [ip_to_int(IPv4Address(ip)) for ip in ips]
    # two adjacent /24s (1.0.0.0, 1.0.1.0) plus a disjoint one (1.0.5.0)
    text = '''
2|apnic|20170120|50186|19830613|20170119|+1000
apnic|*|ipv6|*|6088|summary
apnic|AU|ipv4|1.0.0.0|256|20110811|assigned
apnic|CN|ipv4|1.0.1.0|256|20110414|allocated
apnic|CN|ipv4|1.0.5.0|256|20110414|allocated
'''
    records = parse_string(text)
    # input order must not matter for the resulting interval set
    random.shuffle(records)
    ipset = IpSet(records)
    # lo/hi hold the merged half-open [lo, hi) interval endpoints
    assert ipset.lo == to_int(['1.0.0.0', '1.0.5.0'])
    assert ipset.hi == to_int(['1.0.2.0', '1.0.6.0'])
    assert IPv4Address('0.255.255.255') not in ipset
    assert IPv4Address('1.0.0.0') in ipset
    assert IPv4Address('1.0.1.0') in ipset
    assert IPv4Address('1.0.1.255') in ipset
    assert IPv4Address('1.0.2.0') not in ipset
    assert IPv4Address('1.0.4.255') not in ipset
    assert IPv4Address('1.0.5.0') in ipset
    assert IPv4Address('1.0.5.255') in ipset
    assert IPv4Address('1.0.6.0') not in ipset
    # test IpSet.by_country()
    with patch_db_path() as pathes:
        text_db_path, sql_db_path = pathes
        write_string_to_file(text_db_path, text)
        iprir.updater.update_sql_db()
        ipset = IpSet.by_country('ipv4', 'CN')
        assert ipset.lo == to_int(['1.0.1.0', '1.0.5.0'])
        assert ipset.hi == to_int(['1.0.2.0', '1.0.6.0'])
class TestIpSetOnRealData(unittest.TestCase):
    """Sanity-check IpSet.by_country against the real downloaded data."""
    # indirection so a subclass can swap in the public API wrapper
    by_country = staticmethod(IpSet.by_country)
    def test_by_country(self):
        # test on real data
        cn4 = self.by_country('ipv4', 'CN')
        assert IPv4Address('1.2.4.8') in cn4
        assert IPv4Address('111.13.101.208') in cn4
        assert IPv4Address('112.124.47.27') in cn4
        assert IPv4Address('74.125.68.105') not in cn4
class TestRealDataWithApi(TestIpSetOnRealData):
    """Re-run the IpSet checks through the top-level iprir API, plus by_ip."""
    by_country = staticmethod(iprir.by_country)
    def test_by_ip(self):
        # 8.8.8.8 falls inside the US 8.0.0.0 block of 16777216 addresses (a /8)
        assert iprir.by_ip(IPv4Address('8.8.8.8')) == RIRRecord(
            country='US', type='ipv4', start='8.0.0.0', value='16777216', status='allocated',
        )
# noinspection PyPep8Naming
def tearDownModule():
    # release the module-level sqlite connection opened by the tests
    iprir.get_db().close()
| |
#!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a simple command line OF-CONFIG client
#
# a usage example:
# % PYTHONPATH=. ./bin/of_config_cli \
# --peers=sw1=localhost:1830:username:password
# (Cmd) raw_get sw1
from __future__ import print_function
import ryu.contrib
ryu.contrib.update_module_path()
from ryu import cfg
import cmd
import sys
import lxml.etree as ET
from ryu.lib import of_config
from ryu.lib.of_config import capable_switch
from ncclient.operations.rpc import RPCError
import ryu.lib.of_config.classes as ofc
CONF = cfg.CONF
# CLI option: --peers=name=host:port:username:password[,...]
CONF.register_cli_opts([
    cfg.ListOpt('peers', default=[], help='list of peers')
])
class Peer(capable_switch.OFCapableSwitch):
    """A named NETCONF connection to one OF-Config capable switch."""

    def __init__(self, name, host, port, username, password):
        self._name = name
        # accept any SSH host key: the callback ignores the fingerprint
        super(Peer, self).__init__(
            host=host, port=port, username=username, password=password,
            unknown_host_cb=lambda host, fingerprint: True)
# registry of connected switches, keyed by peer name
peers = {}
def add_peer(name, host, port, username, password):
    # open a NETCONF session to the switch and register it under *name*
    peers[name] = Peer(name, host, port, username, password)
def et_tostring_pp(tree):
    """Serialize *tree*, pretty-printed when the ET backend supports it."""
    # pretty_print is an lxml extension; plain ElementTree rejects the keyword
    try:
        return ET.tostring(tree, pretty_print=True)
    except TypeError:
        pass
    # fall back to the compact, ElementTree-compatible serialization
    return ET.tostring(tree)
def validate(tree):
    # Validate *tree* against the OF-Config 1.1.1 XML schema; on failure the
    # schema error log is printed (validation is advisory, never fatal)
    schema = ET.XMLSchema(file=of_config.OF_CONFIG_1_1_1_XSD)
    if not schema(tree):
        print(schema.error_log)
class Cmd(cmd.Cmd):
def __init__(self, *args, **kwargs):
    # flag set here and presumably toggled during command dispatch
    # (its use is not visible in this chunk — confirm before relying on it)
    self._in_onecmd = False
    cmd.Cmd.__init__(self, *args, **kwargs)
def _request(self, line, f):
    """Parse "<peer> [args...]" from *line*, look up the peer and run f(peer, args).

    User errors (missing peer argument, unknown peer) and RPC/connection
    failures print a diagnostic instead of raising.
    """
    args = line.split()
    try:
        peer = args[0]
    except IndexError:  # narrowed from a bare except: only a missing argument can occur here
        print("argument error")
        return
    try:
        p = peers[peer]
    except KeyError:
        print("unknown peer %s" % peer)
        return
    try:
        f(p, args[1:])
    except RPCError as e:
        print("RPC Error %s" % e)
    except EOFError:
        print("disconnected")
def _complete_peer(self, text, line, _begidx, _endidx):
    # completion applies only to the first argument (the peer name);
    # the appended 'x' makes a trailing space count as a new word
    words = (line + 'x').split()
    if len(words) >= 3:
        return []
    matches = [candidate for candidate in peers if candidate.startswith(text)]
    return matches
def do_list_cap(self, line):
    """list_cap <peer>
    """
    # print every NETCONF capability advertised by the peer
    def f(p, args):
        for i in p.netconf.server_capabilities:
            print(i)
    self._request(line, f)
def do_raw_get(self, line):
    """raw_get <peer>
    """
    # fetch the raw XML state, schema-validate it and pretty-print it
    def f(p, args):
        result = p.raw_get()
        tree = ET.fromstring(result)
        validate(tree)
        print(et_tostring_pp(tree))
    self._request(line, f)
def do_raw_get_config(self, line):
    """raw_get_config <peer> <source>
    """
    # fetch the raw XML config from <source>, schema-validate and pretty-print
    def f(p, args):
        try:
            source = args[0]
        except IndexError:  # narrowed from a bare except: only a missing argument is expected
            print("argument error")
            return
        result = p.raw_get_config(source)
        tree = ET.fromstring(result)
        validate(tree)
        print(et_tostring_pp(tree))
    self._request(line, f)
def do_get(self, line):
    """get <peer>
    eg. get sw1
    """
    # print the switch state as parsed OF-Config objects
    def f(p, args):
        print(p.get())
    self._request(line, f)
def do_commit(self, line):
    """commit <peer>
    eg. commit sw1
    """
    # issue a NETCONF <commit> and print its reply
    def f(p, args):
        print(p.commit())
    self._request(line, f)
def do_discard(self, line):
    """discard <peer>
    eg. discard sw1
    """
    # issue a NETCONF <discard-changes> and print its reply
    def f(p, args):
        print(p.discard_changes())
    self._request(line, f)
def do_get_config(self, line):
    """get_config <peer> <source>
    eg. get_config sw1 startup
    """
    def f(p, args):
        try:
            source = args[0]
        except IndexError:  # narrowed from a bare except: only a missing argument is expected
            print("argument error")
            return
        print(p.get_config(source))
    self._request(line, f)
def do_delete_config(self, line):
    """delete_config <peer> <source>
    eg. delete_config sw1 startup
    """
    def f(p, args):
        try:
            source = args[0]
        except IndexError:  # narrowed from a bare except: only a missing argument is expected
            print("argument error")
            return
        print(p.delete_config(source))
    self._request(line, f)
def do_copy_config(self, line):
"""copy_config <peer> <source> <target>
eg. copy_config sw1 running startup
"""
def f(p, args):
try:
source, target = args
except:
print("argument error")
return
print(p.copy_config(source, target))
self._request(line, f)
def do_list_port(self, line):
"""list_port <peer>
"""
def f(p, args):
o = p.get()
for p in o.resources.port:
print('%s %s %s' % (p.resource_id, p.name, p.number))
self._request(line, f)
_port_settings = [
'admin-state',
'no-forward',
'no-packet-in',
'no-receive',
]
def do_get_port_config(self, line):
"""get_config_port <peer> <source> <port>
eg. get_port_config sw1 running LogicalSwitch7-Port2
"""
def f(p, args):
try:
source, port = args
except:
print("argument error")
return
o = p.get_config(source)
for p in o.resources.port:
if p.resource_id != port:
continue
print(p.resource_id)
conf = p.configuration
for k in self._port_settings:
try:
v = getattr(conf, k)
except AttributeError:
continue
print('%s %s' % (k, v))
self._request(line, f)
def do_set_port_config(self, line):
"""set_port_config <peer> <target> <port> <key> <value>
eg. set_port_config sw1 running LogicalSwitch7-Port2 admin-state down
eg. set_port_config sw1 running LogicalSwitch7-Port2 no-forward false
"""
def f(p, args):
try:
target, port, key, value = args
except:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
port=[
ofc.OFPortType(
resource_id=port,
configuration=ofc.OFPortConfigurationType(
**{key: value}))
]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_list_queue(self, line):
"""list_queue <peer>
"""
def f(p, args):
o = p.get()
if o.resources.queue:
for q in o.resources.queue:
print('%s %s' % (q.resource_id, q.port))
self._request(line, f)
_queue_settings = [
'max-rate',
'min-rate',
'experimenter',
]
def do_get_queue_config(self, line):
"""get_queue_port <peer> <source> <queue>
eg. get_queue_config sw1 running LogicalSwitch7-Port1-Queue922
"""
def f(p, args):
try:
source, queue = args
except:
print("argument error")
return
o = p.get_config(source)
for q in o.resources.queue:
if q.resource_id != queue:
continue
print(q.resource_id)
conf = q.properties
for k in self._queue_settings:
try:
v = getattr(conf, k)
except AttributeError:
continue
print('%s %s' % (k, v))
self._request(line, f)
def do_set_queue_config(self, line):
"""set_queue_config <peer> <target> <queue> <key> <value>
eg. set_queue_config sw1 running LogicalSwitch7-Port1-Queue922 \
max-rate 100
"""
def f(p, args):
try:
target, queue, key, value = args
except:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
queue=[
ofc.OFQueueType(
resource_id=queue,
properties=ofc.OFQueuePropertiesType(
**{key: value})),
]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_add_queue(self, line):
"""add_queue <peer> <target> <logical-switch> <queue>
eg. add_queue sw1 running LogicalSwitch7 NameOfNewQueue
"""
def f(p, args):
try:
target, lsw, queue = args
except:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
queue=[
ofc.OFQueueType(resource_id=queue)
]
),
logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
switch=[ofc.OFLogicalSwitchType(
id=lsw,
resources=ofc.OFLogicalSwitchResourcesType(
queue=[queue])
)]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_list_logical_switch(self, line):
"""list_logical_switch <peer>
"""
def f(p, args):
o = p.get()
for s in o.logical_switches.switch:
print('%s %s' % (s.id, s.datapath_id))
self._request(line, f)
def do_show_logical_switch(self, line):
"""show_logical_switch <peer> <logical switch>
"""
def f(p, args):
try:
(lsw,) = args
except:
print("argument error")
return
o = p.get()
for s in o.logical_switches.switch:
if s.id != lsw:
continue
print(s.id)
print('datapath-id %s' % s.datapath_id)
if s.resources.queue:
print('queues:')
for q in s.resources.queue:
print('\t %s' % q)
if s.resources.port:
print('ports:')
for p in s.resources.port:
print('\t %s' % p)
self._request(line, f)
_lsw_settings = [
'lost-connection-behavior',
]
def do_get_logical_switch_config(self, line):
"""get_logical_switch_config <peer> <source> <logical switch>
"""
def f(p, args):
try:
source, lsw = args
except:
print("argument error")
return
o = p.get_config(source)
for l in o.logical_switches.switch:
if l.id != lsw:
continue
print(l.id)
for k in self._lsw_settings:
try:
v = getattr(l, k)
except AttributeError:
continue
print('%s %s' % (k, v))
self._request(line, f)
def do_set_logical_switch_config(self, line):
"""set_logical_switch_config <peer> <logical switch> <key> <value>
eg. set_logical_switch_config sw1 running LogicalSwitch7 \
lost-connection-behavior failStandaloneMode
"""
def f(p, args):
try:
target, lsw, key, value = args
except:
print("argument error")
return
# get switch id
o = p.get_config(target)
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
switch=[ofc.OFLogicalSwitchType(
id=lsw,
**{key: value}
)]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
completedefault = _complete_peer
def complete_EOF(self, _text, _line, _begidx, _endidx):
return []
def do_EOF(self, _line):
sys.exit(0)
def onecmd(self, string):
self._in_onecmd = True
try:
return cmd.Cmd.onecmd(self, string)
finally:
self._in_onecmd = False
def main(args=None, prog=None):
    """Entry point for the of-config-cli shell.

    Registers every peer listed in CONF.peers (each formatted as
    "<name>=<host>:<port>:<username>:<password>") and then starts the
    interactive command loop.
    """
    CONF(args=args, prog=prog,
         project='of-config-cli', version='of-config-cli')
    for peer_spec in CONF.peers:
        name, addr = peer_spec.split('=')
        host, port, username, password = addr.rsplit(':', 3)
        add_peer(name, host, port, username, password)
    Cmd().cmdloop()
if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
#
# geo.py is a python module with no dependencies on extra packages,
# providing some convenience functions for working with geographic
# coordinates
#
# Copyright (C) 2010 Maximilian Hoegner <hp.maxi@hoegners.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# taken from http://hoegners.de/Maxi/geo/
### Part one - Functions for dealing with points on a sphere ###
import math
# Module-level constants: mean earth radius (metres), magnetic north pole
# position, and the 16-point compass rose.
EARTH_RADIUS = 6370000.
MAG_LAT = 82.7
MAG_LON = -114.4
direction_names = ["N","NNE","NE","ENE","E","ESE","SE","SSE","S","SSW","SW","WSW","W","WNW","NW","NNW"]
directions_num = len(direction_names)
directions_step = 360./directions_num
def xyz(lat, lon, r=EARTH_RADIUS):
    """Convert spherical coordinates (lat/lon in degrees, radius r) to a
    cartesian (x, y, z) triple."""
    lat_rad = math.radians(lat)
    lon_rad = math.radians(lon)
    horizontal = r * math.cos(lat_rad)
    return (horizontal * math.cos(lon_rad),
            horizontal * math.sin(lon_rad),
            r * math.sin(lat_rad))
def dot(p1, p2):
    """Return the scalar (dot) product of two 3-vectors."""
    x1, y1, z1 = p1[0], p1[1], p1[2]
    x2, y2, z2 = p2[0], p2[1], p2[2]
    return x1 * x2 + y1 * y2 + z1 * z2
def cross(p1, p2):
    """Return the cross product of two 3-vectors as an (x, y, z) tuple."""
    a0, a1, a2 = p1[0], p1[1], p1[2]
    b0, b1, b2 = p2[0], p2[1], p2[2]
    return (a1 * b2 - a2 * b1,
            a2 * b0 - a0 * b2,
            a0 * b1 - a1 * b0)
def determinant(p1, p2, p3):
    """Scalar triple product p1 . (p2 x p3) of three vectors."""
    normal = cross(p2, p3)
    return dot(p1, normal)
def normalize_angle(angle):
    """Map an angle in degrees onto the half-open interval [0, 360)."""
    cycles = angle / 360.
    fractional_cycle = cycles - math.floor(cycles)
    return fractional_cycle * 360.
def sgn(x):
    """Signum as a float: -1., 0., or 1. according to the sign of x."""
    if x > 0:
        return 1.
    if x < 0:
        return -1.
    return 0.
def angle(v1, v2, n=None):
    """ Returns angle between v1 and v2 in degrees. n can be a vector that points to an observer who is looking at the plane containing v1 and v2. This way, you can get well-defined signs. """
    # Fix: use identity comparison for the None default.  "n == None"
    # invokes __eq__, which misbehaves for array-like vector types and is
    # non-idiomatic (PEP 8).
    if n is None:
        n = cross(v1, v2)
    # Cosine of the enclosed angle from the normalized dot product.
    prod = dot(v1, v2) / math.sqrt(dot(v1, v1) * dot(v2, v2))
    # The determinant's sign encodes orientation as seen from n.
    rad = sgn(determinant(v1, v2, n)) * math.acos(prod)
    deg = math.degrees(rad)
    return normalize_angle(deg)
def great_circle_angle(p1, p2, p3):
    """ Returns angle w(p1,p2,p3) in degrees. Needs p1 != p2 and p2 != p3. """
    # Angle between the two great-circle planes (p1,p2) and (p3,p2),
    # oriented as seen from p2.
    plane12_normal = cross(p1, p2)
    plane32_normal = cross(p3, p2)
    return angle(plane12_normal, plane32_normal, p2)
def distance(p1, p2, r=EARTH_RADIUS):
    """ Returns length of curved way between two points p1 and p2 on a sphere with radius r. """
    central_angle_deg = angle(p1, p2)
    return math.radians(central_angle_deg) * r
def direction_name(angle):
    """ Returns a name for a direction given in degrees. Example: direction_name(0.0) returns "N", direction_name(90.0) returns "E", direction_name(152.0) returns "SSE". """
    # Round to the nearest 22.5-degree compass sector, wrapping at north.
    index = int(round( normalize_angle(angle)/directions_step ))
    index %= directions_num
    return direction_names[index]
# Precomputed cartesian positions of the magnetic and geographic north
# poles, for use with angle()/great_circle_angle().
magnetic_northpole=xyz(MAG_LAT,MAG_LON)
geographic_northpole=xyz(90,0)
### Part two - A tolerant parser for position strings ###
import re
class Parser:
    """ A parser class using regular expressions.

    Named patterns may reference previously added patterns via %(name)s;
    parse() expands those references into capture groups and returns a
    nested parse tree of {"TEXT": matched_text, subpattern: subtree} dicts.
    """
    def __init__(self):
        # name -> fully expanded regex (all references resolved)
        self.patterns = {}
        # name -> raw regex, still containing %(name)s references
        self.raw_patterns = {}
        # name -> bool; virtual patterns are expanded inline and do not
        # appear in the parse tree
        self.virtual = {}

    def add(self, name, pattern, virtual=False):
        """ Adds a new named pattern (regular expression) that can reference previously added patterns by %(pattern_name)s.
        Virtual patterns can be used to make expressions more compact but don't show up in the parse tree. """
        self.raw_patterns[name] = "(?:" + pattern + ")"
        self.virtual[name] = virtual
        try:
            self.patterns[name] = ("(?:" + pattern + ")") % self.patterns
        except KeyError as e:
            # Python 2/3-compatible syntax (was the py2-only forms
            # "except KeyError, e" and "raise Exception, msg").
            raise Exception("Unknown pattern name: %s" % str(e))

    def parse(self, pattern_name, text):
        """ Parses 'text' with pattern 'pattern_name' and returns parse tree """
        # build pattern with subgroups
        sub_dict = {}
        subpattern_names = []
        for s in re.finditer(r"%\(.*?\)s", self.raw_patterns[pattern_name]):
            subpattern_name = s.group()[2:-2]
            if not self.virtual[subpattern_name]:
                sub_dict[subpattern_name] = "(" + self.patterns[subpattern_name] + ")"
                subpattern_names.append(subpattern_name)
            else:
                sub_dict[subpattern_name] = self.patterns[subpattern_name]
        pattern = "^" + (self.raw_patterns[pattern_name] % sub_dict) + "$"
        # do matching
        m = re.match(pattern, text)
        if m is None:
            return None
        # build tree recursively by parsing subgroups.  enumerate() replaces
        # the py2-only xrange() loop with identical behavior.
        tree = {"TEXT": text}
        for i, subpattern in enumerate(subpattern_names):
            text_part = m.group(i + 1)
            if text_part is not None:
                tree[subpattern] = self.parse(subpattern, text_part)
        return tree
# Tolerant grammar for geographic positions.  Patterns reference each other
# via %(name)s substitution; "virtual" patterns are expanded inline and do
# not show up in the parse tree.
position_parser=Parser()
position_parser.add("direction_ns",r"[NSns]")
# "O"/"o" (German "Ost") is accepted as an alias for east.
position_parser.add("direction_ew",r"[EOWeow]")
position_parser.add("decimal_separator",r"[\.,]",True)
position_parser.add("sign",r"[+-]")
position_parser.add("nmea_style_degrees",r"[0-9]{2,}")
position_parser.add("nmea_style_minutes",r"[0-9]{2}(?:%(decimal_separator)s[0-9]*)?")
position_parser.add("nmea_style", r"%(sign)s?\s*%(nmea_style_degrees)s%(nmea_style_minutes)s")
position_parser.add("number",r"[0-9]+(?:%(decimal_separator)s[0-9]*)?|%(decimal_separator)s[0-9]+")
position_parser.add("plain_degrees",r"(?:%(sign)s\s*)?%(number)s")
# UTF-8 byte sequences for the degree, minute (prime) and second
# (double-prime) symbols, plus common ASCII stand-ins.
position_parser.add("degree_symbol",r"\xc2\xb0",True)
position_parser.add("minutes_symbol",r"'|\xe2\x80\xb2|`|\xc2\xb4",True)
position_parser.add("seconds_symbol",r"%(minutes_symbol)s%(minutes_symbol)s|\xe2\x80\xb3|\"",True)
position_parser.add("degrees",r"%(number)s\s*%(degree_symbol)s")
position_parser.add("minutes",r"%(number)s\s*%(minutes_symbol)s")
position_parser.add("seconds",r"%(number)s\s*%(seconds_symbol)s")
position_parser.add("degree_coordinates","(?:%(sign)s\s*)?%(degrees)s(?:[+\s]*%(minutes)s)?(?:[+\s]*%(seconds)s)?|(?:%(sign)s\s*)%(minutes)s(?:[+\s]*%(seconds)s)?|(?:%(sign)s\s*)%(seconds)s")
position_parser.add("coordinates_ns", r"%(nmea_style)s|%(plain_degrees)s|%(degree_coordinates)s")
position_parser.add("coordinates_ew", r"%(nmea_style)s|%(plain_degrees)s|%(degree_coordinates)s")
# A position is two coordinates in any common ordering, with optional
# N/S/E/W markers before or after each coordinate.
position_parser.add("position", """\
\s*%(direction_ns)s\s*%(coordinates_ns)s[,;\s]*%(direction_ew)s\s*%(coordinates_ew)s\s*|\
\s*%(direction_ew)s\s*%(coordinates_ew)s[,;\s]*%(direction_ns)s\s*%(coordinates_ns)s\s*|\
\s*%(coordinates_ns)s\s*%(direction_ns)s[,;\s]*%(coordinates_ew)s\s*%(direction_ew)s\s*|\
\s*%(coordinates_ew)s\s*%(direction_ew)s[,;\s]*%(coordinates_ns)s\s*%(direction_ns)s\s*|\
\s*%(coordinates_ns)s[,;\s]+%(coordinates_ew)s\s*\
""")
def get_number(b):
    """ Takes appropriate branch of parse tree and returns float. """
    # Accept both "." and "," as decimal separator.
    s = b["TEXT"].replace(",", ".")
    return float(s)
def get_coordinate(b):
    """ Takes appropriate branch of the parse tree and returns degrees as a float.

    Handles the three coordinate notations produced by position_parser:
    NMEA style (DDMM.mmm), plain decimal degrees, and degrees/minutes/seconds.
    """
    # Fix: dict.has_key() was removed in Python 3; the "key in dict" test
    # is the exact equivalent and also works on Python 2.
    r = 0.
    if "nmea_style" in b:
        if "nmea_style_degrees" in b["nmea_style"]:
            r += get_number(b["nmea_style"]["nmea_style_degrees"])
        if "nmea_style_minutes" in b["nmea_style"]:
            r += get_number(b["nmea_style"]["nmea_style_minutes"]) / 60.
        if "sign" in b["nmea_style"] and b["nmea_style"]["sign"]["TEXT"] == "-":
            r *= -1.
    elif "plain_degrees" in b:
        r += get_number(b["plain_degrees"]["number"])
        if "sign" in b["plain_degrees"] and b["plain_degrees"]["sign"]["TEXT"] == "-":
            r *= -1.
    elif "degree_coordinates" in b:
        if "degrees" in b["degree_coordinates"]:
            r += get_number(b["degree_coordinates"]["degrees"]["number"])
        if "minutes" in b["degree_coordinates"]:
            r += get_number(b["degree_coordinates"]["minutes"]["number"]) / 60.
        if "seconds" in b["degree_coordinates"]:
            r += get_number(b["degree_coordinates"]["seconds"]["number"]) / 3600.
        if "sign" in b["degree_coordinates"] and b["degree_coordinates"]["sign"]["TEXT"] == "-":
            r *= -1.
    return r
def parse_position(s):
    """ Takes a (utf8-encoded) string describing a position and returns a tuple of floats for latitude and longitude in degrees.
    Tries to be as tolerant as possible with input. Returns None if parsing doesn't succeed. """
    parse_tree = position_parser.parse("position", s)
    # Fix: "is None" instead of "== None", and "key in dict" instead of the
    # py2-only dict.has_key() (identical semantics, Python 3 compatible).
    if parse_tree is None:
        return None
    # Default to northern/eastern hemisphere; flip on an S or W marker.
    lat_sign = +1.
    if "direction_ns" in parse_tree and parse_tree["direction_ns"]["TEXT"] in ("S", "s"):
        lat_sign = -1.
    lon_sign = +1.
    if "direction_ew" in parse_tree and parse_tree["direction_ew"]["TEXT"] in ("W", "w"):
        lon_sign = -1.
    lat = lat_sign * get_coordinate(parse_tree["coordinates_ns"])
    lon = lon_sign * get_coordinate(parse_tree["coordinates_ew"])
    return lat, lon
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.util.contextutil import temporary_dir
from pants_test.backend.project_info.tasks.resolve_jars_test_mixin import ResolveJarsTestMixin
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
from pants_test.subsystem.subsystem_util import subsystem_instance
class ExportIntegrationTest(ResolveJarsTestMixin, PantsRunIntegrationTest):
  """Integration tests for the ``./pants export`` console task.

  Each test runs a real pants process in a temporary workdir and inspects
  the JSON the export goal writes.
  """
  # Extra confs resolved when loading libraries (in addition to 'default').
  _confs_args = [
    '--export-libraries-sources',
    '--export-libraries-javadocs',
  ]
  def run_export(self, test_target, workdir, load_libs=False, only_default=False, extra_args=None):
    """Runs ./pants export ... and returns its json output.
    :param string test_target: spec of the target to run on.
    :param string workdir: working directory to run pants with.
    :param bool load_libs: whether to load external libraries (of any conf).
    :param bool only_default: if loading libraries, whether to only resolve the default conf, or to
      additionally resolve sources and javadocs.
    :param list extra_args: list of extra arguments for the pants invocation.
    :return: the json output of the console task.
    :rtype: dict
    """
    export_out_file = os.path.join(workdir, 'export_out.txt')
    args = ['export',
            '--output-file={out_file}'.format(out_file=export_out_file)] + maybe_list(test_target)
    libs_args = ['--no-export-libraries'] if not load_libs else self._confs_args
    if load_libs and only_default:
      libs_args = []
    pants_run = self.run_pants_with_workdir(args + libs_args + (extra_args or []), workdir)
    self.assert_success(pants_run)
    self.assertTrue(os.path.exists(export_out_file),
                    msg='Could not find export output file in {out_file}'
                        .format(out_file=export_out_file))
    with open(export_out_file) as json_file:
      json_data = json.load(json_file)
    if not load_libs:
      # When libraries were not requested, the output must not contain them.
      self.assertIsNone(json_data.get('libraries'))
    return json_data
  def evaluate_subtask(self, targets, workdir, load_extra_confs, extra_args, expected_jars):
    """ResolveJarsTestMixin hook: export with libraries and check that every
    expected jar is present and resolves to a real file on disk."""
    json_data = self.run_export(targets, workdir, load_libs=True, only_default=not load_extra_confs,
                                extra_args=extra_args)
    for jar in expected_jars:
      self.assertIn(jar, json_data['libraries'])
      for path in json_data['libraries'][jar].values():
        self.assertTrue(os.path.exists(path), 'Expected jar at {} to actually exist.'.format(path))
  def test_export_code_gen(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      thrift_target_name = ('examples.src.thrift.org.pantsbuild.example.precipitation'
                            '.precipitation-java')
      codegen_target = os.path.join(os.path.relpath(workdir, get_buildroot()),
                                    'gen/thrift/isolated/{0}:{0}'.format(thrift_target_name))
      self.assertIn(codegen_target, json_data.get('targets').keys())
  def test_export_json_transitive_jar(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      targets = json_data.get('targets')
      self.assertIn('org.hamcrest:hamcrest-core:1.3', targets[test_target]['libraries'])
  def test_export_jar_path_with_excludes(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:foo'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      # The excluded artifact must be absent from the resolved libraries...
      self.assertIsNone(json_data
                        .get('libraries')
                        .get('com.typesafe.sbt:incremental-compiler:0.13.7'))
      # ...but recorded on the target's excludes list.
      foo_target = (json_data
                    .get('targets')
                    .get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
      self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
  def test_export_jar_path_with_excludes_soft(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:'
      json_data = self.run_export(test_target,
                                  workdir,
                                  load_libs=True,
                                  extra_args=['--resolve-ivy-soft-excludes'])
      # With soft excludes, excluded artifacts are still resolved...
      self.assertIsNotNone(json_data
                           .get('libraries')
                           .get('com.martiansoftware:nailgun-server:0.9.1'))
      self.assertIsNotNone(json_data.get('libraries').get('org.pantsbuild:jmake:1.3.8-10'))
      # ...while the excludes themselves are still reported on the target.
      foo_target = (json_data
                    .get('targets')
                    .get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
      self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
      self.assertTrue('org.pantsbuild' in foo_target.get('excludes'))
  def test_export_jar_path(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      with subsystem_instance(IvySubsystem) as ivy_subsystem:
        ivy_cache_dir = ivy_subsystem.get_options().cache_dir
        common_lang_lib_info = json_data.get('libraries').get('commons-lang:commons-lang:2.5')
        self.assertIsNotNone(common_lang_lib_info)
        self.assertEquals(
          common_lang_lib_info.get('default'),
          os.path.join(ivy_cache_dir, 'commons-lang/commons-lang/jars/commons-lang-2.5.jar')
        )
        self.assertEquals(
          common_lang_lib_info.get('javadoc'),
          os.path.join(ivy_cache_dir,
                       'commons-lang/commons-lang/javadocs/commons-lang-2.5-javadoc.jar')
        )
        self.assertEquals(
          common_lang_lib_info.get('sources'),
          os.path.join(ivy_cache_dir,
                       'commons-lang/commons-lang/sources/commons-lang-2.5-sources.jar')
        )
  def test_dep_map_for_java_sources(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
      json_data = self.run_export(test_target, workdir)
      targets = json_data.get('targets')
      self.assertIn('examples/src/java/org/pantsbuild/example/java_sources:java_sources', targets)
  def test_sources_and_javadocs(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      scala_lang_lib = json_data.get('libraries').get('org.scala-lang:scala-library:2.10.4')
      self.assertIsNotNone(scala_lang_lib)
      self.assertIsNotNone(scala_lang_lib['default'])
      self.assertIsNotNone(scala_lang_lib['sources'])
      self.assertIsNotNone(scala_lang_lib['javadoc'])
  def test_ivy_classifiers(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'testprojects/tests/java/org/pantsbuild/testproject/ivyclassifier:ivyclassifier'
      json_data = self.run_export(test_target, workdir, load_libs=True)
      with subsystem_instance(IvySubsystem) as ivy_subsystem:
        ivy_cache_dir = ivy_subsystem.get_options().cache_dir
        avro_lib_info = json_data.get('libraries').get('org.apache.avro:avro:1.7.7')
        self.assertIsNotNone(avro_lib_info)
        self.assertEquals(
          avro_lib_info.get('default'),
          os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7.jar')
        )
        self.assertEquals(
          avro_lib_info.get('tests'),
          os.path.join(ivy_cache_dir, 'org.apache.avro/avro/jars/avro-1.7.7-tests.jar')
        )
        self.assertEquals(
          avro_lib_info.get('javadoc'),
          os.path.join(ivy_cache_dir, 'org.apache.avro/avro/javadocs/avro-1.7.7-javadoc.jar')
        )
        self.assertEquals(
          avro_lib_info.get('sources'),
          os.path.join(ivy_cache_dir, 'org.apache.avro/avro/sources/avro-1.7.7-sources.jar')
        )
  def test_distributions_and_platforms(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      test_target = 'examples/src/java/org/pantsbuild/example/hello/simple'
      json_data = self.run_export(test_target, workdir, load_libs=False, extra_args=[
        '--jvm-platform-default-platform=java7',
        '--jvm-platform-platforms={'
        ' "java7": {"source": "1.7", "target": "1.7", "args": [ "-X123" ]},'
        ' "java8": {"source": "1.8", "target": "1.8", "args": [ "-X456" ]}'
        '}',
        '--jvm-distributions-paths={'
        ' "macos": [ "/Library/JDK" ],'
        ' "linux": [ "/usr/lib/jdk7", "/usr/lib/jdk8"]'
        '}'
      ])
      self.assertFalse('python_setup' in json_data)
      target_name = 'examples/src/java/org/pantsbuild/example/hello/simple:simple'
      targets = json_data.get('targets')
      self.assertEquals('java7', targets[target_name]['platform'])
      self.assertEquals(
        {
          'darwin': ['/Library/JDK'],
          'linux': ['/usr/lib/jdk7', u'/usr/lib/jdk8'],
        },
        json_data['jvm_distributions'])
      self.assertEquals(
        {
          'default_platform' : 'java7',
          'platforms': {
            'java7': {
              'source_level': '1.7',
              'args': ['-X123'],
              'target_level': '1.7'},
            'java8': {
              'source_level': '1.8',
              'args': ['-X456'],
              'target_level': '1.8'},
          }
        },
        json_data['jvm_platforms'])
  def test_intellij_integration(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      targets = ['src/python/::', 'tests/python/pants_test:all', 'contrib/::']
      excludes = [
        '--exclude-target-regexp=.*go/examples.*',
        '--exclude-target-regexp=.*scrooge/tests/thrift.*',
        '--exclude-target-regexp=.*spindle/tests/thrift.*',
        '--exclude-target-regexp=.*spindle/tests/jvm.*'
      ]
      json_data = self.run_export(targets, workdir, extra_args=excludes)
      python_setup = json_data['python_setup']
      self.assertIsNotNone(python_setup)
      self.assertIsNotNone(python_setup['interpreters'])
      default_interpreter = python_setup['default_interpreter']
      self.assertIsNotNone(default_interpreter)
      self.assertIsNotNone(python_setup['interpreters'][default_interpreter])
      self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['binary']))
      self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['chroot']))
      core_target = json_data['targets']['src/python/pants/backend/core:core']
      self.assertIsNotNone(core_target)
      self.assertEquals(default_interpreter, core_target['python_interpreter'])
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for ternary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import scipy.special as sps
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class TernaryOpsTest(xla_test.XLATestCase, parameterized.TestCase):
def _testTernary(self, op, a, b, c, expected, rtol=1e-3, atol=1e-6):
with self.session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
pc = array_ops.placeholder(dtypes.as_dtype(c.dtype), c.shape, name="c")
output = op(pa, pb, pc)
result = session.run(output, {pa: a, pb: b, pc: c})
self.assertAllClose(result, expected, rtol=rtol, atol=atol)
return result
@parameterized.parameters(
{'start': 1, 'end': 2, 'num': 1},
{'start': 1, 'end': 4, 'num': 3},
{'start': 0, 'end': 41, 'num': 42})
@test_util.disable_mlir_bridge(
'TODO(b/156174708): Dynamic result types not supported')
def testLinspace(self, start, end, num):
expected = np.linspace(start, end, num, dtype=np.float32)
result = self._testTernary(
math_ops.linspace,
np.float32(start),
np.float32(end),
np.int32(num),
expected)
# According to linspace spec, start has to be the first element and end has
# to be last element.
self.assertEqual(result[-1], expected[-1])
self.assertEqual(result[0], expected[0])
def testRange(self):
self._testTernary(
math_ops.range,
np.int32(1),
np.int32(2),
np.int32(1),
expected=np.array([1], dtype=np.int32))
self._testTernary(
math_ops.range,
np.int32(1),
np.int32(7),
np.int32(2),
expected=np.array([1, 3, 5], dtype=np.int32))
def testSelect(self):
for dtype in self.numeric_types:
self._testTernary(
array_ops.where,
np.array(False),
np.array(2, dtype=dtype),
np.array(7, dtype=dtype),
expected=np.array(7, dtype=dtype))
self._testTernary(
array_ops.where,
np.array(True),
np.array([1, 2, 3, 4], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([1, 2, 3, 4], dtype=dtype))
self._testTernary(
array_ops.where,
np.array(False),
np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype),
np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
expected=np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype))
self._testTernary(
array_ops.where,
np.array([0, 1, 1, 0], dtype=np.bool_),
np.array([1, 2, 3, 4], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([5, 2, 3, 8], dtype=dtype))
self._testTernary(
array_ops.where,
np.array([0, 1, 0], dtype=np.bool_),
np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype),
np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
expected=np.array([[7, 8], [3, 4], [11, 12]], dtype=dtype))
def testSelectV2(self):
for dtype in self.numeric_types:
self._testTernary(
array_ops.where_v2,
np.array(False),
np.array(2, dtype=dtype),
np.array(7, dtype=dtype),
expected=np.array(7, dtype=dtype))
self._testTernary(
array_ops.where_v2,
np.array(True),
np.array([1, 2, 3, 4], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([1, 2, 3, 4], dtype=dtype))
self._testTernary(
array_ops.where_v2,
np.array(False),
np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype),
np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
expected=np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype))
self._testTernary(
array_ops.where_v2,
np.array([0, 1, 1, 0], dtype=np.bool_),
np.array([1, 2, 3, 4], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([5, 2, 3, 8], dtype=dtype))
# Broadcast the condition
self._testTernary(
array_ops.where_v2,
np.array([0, 1], dtype=np.bool_),
np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype),
np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
expected=np.array([[7, 2], [9, 4], [11, 6]], dtype=dtype))
# Broadcast the then branch to the else
self._testTernary(
array_ops.where_v2,
np.array([[0, 1], [1, 0], [1, 1]], dtype=np.bool_),
np.array([[1, 2]], dtype=dtype),
np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
expected=np.array([[7, 2], [1, 10], [1, 2]], dtype=dtype))
# Broadcast the else branch to the then
self._testTernary(
array_ops.where_v2,
np.array([[1, 0], [0, 1], [0, 0]], dtype=np.bool_),
np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
np.array([[1, 2]], dtype=dtype),
expected=np.array([[7, 2], [1, 10], [1, 2]], dtype=dtype))
# Broadcast the then/else branches to the condition
self._testTernary(
array_ops.where_v2,
np.array([[1, 0], [0, 1], [1, 1]], dtype=np.bool_),
np.array(7, dtype=dtype),
np.array(8, dtype=dtype),
expected=np.array([[7, 8], [8, 7], [7, 7]], dtype=dtype))
self._testTernary(
array_ops.where_v2,
np.array([[1, 0], [0, 1], [0, 0]], dtype=np.bool_),
np.array(7, dtype=dtype),
np.array([8, 9], dtype=dtype),
expected=np.array([[7, 9], [8, 7], [8, 9]], dtype=dtype))
def testSlice(self):
for dtype in self.numeric_types:
self._testTernary(
array_ops.slice,
np.array([[], [], []], dtype=dtype),
np.array([1, 0], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
expected=np.array([[], []], dtype=dtype))
self._testTernary(
array_ops.slice,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
np.array([2, 1], dtype=np.int32),
expected=np.array([[2], [5]], dtype=dtype))
def testClipByValue(self):
for dtype in self.numeric_types - self.complex_types:
test_cases = [
(np.array([2, 4, 5], dtype=dtype), dtype(7)), #
(dtype(1), np.array([2, 4, 5], dtype=dtype)), #
(np.array([-2, 7, 7], dtype=dtype), np.array([-2, 9, 8], dtype=dtype))
]
x = np.array([-2, 10, 6], dtype=dtype)
for lower, upper in test_cases:
self._testTernary(
gen_math_ops._clip_by_value,
x,
lower,
upper,
expected=np.minimum(np.maximum(x, lower), upper))
def testBetaincSanity(self):
# This operation is only supported for float32 and float64.
for dtype in self.numeric_types & {np.float32, np.float64}:
# Sanity check a few identities:
# - betainc(a, b, 0) == 0
# - betainc(a, b, 1) == 1
# - betainc(a, 1, x) == x ** a
# Compare against the implementation in SciPy.
a = np.array([.3, .4, .2, .2], dtype=dtype)
b = np.array([1., 1., .4, .4], dtype=dtype)
x = np.array([.3, .4, .0, .1], dtype=dtype)
expected = sps.betainc(a, b, x)
self._testTernary(
math_ops.betainc, a, b, x, expected, rtol=5e-6, atol=6e-6)
  # Each parameter set scales the random (a, b) arguments to probe a
  # different numerical regime of betainc; tolerances are loosened where
  # the implementation is less accurate.
  @parameterized.parameters(
      {
          'sigma': 1e15,
          'rtol': 1e-6,
          'atol': 1e-4
      },
      {
          'sigma': 30,
          'rtol': 1e-6,
          'atol': 2e-3
      },
      {
          'sigma': 1e-8,
          'rtol': 5e-4,
          'atol': 3e-4
      },
      {
          'sigma': 1e-16,
          'rtol': 1e-6,
          'atol': 2e-4
      },
  )
  def testBetainc(self, sigma, rtol, atol):
    """Compares the betainc op against SciPy on random inputs.

    Args:
      sigma: Scale applied to the randomly drawn `a` and `b` parameters.
      rtol: Relative tolerance for the comparison.
      atol: Absolute tolerance for the comparison.
    """
    # This operation is only supported for float32 and float64.
    for dtype in self.numeric_types & {np.float32, np.float64}:
      # Randomly generate a, b, x in the numerical domain of betainc.
      # Compare against the implementation in SciPy.
      a = np.abs(np.random.randn(10, 10) * sigma).astype(dtype)  # in (0, infty)
      b = np.abs(np.random.randn(10, 10) * sigma).astype(dtype)  # in (0, infty)
      x = np.random.rand(10, 10).astype(dtype)  # in (0, 1)
      # sps.betainc is a NumPy ufunc, so the dtype keyword fixes the
      # output dtype of the reference values.
      expected = sps.betainc(a, b, x, dtype=dtype)
      self._testTernary(
          math_ops.betainc, a, b, x, expected, rtol=rtol, atol=atol)
# Standard test-runner entry point.
if __name__ == "__main__":
  googletest.main()
| |
# -*- coding: utf-8 -*-
"""
continuity.cli.github
~~~~~~~~~~~~~~~~~~~~~
Continuity GitHub CLI commands.
:copyright: 2015 by Jonathan Zempel.
:license: BSD, see LICENSE for more details.
"""
from .commons import (FinishCommand as BaseFinishCommand,
GitHubCommand as BaseGitHubCommand,
ReviewCommand as BaseReviewCommand, StartCommand as BaseStartCommand,
TasksCommand as BaseTasksCommand)
from .utils import less, puts
from clint.textui import colored
from continuity.services.github import Issue
from continuity.services.utils import cached_property
from StringIO import StringIO
from sys import exit
class GitHubCommand(BaseGitHubCommand):
    """Base GitHub command.
    """

    def get_issues(self, **parameters):
        """Get a list of issues, ordered by milestone.

        Issues attached to each milestone come first, followed by issues
        with no milestone at all.

        :param parameters: Parameter keyword-arguments.
        """
        results = []

        for milestone in self.github.get_milestones():
            parameters["milestone"] = milestone.number
            results.extend(self.github.get_issues(**parameters))

        parameters["milestone"] = None
        results.extend(self.github.get_issues(**parameters))

        return results

    @cached_property
    def issue(self):
        """Current branch issue accessor.

        Exits with a fatal message when the current branch has no issue
        configured.
        """
        branch_config = self.git.get_configuration("branch",
                self.git.branch.name)
        issue = None

        if branch_config:
            try:
                issue = self.github.get_issue(branch_config["issue"])
            except KeyError:
                issue = None

        if not issue:
            exit("fatal: Not an issue branch.")

        return issue
class FinishCommand(BaseFinishCommand, GitHubCommand):
    """Finish an issue branch.
    """

    def _merge_branch(self, branch, *args):
        """Merge a branch.

        :param branch: The name of the branch to merge.
        :param *args: Merge argument list.
        """
        try:
            # Check out the issue branch and touch ``self.issue`` so the
            # cached_property is populated while that branch's
            # configuration is active.
            self.git.get_branch(branch)
            self.issue  # Cache the branch issue.
        finally:
            # Always switch back to the target branch, even if the issue
            # lookup above exits.
            self.git.get_branch(self.branch)

        # Pull-request branches get GitHub's standard merge message;
        # plain issue branches get a "[close #N]" message so the merge
        # commit closes the issue.
        if self.issue.pull_request:
            message = "Merge pull request #{0:d} from {1}".format(
                self.issue.number, branch)
        else:
            message = "[close #{0:d}] Merge branch '{1}'".format(
                self.issue.number, branch)

        self.git.merge_branch(branch, message, args)

    def finalize(self):
        """Finalize this finish command.

        Swaps the "started" label for "finished" on the issue and reports
        the result before delegating to the base implementation.
        """
        self.github.add_labels(self.issue, "finished")
        self.github.remove_label(self.issue, "started")
        puts("Finished issue #{0:d}.".format(self.issue.number))
        super(FinishCommand, self).finalize()
class IssueCommand(GitHubCommand):
    """Display issue branch information.

    :param parser: Command-line argument parser.
    :param namespace: Command-line argument namespace.
    """

    name = "issue"

    def __init__(self, parser, namespace):
        parser.add_argument("-c", "--comments", action="store_true",
                help="include issue comments")
        super(IssueCommand, self).__init__(parser, namespace)

    def execute(self):
        """Execute this issue command.

        Prints the current branch issue's title, optional milestone and
        description, its author/URL footer and - with ``-c`` - the full
        comment thread.
        """
        puts(self.issue.title)

        if self.issue.milestone:
            puts()
            puts("Milestone: {0}".format(self.issue.milestone))

        if self.issue.description:
            puts()
            puts(colored.cyan(self.issue.description))

        puts()
        puts(colored.white("Created by {0} on {1}".format(
            self.issue.user.login,
            self.issue.created.strftime("%d %b %Y, %I:%M%p"))))
        puts(colored.white(self.issue.url))

        # Optionally append the comment thread (-c/--comments).
        if self.namespace.comments:
            for comment in self.github.get_comments(self.issue):
                puts()
                puts(colored.yellow("{0} ({1})".format(
                    comment.user.login, comment.created)))
                puts()
                puts(comment)
class IssuesCommand(GitHubCommand):
    """List open issues.

    :param parser: Command-line argument parser.
    :param namespace: Command-line argument namespace.
    """

    name = "issues"

    def __init__(self, parser, namespace):
        parser.add_argument("-u", "--assignedtoyou", action="store_true",
                help="list issues assigned to you")
        super(IssuesCommand, self).__init__(parser, namespace)

    def execute(self):
        """Execute this issues command.

        Writes one "<number>: <title> (...)" line per issue into a buffer
        and pages the result through ``less``.
        """
        if self.namespace.assignedtoyou:
            issues = self.get_issues(assignee=self.github.get_user().login)
        else:
            issues = self.get_issues()

        output = StringIO()

        for issue in issues:
            # Annotate the title with the workflow state label, if any.
            if "started" in issue.labels:
                title = "{0} [STARTED]".format(issue.title)
            elif "finished" in issue.labels:
                title = "{0} [FINISHED]".format(issue.title)
            else:
                title = issue.title

            # Parenthesized detail: "assignee, milestone", or whichever
            # one of the two is set.
            details = issue.assignee

            if details and issue.milestone:
                details = "{0}, {1}".format(details, issue.milestone)
            elif not details:
                details = issue.milestone

            if details:
                title = "{0} ({1})".format(title, details)

            output.write("{0}: {1}\n".format(
                colored.yellow(str(issue.number)), title.strip()))

        less(output)
class ReviewCommand(BaseReviewCommand, GitHubCommand):
    """Open a GitHub pull request for issue branch review.
    """

    def _create_pull_request(self, branch):
        """Create a pull request.

        :param branch: The base branch the pull request is for.
        """
        # Passing the issue number attaches the pull request to the
        # current branch issue.
        return self.github.create_pull_request(self.issue.number,
                branch=branch)
class StartCommand(BaseStartCommand, GitHubCommand):
    """Start a branch linked to an issue.

    :param parser: Command-line argument parser.
    :param namespace: Command-line argument namespace.
    """

    def __init__(self, parser, namespace):
        parser.add_argument("number", help="start the specified issue",
                nargs='?', type=int)
        parser.add_argument("-u", "--assignedtoyou", action="store_true",
                help="only start issues assigned to you")
        parser.add_argument("-i", "--ignore", action="store_true",
                help="ignore issue status")
        super(StartCommand, self).__init__(parser, namespace)

    @property
    def error(self):
        """Error message accessor.

        Builds a message explaining why no issue could be started; when
        the issue exists and is open but was skipped because of its
        status labels, the message suggests retrying with ``-i``.

        NOTE(review): ``self.namespace.exclusive`` - presumably derived
        from the ``-u/--assignedtoyou`` flag by BaseStartCommand; confirm
        against the base class.
        """
        if self.namespace.number and self.namespace.exclusive:
            ret_val = "No available issue #{0} found assigned to you.".\
                format(self.namespace.number)

            if not self.namespace.ignore:
                issue = self.github.get_issue(self.namespace.number)

                if issue and issue.state == Issue.STATE_OPEN and \
                        issue.assignee == self.github.get_user():
                    ret_val = "{0}\nUse -i to ignore the status on issues assigned to you.".\
                        format(ret_val)
        elif self.namespace.number:
            ret_val = "No available issue #{0} found.".format(
                self.namespace.number)

            if not self.namespace.ignore:
                issue = self.github.get_issue(self.namespace.number)

                if issue and issue.state == Issue.STATE_OPEN:
                    ret_val = "{0}\nUse -i to ignore issue status.".format(
                        ret_val)
        elif self.namespace.exclusive:
            ret_val = "No available issues found assigned to you."
        else:
            ret_val = "No available issues found."

        return ret_val

    def execute(self):
        """Execute this start command.

        Assigns the target issue to the current user if unassigned,
        creates the branch via the base command, records the issue number
        in the branch configuration and labels the issue "started".
        """
        if self.issue:
            puts("Issue: {0}".format(self.issue.title))
            user = self.github.get_user()

            if self.issue.assignee is None:
                # NOTE(review): rebinding a cached_property value -
                # assumes the project's cached_property stores per-instance
                # state that plain attribute assignment overrides; confirm.
                self.issue = self.github.set_issue(self.issue.number,
                    assignee=user.login)

            # Verify that user got the issue.
            if self.issue.assignee == user:
                branch = super(StartCommand, self).execute()
                self.git.set_configuration("branch", branch,
                    issue=self.issue.number)
                self.github.add_labels(self.issue, "started")
            else:
                exit("Unable to update issue assignee.")
        else:
            exit(self.error)

    def exit(self):
        """Handle start command exit.
        """
        puts("Aborted issue branch.")
        super(StartCommand, self).exit()

    @cached_property
    def issue(self):
        """Target issue accessor.

        Resolution order: a specific number restricted to the current
        user (``number`` + ``-u``), a specific number, the next issue
        assigned to the current user (``-u``), or the next unassigned /
        self-assigned issue.  Returns ``None`` when nothing qualifies.
        """
        ret_val = None
        # An issue is startable when it is open, not already
        # started/finished (unless -i), and not a pull request.
        available = lambda issue: issue and \
            issue.state == Issue.STATE_OPEN and \
            (self.namespace.ignore or not ("started" in issue.labels or "finished" in issue.labels)) and \
            issue.pull_request is None
        number = self.namespace.number
        exclusive = self.namespace.exclusive
        user = self.github.get_user()

        if number and exclusive:
            puts("Retrieving issue #{0} from GitHub for {1}...".format(number,
                user))
            issue = self.github.get_issue(number)

            if available(issue) and issue.assignee and issue.assignee == user:
                ret_val = issue
        elif number:
            puts("Retrieving issue #{0} from GitHub...".format(number))
            issue = self.github.get_issue(number)

            if available(issue):
                ret_val = issue
        elif exclusive:
            puts("Retrieving next issue from GitHub for {0}...".format(user))
            issues = self.get_issues(assignee=user.login)

            if issues:
                for issue in issues:
                    if available(issue):
                        ret_val = issue
                        break
        else:
            puts("Retrieving next available issue from GitHub...")
            issues = self.get_issues()

            for issue in issues:
                if available(issue) and (issue.assignee is None or
                        issue.assignee == user):
                    ret_val = issue
                    break

        return ret_val
class TasksCommand(BaseTasksCommand, GitHubCommand):
    """List and manage issue tasks.
    """

    def _get_tasks(self):
        """Task list accessor.
        """
        return self.github.get_tasks(self.issue)

    def _set_task(self, task, checked):
        """Task mutator.

        :param task: The task to update.
        :param checked: ``True`` if the task is complete.
        """
        return self.github.set_task(self.issue, task, checked)

    def finalize(self):
        """Finalize this tasks command.

        Prints a 1-based checklist, marking completed tasks with 'x'.
        """
        for position, task in enumerate(self.tasks, start=1):
            mark = 'x' if task.is_checked else ' '
            puts("[{0}] {1}. {2}".format(mark, position, task.description))
| |
#!/usr/bin/env python
import keyword
import wx
import wx.stc as stc
import images
#----------------------------------------------------------------------
# Text prepended to the sample buffer loaded into the editor below.
demoText = """\
## This version of the editor has been set up to edit Python source
## code. Here is a copy of wxPython/demo/Main.py to play with.
"""

#----------------------------------------------------------------------

# Per-platform font face/size table, consumed by the "%(helv)s"-style
# substitutions in PythonSTC's StyleSetSpec calls.
if wx.Platform == '__WXMSW__':
    faces = {'times': 'Times New Roman',
             'mono': 'Courier New',
             'helv': 'Arial',
             'other': 'Comic Sans MS',
             'size': 10,
             'size2': 8,
             }
elif wx.Platform == '__WXMAC__':
    faces = {'times': 'Times New Roman',
             'mono': 'Monaco',
             'helv': 'Arial',
             'other': 'Comic Sans MS',
             'size': 12,
             'size2': 10,
             }
else:
    faces = {'times': 'Times',
             'mono': 'Courier',
             'helv': 'Helvetica',
             'other': 'new century schoolbook',
             'size': 12,
             'size2': 10,
             }
#----------------------------------------------------------------------
class PythonSTC(stc.StyledTextCtrl):
    """StyledTextCtrl configured as a Python source editor.

    Sets up the Python lexer, a code-folding margin, brace matching, and
    a small calltip/autocomplete demo bound to Ctrl-Space.
    """

    # Which of the four fold-marker sets below to install (0-3).
    fold_symbols = 2

    def __init__(self, parent, ID,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0):
        stc.StyledTextCtrl.__init__(self, parent, ID, pos, size, style)

        # Ctrl-B / Ctrl-N zoom the view in and out.
        self.CmdKeyAssign(ord('B'), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMIN)
        self.CmdKeyAssign(ord('N'), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMOUT)

        self.SetLexer(stc.STC_LEX_PYTHON)
        self.SetKeyWords(0, " ".join(keyword.kwlist))

        self.SetProperty("fold", "1")
        self.SetProperty("tab.timmy.whinge.level", "1")
        self.SetMargins(0, 0)

        self.SetViewWhiteSpace(False)
        #self.SetBufferedDraw(False)
        #self.SetViewEOL(True)
        #self.SetEOLMode(stc.STC_EOL_CRLF)
        #self.SetUseAntiAliasing(True)

        # Shade the background past column 78 as a long-line guide.
        self.SetEdgeMode(stc.STC_EDGE_BACKGROUND)
        self.SetEdgeColumn(78)

        # Setup a margin to hold fold markers
        #self.SetFoldFlags(16)  ### WHAT IS THIS VALUE? WHAT ARE THE OTHER FLAGS? DOES IT MATTER?
        self.SetMarginType(2, stc.STC_MARGIN_SYMBOL)
        self.SetMarginMask(2, stc.STC_MASK_FOLDERS)
        self.SetMarginSensitive(2, True)
        self.SetMarginWidth(2, 12)

        if self.fold_symbols == 0:
            # Arrow pointing right for contracted folders, arrow pointing down for expanded
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_ARROWDOWN, "black", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_ARROW, "black", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_EMPTY, "black", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_EMPTY, "black", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_EMPTY, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_EMPTY, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_EMPTY, "white", "black")
        elif self.fold_symbols == 1:
            # Plus for contracted folders, minus for expanded
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_MINUS, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_PLUS, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_EMPTY, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_EMPTY, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_EMPTY, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_EMPTY, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_EMPTY, "white", "black")
        elif self.fold_symbols == 2:
            # Like a flattened tree control using circular headers and curved joins
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_CIRCLEMINUS, "white", "#404040")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_CIRCLEPLUS, "white", "#404040")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "#404040")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNERCURVE, "white", "#404040")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_CIRCLEPLUSCONNECTED, "white", "#404040")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_CIRCLEMINUSCONNECTED, "white", "#404040")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNERCURVE, "white", "#404040")
        elif self.fold_symbols == 3:
            # Like a flattened tree control using square headers
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_BOXMINUS, "white", "#808080")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_BOXPLUS, "white", "#808080")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "#808080")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNER, "white", "#808080")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_BOXPLUSCONNECTED, "white", "#808080")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_BOXMINUSCONNECTED, "white", "#808080")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNER, "white", "#808080")

        self.Bind(stc.EVT_STC_UPDATEUI, self.OnUpdateUI)
        self.Bind(stc.EVT_STC_MARGINCLICK, self.OnMarginClick)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyPressed)

        # Make some styles,  The lexer defines what each style is used for, we
        # just have to define what each style looks like.  This set is adapted from
        # Scintilla sample property files.

        # Global default styles for all languages
        self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(helv)s,size:%(size)d" % faces)
        self.StyleClearAll()  # Reset all to be like the default

        # Global default styles for all languages
        self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(helv)s,size:%(size)d" % faces)
        self.StyleSetSpec(stc.STC_STYLE_LINENUMBER, "back:#C0C0C0,face:%(helv)s,size:%(size2)d" % faces)
        self.StyleSetSpec(stc.STC_STYLE_CONTROLCHAR, "face:%(other)s" % faces)
        self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, "fore:#FFFFFF,back:#0000FF,bold")
        self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, "fore:#000000,back:#FF0000,bold")

        # Python styles
        # Default
        self.StyleSetSpec(stc.STC_P_DEFAULT, "fore:#000000,face:%(helv)s,size:%(size)d" % faces)
        # Comments
        self.StyleSetSpec(stc.STC_P_COMMENTLINE, "fore:#007F00,face:%(other)s,size:%(size)d" % faces)
        # Number
        self.StyleSetSpec(stc.STC_P_NUMBER, "fore:#007F7F,size:%(size)d" % faces)
        # String
        self.StyleSetSpec(stc.STC_P_STRING, "fore:#7F007F,face:%(helv)s,size:%(size)d" % faces)
        # Single quoted string
        self.StyleSetSpec(stc.STC_P_CHARACTER, "fore:#7F007F,face:%(helv)s,size:%(size)d" % faces)
        # Keyword
        self.StyleSetSpec(stc.STC_P_WORD, "fore:#00007F,bold,size:%(size)d" % faces)
        # Triple quotes
        self.StyleSetSpec(stc.STC_P_TRIPLE, "fore:#7F0000,size:%(size)d" % faces)
        # Triple double quotes
        self.StyleSetSpec(stc.STC_P_TRIPLEDOUBLE, "fore:#7F0000,size:%(size)d" % faces)
        # Class name definition
        self.StyleSetSpec(stc.STC_P_CLASSNAME, "fore:#0000FF,bold,underline,size:%(size)d" % faces)
        # Function or method name definition
        self.StyleSetSpec(stc.STC_P_DEFNAME, "fore:#007F7F,bold,size:%(size)d" % faces)
        # Operators
        self.StyleSetSpec(stc.STC_P_OPERATOR, "bold,size:%(size)d" % faces)
        # Identifiers
        self.StyleSetSpec(stc.STC_P_IDENTIFIER, "fore:#000000,face:%(helv)s,size:%(size)d" % faces)
        # Comment-blocks
        self.StyleSetSpec(stc.STC_P_COMMENTBLOCK, "fore:#7F7F7F,size:%(size)d" % faces)
        # End of line where string is not closed
        self.StyleSetSpec(stc.STC_P_STRINGEOL, "fore:#000000,face:%(mono)s,back:#E0C0E0,eol,size:%(size)d" % faces)

        self.SetCaretForeground("BLUE")

        # register some images for use in the AutoComplete box.
        self.RegisterImage(1, images.Smiles.GetBitmap())
        self.RegisterImage(2,
            wx.ArtProvider.GetBitmap(wx.ART_NEW, size=(16, 16)))
        self.RegisterImage(3,
            wx.ArtProvider.GetBitmap(wx.ART_COPY, size=(16, 16)))

    def OnKeyPressed(self, event):
        """Handle Ctrl-Space: show a calltip (with Shift) or autocomplete."""
        if self.CallTipActive():
            self.CallTipCancel()
        key = event.GetKeyCode()

        if key == 32 and event.ControlDown():
            pos = self.GetCurrentPos()

            # Tips
            if event.ShiftDown():
                self.CallTipSetBackground("yellow")
                self.CallTipShow(pos, 'lots of of text: blah, blah, blah\n\n'
                    'show some suff, maybe parameters..\n\n'
                    'fubar(param1, param2)')
            # Code completion
            else:
                #lst = []
                #for x in range(50000):
                #    lst.append('%05d' % x)
                #st = " ".join(lst)
                #print(len(st))
                #self.AutoCompShow(0, st)

                kw = keyword.kwlist[:]
                kw.append("zzzzzz?2")
                kw.append("aaaaa?2")
                kw.append("__init__?3")
                kw.append("zzaaaaa?2")
                kw.append("zzbaaaa?2")
                kw.append("this_is_a_longer_value")
                #kw.append("this_is_a_much_much_much_much_much_much_much_longer_value")

                kw.sort()  # Python sorts are case sensitive
                self.AutoCompSetIgnoreCase(False)  # so this needs to match

                # Images are specified with a appended "?type"
                for i in range(len(kw)):
                    if kw[i] in keyword.kwlist:
                        kw[i] = kw[i] + "?1"

                self.AutoCompShow(0, " ".join(kw))
        else:
            event.Skip()

    def OnUpdateUI(self, evt):
        """Highlight the brace pair around the caret on each UI update."""
        # check for matching braces
        braceAtCaret = -1
        braceOpposite = -1
        charBefore = None
        caretPos = self.GetCurrentPos()

        if caretPos > 0:
            charBefore = self.GetCharAt(caretPos - 1)
            styleBefore = self.GetStyleAt(caretPos - 1)

        # check before
        if charBefore and chr(charBefore) in "[]{}()" and styleBefore == stc.STC_P_OPERATOR:
            braceAtCaret = caretPos - 1

        # check after
        if braceAtCaret < 0:
            charAfter = self.GetCharAt(caretPos)
            styleAfter = self.GetStyleAt(caretPos)

            if charAfter and chr(charAfter) in "[]{}()" and styleAfter == stc.STC_P_OPERATOR:
                braceAtCaret = caretPos

        if braceAtCaret >= 0:
            braceOpposite = self.BraceMatch(braceAtCaret)

        # Unmatched brace -> "bad" highlight; otherwise highlight the pair.
        if braceAtCaret != -1 and braceOpposite == -1:
            self.BraceBadLight(braceAtCaret)
        else:
            self.BraceHighlight(braceAtCaret, braceOpposite)
            #pt = self.PointFromPosition(braceOpposite)
            #self.Refresh(True, wxRect(pt.x, pt.y, 5,5))
            #print(pt)
            #self.Refresh(False)

    def OnMarginClick(self, evt):
        """Toggle folds from clicks in the fold margin.

        Shift+Ctrl+click folds/unfolds everything; Shift expands a header
        one level; Ctrl recursively expands or collapses; a plain click
        toggles the header under the cursor.
        """
        # fold and unfold as needed
        if evt.GetMargin() == 2:
            if evt.GetShift() and evt.GetControl():
                self.FoldAll()
            else:
                lineClicked = self.LineFromPosition(evt.GetPosition())

                if self.GetFoldLevel(lineClicked) & stc.STC_FOLDLEVELHEADERFLAG:
                    if evt.GetShift():
                        self.SetFoldExpanded(lineClicked, True)
                        self.Expand(lineClicked, True, True, 1)
                    elif evt.GetControl():
                        if self.GetFoldExpanded(lineClicked):
                            self.SetFoldExpanded(lineClicked, False)
                            self.Expand(lineClicked, False, True, 0)
                        else:
                            self.SetFoldExpanded(lineClicked, True)
                            self.Expand(lineClicked, True, True, 100)
                    else:
                        self.ToggleFold(lineClicked)

    def FoldAll(self):
        """Fold or unfold every top-level fold header in the buffer.

        The direction is taken from the expansion state of the first fold
        header found.
        """
        lineCount = self.GetLineCount()
        expanding = True

        # find out if we are folding or unfolding
        for lineNum in range(lineCount):
            if self.GetFoldLevel(lineNum) & stc.STC_FOLDLEVELHEADERFLAG:
                expanding = not self.GetFoldExpanded(lineNum)
                break

        lineNum = 0

        while lineNum < lineCount:
            level = self.GetFoldLevel(lineNum)
            if level & stc.STC_FOLDLEVELHEADERFLAG and \
                    (level & stc.STC_FOLDLEVELNUMBERMASK) == stc.STC_FOLDLEVELBASE:

                if expanding:
                    self.SetFoldExpanded(lineNum, True)
                    lineNum = self.Expand(lineNum, True)
                    lineNum = lineNum - 1
                else:
                    lastChild = self.GetLastChild(lineNum, -1)
                    self.SetFoldExpanded(lineNum, False)

                    if lastChild > lineNum:
                        self.HideLines(lineNum + 1, lastChild)

            lineNum = lineNum + 1

    def Expand(self, line, doExpand, force=False, visLevels=0, level=-1):
        """Recursively show/hide the children of a fold header.

        :param line: The header line to expand below.
        :param doExpand: Whether children should be shown.
        :param force: Apply visibility purely from ``visLevels``.
        :param visLevels: Remaining depth to keep visible when forcing.
        :param level: Fold level of ``line`` (-1 = query the control).
        Returns the first line number past the processed range.
        """
        lastChild = self.GetLastChild(line, level)
        line = line + 1

        while line <= lastChild:
            if force:
                if visLevels > 0:
                    self.ShowLines(line, line)
                else:
                    self.HideLines(line, line)
            else:
                if doExpand:
                    self.ShowLines(line, line)

            if level == -1:
                level = self.GetFoldLevel(line)

            if level & stc.STC_FOLDLEVELHEADERFLAG:
                if force:
                    if visLevels > 1:
                        self.SetFoldExpanded(line, True)
                    else:
                        self.SetFoldExpanded(line, False)

                    line = self.Expand(line, doExpand, force, visLevels - 1)

                else:
                    if doExpand and self.GetFoldExpanded(line):
                        line = self.Expand(line, True, force, visLevels - 1)
                    else:
                        line = self.Expand(line, False, force, visLevels - 1)
            else:
                line = line + 1

        return line
#----------------------------------------------------------------------
# Set to 0 to create the editor directly on the notebook page instead of
# hosting it inside a panel.
_USE_PANEL = 1

def runTest(frame, nb, log):
    """wxPython demo entry point: build and return the editor page.

    :param frame: The demo's main frame (unused here).
    :param nb: The demo notebook the page is created on.
    :param log: The demo logger (unused here).
    """
    if not _USE_PANEL:
        ed = p = PythonSTC(nb, -1)
    else:
        p = wx.Panel(nb, -1, style=wx.NO_FULL_REPAINT_ON_RESIZE)
        ed = PythonSTC(p, -1)
        s = wx.BoxSizer(wx.HORIZONTAL)
        s.Add(ed, 1, wx.EXPAND)
        p.SetSizer(s)
        p.SetAutoLayout(True)

    # NOTE(review): assumes 'Main.py' exists in the current working
    # directory (true when run from the wxPython demo) -- confirm before
    # running standalone.
    ed.SetText(demoText + open('Main.py').read())
    ed.EmptyUndoBuffer()
    ed.Colourise(0, -1)

    # line numbers in the margin
    ed.SetMarginType(1, stc.STC_MARGIN_NUMBER)
    ed.SetMarginWidth(1, 25)

    return p
#----------------------------------------------------------------------
# HTML shown on the demo's "Overview" tab.
# NOTE(review): the closing tag reads "<html>" -- presumably "</html>"
# was intended; left untouched since this string is runtime output.
overview = """\
<html><body>
Once again, no docs yet. <b>Sorry.</b> But <a href="data/stc.h.html">this</a>
and <a href="http://www.scintilla.org/ScintillaDoc.html">this</a> should
be helpful.
</body><html>
"""

if __name__ == '__main__':
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
#----------------------------------------------------------------------
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import readline
import getpass
import sys
import cmd
import imp
import re
import os
import shlex
from . import restshlib
DEBUG = False
HELP_TEXT = '''Usage: command [<command-option>...]
Cliente commands:
set <header|setting> <header-key> <header-value>
unset <header|setting> <header-key>
show <headers|settings>
prompt <new-prompt>
login <username>
baseurl <host>
help [<command>]
Rest actions:
get <url>
post <url> <data>
put <url> <data>
delete <url>
'''
class RestSH(cmd.Cmd, object):
    """Interactive REST shell.

    A ``cmd.Cmd`` REPL wrapping a :class:`restshlib.RestSHLib` instance:
    shell commands configure headers/settings/auth and issue HTTP
    requests, printing the response according to the ``print_*``
    settings.
    """

    restshlib = None
    baseurl = "no-host"
    login = "no-user"
    prompt = ""
    cfg_prompt = "%(login)s@%(baseurl)s|restsh> "
    global_data = {}
    history_file = os.path.expanduser("~/.restsh-history")
    history_file_max_lines = 1000

    def __init__(self, *args, **kwargs):
        self.restshlib = restshlib.RestSHLib(global_data=self.global_data)
        self.prompt = self.cfg_prompt % {"login": self.login,
                                         "baseurl": self.baseurl}
        try:
            # The history file does not exist on the first run; that must
            # not abort startup.
            readline.read_history_file(self.history_file)
        except IOError:
            pass
        readline.set_history_length(self.history_file_max_lines)
        super(RestSH, self).__init__(*args, **kwargs)

    def __del__(self):
        try:
            readline.write_history_file(self.history_file)
        except IOError:
            # Best effort: failing to persist history must not raise
            # during interpreter teardown.
            pass

    def postcmd(self, stop, line):
        """Refresh the prompt after every command and propagate ``stop``.

        ``cmd.Cmd`` uses the return value of postcmd to decide whether to
        leave the command loop; the previous version discarded it.
        """
        stop = super(RestSH, self).postcmd(stop, line)
        self.prompt = self.cfg_prompt % {"login": self.login,
                                         "baseurl": self.baseurl}
        return stop

    def _split_params(self, params):
        """Split "<url> [<data>]" input; data defaults to {} when absent."""
        try:
            url, data = re.split(r'\s+', params, maxsplit=1)
        except ValueError:
            url, data = params, {}
        return url, data

    def _print_response(self, response):
        """Print request/response details according to the print_* settings."""
        if self.restshlib.settings.get('print_request', "1") in ["1", "yes", "true"]:
            print("Request:")
            print(" url: {0}".format(response.request.full_url))
            print(" data: {0}".format(response.request.data))
            print(" headers:")
            # dict.items() works on both Python 2 and 3; iteritems() does
            # not exist on Python 3 mappings.
            for name, value in response.request.headers.items():
                print(" {0}: {1}".format(name, value))
        if self.restshlib.settings.get('print_body', "1") in ["1", "yes", "true"]:
            print("Response body:\n{0}".format(response.text))
        if self.restshlib.settings.get('print_headers', "1") in ["1", "yes", "true"]:
            print("Response headers:")
            for name, value in response.headers.items():
                print(" {0}: {1}".format(name, value))
        if self.restshlib.settings.get('print_status', "1") in ["1", "yes", "true"]:
            print("Status Code: {0}".format(response.status_code))

    def do_reload(self, params):
        '''Reload the restshlib module (development helper).'''
        # The "imp" module was removed in Python 3.12; importlib.reload
        # is the supported replacement for imp.reload.
        import importlib
        importlib.reload(restshlib)
        self.restshlib = restshlib.RestSHLib(global_data=self.global_data)

    def do_help(self, params):
        """
        Show help information. Example: help set
        """
        if params:
            super(RestSH, self).do_help(params)
        else:
            print(HELP_TEXT)

    def do_quit(self, params):
        '''Quit restsh'''
        sys.exit()

    def do_EOF(self, params):
        '''Quit restsh'''
        sys.exit()

    def do_setenv(self, params):
        '''Set template variables. Example: setenv key value [key value ...]'''
        args = shlex.split(params)
        if len(args) % 2 != 0:
            raise ValueError("Invalid parameters")
        # Consume key/value pairs; an empty argument list is a no-op (the
        # previous "while True" loop crashed on unpacking when no pairs
        # were supplied).
        while args:
            key, value = args[:2]
            args = args[2:]
            self.global_data[key] = value

    def do_delenv(self, params):
        '''Delete template variables. Example: delenv key [key ...]'''
        args = shlex.split(params)
        if len(args) < 1:
            raise ValueError("Invalid parameters")
        for arg in args:
            if arg in self.global_data:
                del self.global_data[arg]

    def do_set(self, params):
        """
        Set headers and settings variables. Example: set settings auth_method digest
        """
        args = shlex.split(params)
        if len(args) != 3:
            raise ValueError("Invalid number of parameters")
        (typ, key, value) = args
        if typ == "header":
            self.restshlib.set_header(key, value)
        elif typ == "setting":
            self.restshlib.set_setting(key, value)
        else:
            raise ValueError("Invalid type of variables")

    def do_unset(self, params):
        '''Unset headers and settings variables. Example: unset settings auth_method'''
        args = shlex.split(params)
        if len(args) != 2:
            raise ValueError("Invalid number of parameters")
        (typ, key) = args
        if typ == "header":
            self.restshlib.unset_header(key)
        elif typ == "setting":
            self.restshlib.unset_setting(key)
        else:
            raise ValueError("Invalid type of variables")

    def do_show(self, params):
        '''Show headers and settings variables. Example: show settings'''
        args = shlex.split(params)
        if len(args) != 1:
            raise ValueError("Invalid number of parameters")
        (typ,) = args
        if typ == "headers":
            for name, value in self.restshlib.headers.items():
                print("{0}: {1}".format(name, value))
        elif typ == "settings":
            for name, value in self.restshlib.settings.items():
                print("{0}: {1}".format(name, value))
        else:
            raise ValueError("Invalid type of variables")

    def do_baseurl(self, params):
        '''Set the base url for all requests. Example: baseurl http://testserver.com/api'''
        args = shlex.split(params)
        if len(args) != 1:
            raise ValueError("Invalid number of parameters")
        (baseurl,) = args
        self.baseurl = baseurl
        self.restshlib.set_base_url(self.baseurl)

    def do_login(self, params):
        '''Set HTTP AUTH login username and password. Example: login myusername'''
        args = shlex.split(params)
        if len(args) != 1:
            raise ValueError("Invalid number of parameters")
        (username,) = args
        self.login = username
        # Never echo the password; the auth method comes from settings.
        password = getpass.getpass('Password: ')
        self.restshlib.set_auth(self.login, password,
                self.restshlib.settings.get('auth_method', 'basic'))

    def do_get(self, params):
        """
        Send get request. Example: get /url
        """
        url, data = self._split_params(params)
        response = self.restshlib.get(url, data)
        self._print_response(response)

    def do_post(self, params):
        """
        Send post request. Example: post /url key=value test=test
        """
        url, data = self._split_params(params)
        response = self.restshlib.post(url, data)
        self._print_response(response)

    def do_put(self, params):
        """
        Send put request. Example: put /url key=value test=test
        """
        url, data = self._split_params(params)
        response = self.restshlib.put(url, data)
        self._print_response(response)

    def do_delete(self, params):
        """
        Send delete request. Example: delete /url
        """
        url, data = self._split_params(params)
        response = self.restshlib.delete(url, data)
        self._print_response(response)

    def do_prompt(self, params):
        '''Change restsh prompt. Example: prompt "restsh> "'''
        args = shlex.split(params)
        if len(args) != 1:
            raise ValueError("Invalid number of parameters")
        (prompt,) = args
        self.cfg_prompt = prompt
| |
# Django settings for leyaproject project.
import os

# NOTE(review): DEBUG is hard-coded on -- disable for production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# Database selection: Cloud SQL when running on App Engine, the rdbms
# backend when SETTINGS_MODE=prod locally, otherwise a local MySQL server.
if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'):
    # Running on production App Engine, so use a Google Cloud SQL database.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'HOST': '/cloudsql/%s:%s' % (os.getenv('GOOGLE_CLOUD_SQL_PROJECT_ID'),
                os.getenv('GOOGLE_CLOUD_SQL_INSTANCE_NAME')),
            'NAME': '%s' % (os.getenv('GOOGLE_CLOUD_SQL_DB_NAME')),
            'USER': '%s' % (os.getenv('GOOGLE_CLOUD_SQL_DB_USER')),
        }
    }
elif os.getenv('SETTINGS_MODE') == 'prod':
    # Running in development, but want to access the Google Cloud SQL instance
    # in production.
    DATABASES = {
        'default': {
            'ENGINE': 'google.appengine.ext.django.backends.rdbms',
            'INSTANCE': '%s:%s' % (os.getenv('GOOGLE_CLOUD_SQL_PROJECT_ID'),
                os.getenv('GOOGLE_CLOUD_SQL_INSTANCE_NAME')),
            'NAME': '%s' % (os.getenv('GOOGLE_CLOUD_SQL_DB_NAME')),
            'USER': '%s' % (os.getenv('GOOGLE_CLOUD_SQL_DB_USER')),
        }
    }
else:
    # Running in development, so use a local MySQL database.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': '%s' % (os.getenv('LOCAL_DB_NAME')),
            'USER': '%s' % (os.getenv('LOCAL_DB_USER')),
            'PASSWORD': '%s' % (os.getenv('LOCAL_DB_PASSWORD')),
        }
    }
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# ============
# PATHS Configuration
# ============
#from ..libs import unipath
import unipath

# PROJECT_DIR = ../themoon_project
PROJECT_DIR = unipath.Path(__file__).ancestor(3)

# ============
# MEDIA
# ============
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = PROJECT_DIR.child('media')

# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'

# ============
# Example: "/var/www/example.com/static/"
STATIC_ROOT = PROJECT_DIR.child('assets')

# Example: "http://example.com/static/", "http://static.example.com/"
# Static assets are served from a Google Cloud Storage bucket.
STATIC_URL = 'http://storage.googleapis.com/%s/%s/' % (os.getenv('GOOGLE_CLOUD_STORAGE_BUCKET_NAME_STATIC'), 'assets')
#STATIC_URL = '/static/'

STATICFILES_DIRS = (
    #PROJECT_DIR.child('assets'),
)

# NOTE(review): this value is silently overridden by a second
# TEMPLATE_DIRS assignment further down in this file -- confirm which
# location is intended.
TEMPLATE_DIRS = (
    PROJECT_DIR.child('templates'),
)

LOCALE_PATHS = (
    PROJECT_DIR.child('locale'),
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    #'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): evaluates to None when the SECRET_KEY env var is unset;
# Django will refuse to start in that case.
SECRET_KEY = os.getenv('SECRET_KEY')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.static',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'leya.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'leya.wsgi.application'

# NOTE(review): duplicate "import os" (already imported at the top of the
# file) and a second TEMPLATE_DIRS assignment that silently overrides the
# PROJECT_DIR-based value defined earlier -- confirm which is intended.
import os
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '../../', 'templates').replace('\\','/'),)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'leya.apps.hello',
)

SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# ================
# Google Cloud Storage
# ================
GOOGLE_CLOUD_STORAGE_BUCKET_NAME_MEDIA = '%s' % os.getenv('GOOGLE_CLOUD_STORAGE_BUCKET_NAME_MEDIA')
DEFAULT_FILE_STORAGE = 'leya.core.storage.GoogleCloudStorage'
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""Use blog post test to test user permissions logic"""
import frappe
import frappe.defaults
import unittest
import json
import frappe.model.meta
from frappe.permissions import (add_user_permission, remove_user_permission,
clear_user_permissions_for_doctype, get_doc_permissions, add_permission,
get_valid_perms)
from frappe.core.page.permission_manager.permission_manager import update, reset
from frappe.test_runner import make_test_records_for_doctype
from six import string_types
test_records = frappe.get_test_records('Blog Post')
test_dependencies = ["User", "Contact", "Salutation"]
class TestPermissions(unittest.TestCase):
	"""Exercise Frappe's user-permission logic against Blog Post records.

	Review fixes:
	- deprecated ``assertEquals`` alias replaced by ``assertEqual``
	- ``test_strict_user_permissions`` used ``assertTrue(len(...), 1)``;
	  the second ``assertTrue`` argument is only the failure message, so
	  the check always passed -- it is now a real ``assertEqual``.
	"""

	def setUp(self):
		"""Give the three test users their roles and start from a clean slate."""
		frappe.clear_cache(doctype="Blog Post")
		frappe.clear_cache(doctype="Contact")

		user = frappe.get_doc("User", "test1@example.com")
		user.add_roles("Website Manager")
		user.add_roles("System Manager")

		user = frappe.get_doc("User", "test2@example.com")
		user.add_roles("Blogger")

		user = frappe.get_doc("User", "test3@example.com")
		user.add_roles("Sales User")

		reset('Blogger')
		reset('Blog Post')
		reset('Contact')
		reset('Salutation')

		frappe.db.sql('delete from `tabUser Permission`')

		self.set_ignore_user_permissions_if_missing(0)

		frappe.set_user("test1@example.com")

	def tearDown(self):
		"""Undo everything setUp/tests changed so cases stay independent."""
		frappe.set_user("Administrator")
		frappe.db.set_value("Blogger", "_Test Blogger 1", "user", None)

		clear_user_permissions_for_doctype("Blog Category")
		clear_user_permissions_for_doctype("Blog Post")
		clear_user_permissions_for_doctype("Blogger")
		clear_user_permissions_for_doctype("Contact")
		clear_user_permissions_for_doctype("Salutation")

		self.set_ignore_user_permissions_if_missing(0)

	@staticmethod
	def set_ignore_user_permissions_if_missing(ignore):
		# ignore_mandatory lets System Settings save even if unrelated
		# mandatory fields are blank.
		ss = frappe.get_doc("System Settings")
		ss.ignore_user_permissions_if_missing = ignore
		ss.flags.ignore_mandatory = 1
		ss.save()

	@staticmethod
	def set_strict_user_permissions(ignore):
		ss = frappe.get_doc("System Settings")
		ss.apply_strict_user_permissions = ignore
		ss.flags.ignore_mandatory = 1
		ss.save()

	def test_basic_permission(self):
		post = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertTrue(post.has_permission("read"))

	def test_user_permissions_in_doc(self):
		"""Restricting Blog Category hides posts in other categories."""
		self.set_user_permission_doctypes(["Blog Category"])

		add_user_permission("Blog Category", "_Test Blog Category 1",
			"test2@example.com")

		frappe.set_user("test2@example.com")

		post = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertFalse(post.has_permission("read"))
		self.assertFalse(get_doc_permissions(post).get("read"))

		post1 = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertTrue(post1.has_permission("read"))
		self.assertTrue(get_doc_permissions(post1).get("read"))

	def test_user_permissions_in_report(self):
		"""User permissions also filter list/report queries."""
		self.set_user_permission_doctypes(["Blog Category"])

		add_user_permission("Blog Category", "_Test Blog Category 1", "test2@example.com")

		frappe.set_user("test2@example.com")
		names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "blog_category"])]

		self.assertTrue("-test-blog-post-1" in names)
		self.assertFalse("-test-blog-post" in names)

	def test_default_values(self):
		"""A single permitted value becomes the default in new documents."""
		add_user_permission("Blog Category", "_Test Blog Category 1", "test2@example.com")

		frappe.set_user("test2@example.com")
		doc = frappe.new_doc("Blog Post")

		self.assertEqual(doc.get("blog_category"), "_Test Blog Category 1")

	def test_user_link_match_doc(self):
		"""A Blogger linked to the user restricts documents by that link."""
		self.set_user_permission_doctypes(["Blogger"])

		blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
		blogger.user = "test2@example.com"
		blogger.save()

		frappe.set_user("test2@example.com")

		post = frappe.get_doc("Blog Post", "-test-blog-post-2")
		self.assertTrue(post.has_permission("read"))

		post1 = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertFalse(post1.has_permission("read"))

	def test_user_link_match_report(self):
		self.set_user_permission_doctypes(["Blogger"])

		blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
		blogger.user = "test2@example.com"
		blogger.save()

		frappe.set_user("test2@example.com")

		names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "owner"])]
		self.assertTrue("-test-blog-post-2" in names)
		self.assertFalse("-test-blog-post-1" in names)

	def test_set_user_permissions(self):
		frappe.set_user("test1@example.com")
		add_user_permission("Blog Post", "-test-blog-post", "test2@example.com")

	def test_not_allowed_to_set_user_permissions(self):
		frappe.set_user("test2@example.com")

		# this user can't add user permissions
		self.assertRaises(frappe.PermissionError, add_user_permission,
			"Blog Post", "-test-blog-post", "test2@example.com")

	def test_read_if_explicit_user_permissions_are_set(self):
		self.set_user_permission_doctypes(["Blog Post"])

		self.test_set_user_permissions()

		frappe.set_user("test2@example.com")

		# user can only access permitted blog post
		doc = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertTrue(doc.has_permission("read"))

		# and not this one
		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertFalse(doc.has_permission("read"))

	def test_not_allowed_to_remove_user_permissions(self):
		self.test_set_user_permissions()

		frappe.set_user("test2@example.com")

		# user cannot remove their own user permissions
		self.assertRaises(frappe.PermissionError, remove_user_permission,
			"Blog Post", "-test-blog-post", "test2@example.com")

	def test_user_permissions_based_on_blogger(self):
		frappe.set_user("test2@example.com")
		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertTrue(doc.has_permission("read"))

		self.set_user_permission_doctypes(["Blog Post"])

		frappe.set_user("test1@example.com")
		add_user_permission("Blog Post", "-test-blog-post", "test2@example.com")

		frappe.set_user("test2@example.com")

		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertFalse(doc.has_permission("read"))

		doc = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertTrue(doc.has_permission("read"))

	def test_set_only_once(self):
		"""A set_only_once field cannot be changed after first save."""
		blog_post = frappe.get_meta("Blog Post")
		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		doc.db_set('title', 'Old')
		blog_post.get_field("title").set_only_once = 1
		doc.title = "New"
		self.assertRaises(frappe.CannotChangeConstantError, doc.save)
		blog_post.get_field("title").set_only_once = 0

	def test_set_only_once_child_table_rows(self):
		"""Removing a row from a set_only_once child table must fail."""
		doctype_meta = frappe.get_meta("DocType")
		doctype_meta.get_field("fields").set_only_once = 1
		doc = frappe.get_doc("DocType", "Blog Post")

		# remove last one
		doc.fields = doc.fields[:-1]

		self.assertRaises(frappe.CannotChangeConstantError, doc.save)

		frappe.clear_cache(doctype='DocType')

	def test_set_only_once_child_table_row_value(self):
		"""Changing a value inside a set_only_once child table must fail."""
		doctype_meta = frappe.get_meta("DocType")
		doctype_meta.get_field("fields").set_only_once = 1
		doc = frappe.get_doc("DocType", "Blog Post")

		# change one property from the child table
		doc.fields[-1].fieldtype = 'HTML'

		self.assertRaises(frappe.CannotChangeConstantError, doc.save)

		frappe.clear_cache(doctype='DocType')

	def test_set_only_once_child_table_okay(self):
		"""An unchanged set_only_once child table passes validation."""
		doctype_meta = frappe.get_meta("DocType")
		doctype_meta.get_field("fields").set_only_once = 1
		doc = frappe.get_doc("DocType", "Blog Post")

		doc.load_doc_before_save()
		self.assertFalse(doc.validate_set_only_once())

		frappe.clear_cache(doctype='DocType')

	def test_user_permission_doctypes(self):
		"""Only the doctypes listed in user_permission_doctypes apply."""
		add_user_permission("Blog Category", "_Test Blog Category 1",
			"test2@example.com")
		add_user_permission("Blogger", "_Test Blogger 1",
			"test2@example.com")

		frappe.set_user("test2@example.com")

		self.set_user_permission_doctypes(["Blogger"])

		frappe.model.meta.clear_cache("Blog Post")

		doc = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertFalse(doc.has_permission("read"))

		doc = frappe.get_doc("Blog Post", "-test-blog-post-2")
		self.assertTrue(doc.has_permission("read"))

		frappe.model.meta.clear_cache("Blog Post")

	def if_owner_setup(self):
		"""Enable if_owner for Blogger and add category/blogger restrictions."""
		update('Blog Post', 'Blogger', 0, 'if_owner', 1)

		add_user_permission("Blog Category", "_Test Blog Category 1",
			"test2@example.com")
		add_user_permission("Blogger", "_Test Blogger 1",
			"test2@example.com")

		update('Blog Post', 'Blogger', 0, 'user_permission_doctypes', json.dumps(["Blog Category"]))

		frappe.model.meta.clear_cache("Blog Post")

	def set_user_permission_doctypes(self, user_permission_doctypes):
		# Shortcut for the module-level helper, always targeting the
		# Blogger role's Blog Post permissions.
		set_user_permission_doctypes(["Blog Post"], role="Blogger",
			apply_user_permissions=1, user_permission_doctypes=user_permission_doctypes)

	def test_insert_if_owner_with_user_permissions(self):
		"""If `If Owner` is checked for a Role, check if that document is allowed to be read, updated, submitted, etc. except be created, even if the document is restricted based on User Permissions."""
		frappe.delete_doc('Blog Post', '-test-blog-post-title')

		self.set_user_permission_doctypes(["Blog Category"])

		self.if_owner_setup()

		frappe.set_user("test2@example.com")

		doc = frappe.get_doc({
			"doctype": "Blog Post",
			"blog_category": "_Test Blog Category",
			"blogger": "_Test Blogger 1",
			"title": "_Test Blog Post Title",
			"content": "_Test Blog Post Content"
		})

		self.assertRaises(frappe.PermissionError, doc.insert)

		frappe.set_user("Administrator")
		add_user_permission("Blog Category", "_Test Blog Category",
			"test2@example.com")

		frappe.set_user("test2@example.com")
		doc.insert()

		frappe.set_user("Administrator")
		frappe.permissions.remove_user_permission("Blog Category", "_Test Blog Category",
			"test2@example.com")

		frappe.set_user("test2@example.com")
		doc = frappe.get_doc(doc.doctype, doc.name)
		self.assertTrue(doc.has_permission("read"))
		self.assertTrue(doc.has_permission("write"))
		self.assertFalse(doc.has_permission("create"))

	def test_ignore_user_permissions_if_missing(self):
		"""If `Ignore User Permissions If Missing` is checked in System Settings, show records even if User Permissions are missing for a linked doctype"""
		self.set_user_permission_doctypes(['Blog Category', 'Blog Post', 'Blogger'])

		frappe.set_user("Administrator")
		# add_user_permission("Blog Category", "_Test Blog Category",
		# 	"test2@example.com")

		frappe.set_user("test2@example.com")

		doc = frappe.get_doc({
			"doctype": "Blog Post",
			"blog_category": "_Test Blog Category",
			"blogger": "_Test Blogger 1",
			"title": "_Test Blog Post Title",
			"content": "_Test Blog Post Content"
		})

		self.assertFalse(doc.has_permission("write"))

		frappe.set_user("Administrator")
		self.set_ignore_user_permissions_if_missing(1)

		frappe.set_user("test2@example.com")
		self.assertTrue(doc.has_permission("write"))

	def test_strict_user_permissions(self):
		"""If `Strict User Permissions` is checked in System Settings,
			show records even if User Permissions are missing for a linked
			doctype"""

		frappe.set_user("Administrator")
		frappe.db.sql('delete from tabContact')
		make_test_records_for_doctype('Contact', force=True)

		set_user_permission_doctypes("Contact", role="Sales User",
			apply_user_permissions=1, user_permission_doctypes=['Salutation'])
		set_user_permission_doctypes("Salutation", role="All",
			apply_user_permissions=1, user_permission_doctypes=['Salutation'])

		add_user_permission("Salutation", "Mr", "test3@example.com")
		self.set_strict_user_permissions(0)

		frappe.set_user("test3@example.com")
		self.assertEqual(len(frappe.get_list("Contact")), 2)

		frappe.set_user("Administrator")
		self.set_strict_user_permissions(1)

		frappe.set_user("test3@example.com")
		# Fix: this was assertTrue(len(...), 1), whose second argument is
		# only the failure message -- the check could never fail.
		self.assertEqual(len(frappe.get_list("Contact")), 1)

		frappe.set_user("Administrator")
		self.set_strict_user_permissions(0)

	def test_automatic_apply_user_permissions(self):
		'''Test user permissions are automatically applied when a user permission
		is created'''
		# create a user
		frappe.get_doc(dict(doctype='User', email='test_user_perm@example.com',
			first_name='tester')).insert(ignore_if_duplicate=True)
		frappe.get_doc(dict(doctype='Role', role_name='Test Role User Perm')
			).insert(ignore_if_duplicate=True)

		# add a permission for event
		add_permission('DocType', 'Test Role User Perm')
		frappe.get_doc('User', 'test_user_perm@example.com').add_roles('Test Role User Perm')

		# add user permission
		add_user_permission('Module Def', 'Core', 'test_user_perm@example.com', True)

		# check if user permission is applied in the new role
		_perm = None
		for perm in get_valid_perms('DocType', 'test_user_perm@example.com'):
			if perm.role == 'Test Role User Perm':
				_perm = perm

		self.assertEqual(_perm.apply_user_permissions, 1)

		# restrict by module
		self.assertTrue('Module Def' in json.loads(_perm.user_permission_doctypes))
def set_user_permission_doctypes(doctypes, role, apply_user_permissions,
	user_permission_doctypes):
	"""Configure the given role's permission rows for one or more doctypes.

	Fix: ``apply_user_permissions`` was accepted but ignored (the update
	always wrote ``1``); the parameter is now honoured.  All existing
	callers pass ``1``, so their behaviour is unchanged.

	:param doctypes: a doctype name or a list of doctype names
	:param role: role whose permission row (permlevel 0) is updated
	:param apply_user_permissions: value written to apply_user_permissions
	:param user_permission_doctypes: list of restricting doctypes (stored
		as JSON), or a falsy value to clear the restriction
	"""
	# An empty list/None clears the restriction; otherwise store as JSON.
	user_permission_doctypes = None if not user_permission_doctypes else json.dumps(user_permission_doctypes)

	if isinstance(doctypes, string_types):
		doctypes = [doctypes]

	for doctype in doctypes:
		update(doctype, role, 0, 'apply_user_permissions', apply_user_permissions)
		update(doctype, role, 0, 'user_permission_doctypes',
			user_permission_doctypes)
		frappe.clear_cache(doctype=doctype)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Classes that hold units of .po files (pounit) or entire files (pofile).
Gettext-style .po (or .pot) files are used in translations for KDE, GNOME and
many other projects.
This uses libgettextpo from the gettext package. Any version before 0.17 will
at least cause some subtle bugs or may not work at all. Developers might want
to have a look at gettext-tools/libgettextpo/gettext-po.h from the gettext
package for the public API of the library.
"""
from translate.misc.multistring import multistring
from translate.storage import pocommon
from translate.storage.pocommon import encodingToUse
from translate.misc import quote
from translate.lang import data
from ctypes import *
import ctypes.util
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import os
import pypo
import re
import sys
import tempfile
lsep = " "
"""Seperator for #: entries"""
STRING = c_char_p
# Structures
# Structures
# Opaque handle for libgettextpo's po_message_t.  The real layout lives
# inside the C library, so no fields are declared here; instances are only
# passed back and forth through POINTER(po_message).
class po_message(Structure):
    _fields_ = []
# Function prototypes
# Callback signature libgettextpo uses to report a problem in a single
# message: (severity, message, filename, lineno, column, multiline_p,
# message_text).
xerror_prototype = CFUNCTYPE(None, c_int, POINTER(po_message), STRING, c_uint, c_uint, c_int, STRING)
# Same shape twice over: reports an inconsistency between two messages.
xerror2_prototype = CFUNCTYPE(None, c_int, POINTER(po_message), STRING, c_uint, c_uint, c_int, STRING, POINTER(po_message), STRING, c_uint, c_uint, c_int, STRING)
# Structures (error handler)
# Mirrors the C struct po_xerror_handler from gettext-po.h: two function
# pointers invoked for single-message and cross-message errors.
class po_xerror_handler(Structure):
    _fields_ = [('xerror', xerror_prototype),
                ('xerror2', xerror2_prototype)]
# Mirrors the (older) C struct po_error_handler from gettext-po.h.
# Declared for completeness; the code below only installs the xerror
# handler variant.
class po_error_handler(Structure):
    _fields_ = [
        ('error', CFUNCTYPE(None, c_int, c_int, STRING)),
        ('error_at_line', CFUNCTYPE(None, c_int, c_int, STRING, c_uint, STRING)),
        ('multiline_warning', CFUNCTYPE(None, STRING, STRING)),
        ('multiline_error', CFUNCTYPE(None, STRING, STRING)),
    ]
# Callback functions for po_xerror_handler
def xerror_cb(severity, message, filename, lineno, column, multilint_p, message_text):
    """Callback installed into libgettextpo for single-message errors.

    Writes the diagnostic to stderr; anything above warning severity is
    escalated to a ValueError.
    """
    parts = ("xerror_cb", severity, message, filename, lineno, column,
             multilint_p, message_text)
    sys.stderr.write(" ".join(str(part) for part in parts) + "\n")
    if severity >= 1:
        raise ValueError(message_text)
def xerror2_cb(severity, message1, filename1, lineno1, column1, multiline_p1, message_text1, message2, filename2, lineno2, column2, multiline_p2, message_text2):
    """Callback installed into libgettextpo for errors spanning two messages.

    Writes the diagnostic to stderr; anything above warning severity is
    escalated to a ValueError carrying the first message's text.
    """
    parts = ("xerror2_cb", severity, message1, filename1, lineno1, column1,
             multiline_p1, message_text1, message2, filename2, lineno2,
             column2, multiline_p2, message_text2)
    sys.stderr.write(" ".join(str(part) for part in parts) + "\n")
    if severity >= 1:
        raise ValueError(message_text1)
# Load libgettextpo
gpo = None
# 'gettextpo' is recognised on Unix, while only 'libgettextpo' is recognised on
# windows. Therefore we test both.
names = ['gettextpo', 'libgettextpo']
for name in names:
    lib_location = ctypes.util.find_library(name)
    if lib_location:
        gpo = cdll.LoadLibrary(lib_location)
    if gpo:
        break
# for/else: only reached when neither library name could be resolved.
else:
    # Now we are getting desperate, so let's guess a unix type DLL that might
    # be in LD_LIBRARY_PATH or loaded with LD_PRELOAD
    try:
        gpo = cdll.LoadLibrary('libgettextpo.so')
    # Python 2 except syntax; the bound exception `e` is unused.
    except OSError, e:
        raise ImportError("gettext PO library not found")
# Setup return and parameter types for every libgettextpo entry point used
# in this module.  ctypes defaults an undeclared restype to c_int, so only
# non-default return types (and argument lists) need to be spelled out.
# File access
gpo.po_file_read_v3.argtypes = [STRING, POINTER(po_xerror_handler)]
gpo.po_file_write_v2.argtypes = [c_int, STRING, POINTER(po_xerror_handler)]
# Fix: this was previously assigned to the misspelled attribute ``retype``,
# which ctypes silently ignored; ``restype`` is the real attribute name.
gpo.po_file_write_v2.restype = c_int
# Header
gpo.po_file_domain_header.restype = STRING
gpo.po_header_field.restype = STRING
gpo.po_header_field.argtypes = [STRING, STRING]
# Locations (filepos)
gpo.po_filepos_file.restype = STRING
gpo.po_message_filepos.restype = c_int
gpo.po_message_filepos.argtypes = [c_int, c_int]
gpo.po_message_add_filepos.argtypes = [c_int, STRING, c_int]
# Message (get methods)
gpo.po_message_comments.restype = STRING
gpo.po_message_extracted_comments.restype = STRING
gpo.po_message_prev_msgctxt.restype = STRING
gpo.po_message_prev_msgid.restype = STRING
gpo.po_message_prev_msgid_plural.restype = STRING
gpo.po_message_is_format.restype = c_int
gpo.po_message_is_format.argtypes = [c_int, STRING]
gpo.po_message_set_format.argtypes = [c_int, STRING, c_int]
gpo.po_message_msgctxt.restype = STRING
gpo.po_message_msgid.restype = STRING
gpo.po_message_msgid_plural.restype = STRING
gpo.po_message_msgstr.restype = STRING
gpo.po_message_msgstr_plural.restype = STRING
# Message (set methods)
gpo.po_message_set_comments.argtypes = [c_int, STRING]
gpo.po_message_set_extracted_comments.argtypes = [c_int, STRING]
gpo.po_message_set_fuzzy.argtypes = [c_int, c_int]
gpo.po_message_set_msgctxt.argtypes = [c_int, STRING]
# Setup the po_xerror_handler.  The handler and its callback wrappers are
# kept at module level so they stay referenced (and thus alive) for as long
# as the C library might call back into them.
xerror_handler = po_xerror_handler()
xerror_handler.xerror = xerror_prototype(xerror_cb)
xerror_handler.xerror2 = xerror2_prototype(xerror2_cb)
def escapeforpo(text):
    """Escape *text* for use inside a po file (delegates to pypo)."""
    return pypo.escapeforpo(text)
def quoteforpo(text):
    """Quote and wrap *text* as po output lines (delegates to pypo)."""
    return pypo.quoteforpo(text)
def unquotefrompo(postr):
    """Reverse of quoteforpo: unquote po-formatted *postr* (delegates to pypo)."""
    return pypo.unquotefrompo(postr)
def get_libgettextpo_version():
"""Returns the libgettextpo version
@rtype: three-value tuple
@return: libgettextpo version in the following format::
(major version, minor version, subminor version)
"""
libversion = c_long.in_dll(gpo, 'libgettextpo_version')
major = libversion.value >> 16
minor = libversion.value >> 8
subminor = libversion.value - (major << 16) - (minor << 8)
return major, minor, subminor
class pounit(pocommon.pounit):
    """A po message backed by a libgettextpo ``po_message_t`` handle.

    All message state except the encoding lives in the C object referenced
    by ``self._gpo_message``; the methods here are thin wrappers around the
    ``gpo.po_message_*`` entry points.
    """

    def __init__(self, source=None, encoding='utf-8', gpo_message=None):
        # Wrap an existing C message when one is given; otherwise create a
        # fresh one, optionally seeding its msgid (and an empty msgstr).
        self._rich_source = None
        self._rich_target = None
        self._encoding = encoding
        if not gpo_message:
            self._gpo_message = gpo.po_message_create()
            # `source == ""` is checked separately because "" is falsy but
            # still a legitimate (empty) msgid.
            if source or source == "":
                self.source = source
                self.target = ""
        elif gpo_message:
            self._gpo_message = gpo_message

    def setmsgid_plural(self, msgid_plural):
        """Set the plural msgid; a list is joined into one string first."""
        if isinstance(msgid_plural, list):
            msgid_plural = "".join(msgid_plural)
        gpo.po_message_set_msgid_plural(self._gpo_message, msgid_plural)
    # Write-only property; reading plurals goes through `source` instead.
    msgid_plural = property(None, setmsgid_plural)

    def getsource(self):
        """Return the msgid as unicode (a multistring when plural)."""
        def remove_msgid_comments(text):
            # Strip a KDE-style "_: comment\n" prefix from the msgid.
            if not text:
                return text
            if text.startswith("_:"):
                remainder = re.search(r"_: .*\n(.*)", text)
                if remainder:
                    return remainder.group(1)
                else:
                    return u""
            else:
                return text
        singular = remove_msgid_comments(gpo.po_message_msgid(self._gpo_message).decode(self._encoding))
        if singular:
            if self.hasplural():
                multi = multistring(singular, self._encoding)
                pluralform = gpo.po_message_msgid_plural(self._gpo_message).decode(self._encoding)
                multi.strings.append(pluralform)
                return multi
            else:
                return singular
        else:
            return u""

    def setsource(self, source):
        """Set the msgid; a list/multistring also sets msgid_plural."""
        if isinstance(source, multistring):
            source = source.strings
        if isinstance(source, unicode):
            source = source.encode(self._encoding)
        if isinstance(source, list):
            gpo.po_message_set_msgid(self._gpo_message, source[0].encode(self._encoding))
            if len(source) > 1:
                gpo.po_message_set_msgid_plural(self._gpo_message, source[1].encode(self._encoding))
        else:
            gpo.po_message_set_msgid(self._gpo_message, source)
            # No plural supplied, so clear any stale msgid_plural.
            gpo.po_message_set_msgid_plural(self._gpo_message, None)
    source = property(getsource, setsource)

    def gettarget(self):
        """Return the msgstr: a multistring for plurals, unicode otherwise."""
        if self.hasplural():
            plurals = []
            nplural = 0
            # Walk the plural forms until the C API returns NULL.
            plural = gpo.po_message_msgstr_plural(self._gpo_message, nplural)
            while plural:
                plurals.append(plural.decode(self._encoding))
                nplural += 1
                plural = gpo.po_message_msgstr_plural(self._gpo_message, nplural)
            if plurals:
                multi = multistring(plurals, encoding=self._encoding)
            else:
                multi = multistring(u"")
        else:
            multi = (gpo.po_message_msgstr(self._gpo_message) or "").decode(self._encoding)
        return multi

    def settarget(self, target):
        """Set the msgstr; accepts plural lists/dicts and single strings."""
        # for plural strings: convert 'target' into a list
        if self.hasplural():
            if isinstance(target, multistring):
                target = target.strings
            elif isinstance(target, basestring):
                target = [target]
        # for non-plurals: check number of items in 'target'
        elif isinstance(target, (dict, list)):
            if len(target) == 1:
                target = target[0]
            else:
                raise ValueError("po msgid element has no plural but msgstr has %d elements (%s)" % (len(target), target))
        # empty the previous list of messages
        # TODO: the "pypo" implementation does not remove the previous items of
        # the target, if self.target == target (essentially: comparing only
        # the first item of a plural string with the single new string)
        # Maybe this behaviour should be unified.
        if isinstance(target, (dict, list)):
            i = 0
            message = gpo.po_message_msgstr_plural(self._gpo_message, i)
            while message is not None:
                gpo.po_message_set_msgstr_plural(self._gpo_message, i, None)
                i += 1
                message = gpo.po_message_msgstr_plural(self._gpo_message, i)
        # add the items of a list
        if isinstance(target, list):
            for i in range(len(target)):
                targetstring = target[i]
                if isinstance(targetstring, unicode):
                    targetstring = targetstring.encode(self._encoding)
                gpo.po_message_set_msgstr_plural(self._gpo_message, i, targetstring)
        # add the values of a dict
        elif isinstance(target, dict):
            # NOTE(review): dict values are not encoded here, unlike the
            # list branch above -- confirm callers only pass byte strings.
            for i, targetstring in enumerate(target.itervalues()):
                gpo.po_message_set_msgstr_plural(self._gpo_message, i, targetstring)
        # add a single string
        else:
            if isinstance(target, unicode):
                target = target.encode(self._encoding)
            if target is None:
                gpo.po_message_set_msgstr(self._gpo_message, "")
            else:
                gpo.po_message_set_msgstr(self._gpo_message, target)
    target = property(gettarget, settarget)

    def getid(self):
        """The unique identifier for this unit according to the conventions
        in .mo files."""
        id = (gpo.po_message_msgid(self._gpo_message) or "").decode(self._encoding)
        # Gettext does not consider the plural to determine duplicates, only
        # the msgid. For generation of .mo files, we might want to use this
        # code to generate the entry for the hash table, but for now, it is
        # commented out for conformance to gettext.
        # plural = gpo.po_message_msgid_plural(self._gpo_message)
        # if not plural is None:
        #     id = '%s\0%s' % (id, plural)
        context = gpo.po_message_msgctxt(self._gpo_message)
        if context:
            # EOT (\x04) separates context from msgid, as in .mo files.
            id = u"%s\04%s" % (context.decode(self._encoding), id)
        return id

    def getnotes(self, origin=None):
        """Return translator and/or extracted comments, selected by *origin*."""
        if origin == None:
            comments = gpo.po_message_comments(self._gpo_message) + \
                gpo.po_message_extracted_comments(self._gpo_message)
        elif origin == "translator":
            comments = gpo.po_message_comments(self._gpo_message)
        elif origin in ["programmer", "developer", "source code"]:
            comments = gpo.po_message_extracted_comments(self._gpo_message)
        else:
            raise ValueError("Comment type not valid")
        # Pre-0.17 libgettextpo keeps a leading space on each comment line.
        if comments and get_libgettextpo_version() < (0, 17, 0):
            comments = "\n".join([line.strip() for line in comments.split("\n")])
        # Let's drop the last newline
        return comments[:-1].decode(self._encoding)

    def addnote(self, text, origin=None, position="append"):
        """Add *text* as a comment of the given *origin*.

        position: "append" (default), "merge" (line-wise de-duplication
        against existing notes) or anything else to prepend.
        """
        # ignore empty strings and strings without non-space characters
        if not (text and text.strip()):
            return
        text = data.forceunicode(text)
        oldnotes = self.getnotes(origin)
        newnotes = None
        if oldnotes:
            if position == "append":
                newnotes = oldnotes + "\n" + text
            elif position == "merge":
                if oldnotes != text:
                    oldnoteslist = oldnotes.split("\n")
                    for newline in text.split("\n"):
                        newline = newline.rstrip()
                        # avoid duplicate comment lines (this might cause some problems)
                        if newline not in oldnotes or len(newline) < 5:
                            oldnoteslist.append(newline)
                    newnotes = "\n".join(oldnoteslist)
            else:
                newnotes = text + '\n' + oldnotes
        else:
            newnotes = "\n".join([line.rstrip() for line in text.split("\n")])

        if newnotes:
            newlines = []
            # Pre-0.17 libgettextpo expects a leading space on each line.
            needs_space = get_libgettextpo_version() < (0, 17, 0)
            for line in newnotes.split("\n"):
                if line and needs_space:
                    newlines.append(" " + line)
                else:
                    newlines.append(line)
            newnotes = "\n".join(newlines).encode(self._encoding)
            if origin in ["programmer", "developer", "source code"]:
                gpo.po_message_set_extracted_comments(self._gpo_message, newnotes)
            else:
                gpo.po_message_set_comments(self._gpo_message, newnotes)

    def removenotes(self):
        # NOTE(review): only translator comments are cleared; extracted
        # (developer) comments are left untouched -- confirm intended.
        gpo.po_message_set_comments(self._gpo_message, "")

    def copy(self):
        newpo = self.__class__()
        # NOTE(review): this shares the underlying C message rather than
        # duplicating it, so mutating the "copy" also mutates the original.
        newpo._gpo_message = self._gpo_message
        return newpo

    def merge(self, otherpo, overwrite=False, comments=True, authoritative=False):
        """Merges the otherpo (with the same msgid) into this one.

        Overwrite non-blank self.msgstr only if overwrite is True
        merge comments only if comments is True
        """
        if not isinstance(otherpo, pounit):
            super(pounit, self).merge(otherpo, overwrite, comments)
            return
        if comments:
            self.addnote(otherpo.getnotes("translator"), origin="translator", position="merge")
            # FIXME mergelists(self.typecomments, otherpo.typecomments)
            if not authoritative:
                # We don't bring across otherpo.automaticcomments as we consider ourself
                # to be the authority. Same applies to otherpo.msgidcomments
                self.addnote(otherpo.getnotes("developer"), origin="developer", position="merge")
                self.msgidcomment = otherpo._extract_msgidcomments() or None
            self.addlocations(otherpo.getlocations())
        if not self.istranslated() or overwrite:
            # Remove kde-style comments from the translation (if any).
            if self._extract_msgidcomments(otherpo.target):
                otherpo.target = otherpo.target.replace('_: ' + otherpo._extract_msgidcomments()+ '\n', '')
            self.target = otherpo.target
            if self.source != otherpo.source or self.getcontext() != otherpo.getcontext():
                self.markfuzzy()
            else:
                self.markfuzzy(otherpo.isfuzzy())
        elif not otherpo.istranslated():
            if self.source != otherpo.source:
                self.markfuzzy()
        else:
            if self.target != otherpo.target:
                self.markfuzzy()

    def isheader(self):
        #return self.source == u"" and self.target != u""
        # we really want to make sure that there is no msgidcomment or msgctxt
        return self.getid() == "" and len(self.target) > 0

    def isblank(self):
        return len(self.source) == len(self.target) == len(self.getcontext()) == 0

    def hastypecomment(self, typecomment):
        """True if the message carries the given format flag (e.g. "c-format")."""
        return gpo.po_message_is_format(self._gpo_message, typecomment)

    def settypecomment(self, typecomment, present=True):
        """Set or clear the given format flag on the message."""
        gpo.po_message_set_format(self._gpo_message, typecomment, present)

    def hasmarkedcomment(self, commentmarker):
        """True if any translator comment line starts with "(marker)"."""
        commentmarker = "(%s)" % commentmarker
        for comment in self.getnotes("translator").split("\n"):
            if comment.startswith(commentmarker):
                return True
        return False

    def isfuzzy(self):
        return gpo.po_message_is_fuzzy(self._gpo_message)

    def markfuzzy(self, present=True):
        gpo.po_message_set_fuzzy(self._gpo_message, present)

    def isobsolete(self):
        return gpo.po_message_is_obsolete(self._gpo_message)

    def makeobsolete(self):
        # FIXME: libgettexpo currently does not reset other data, we probably want to do that
        # but a better solution would be for libgettextpo to output correct data on serialisation
        gpo.po_message_set_obsolete(self._gpo_message, True)

    def resurrect(self):
        gpo.po_message_set_obsolete(self._gpo_message, False)

    def hasplural(self):
        return gpo.po_message_msgid_plural(self._gpo_message) is not None

    def _extract_msgidcomments(self, text=None):
        """Extract KDE style msgid comments from the unit.

        @rtype: String
        @return: Returns the extracted msgidcomments found in this unit's msgid.
        """
        if not text:
            text = gpo.po_message_msgid(self._gpo_message).decode(self._encoding)
        if text:
            return pocommon.extract_msgid_comment(text)
        return u""

    def setmsgidcomment(self, msgidcomment):
        # A falsy comment leaves the msgid untouched.
        if msgidcomment:
            self.source = u"_: %s\n%s" % (msgidcomment, self.source)
    msgidcomment = property(_extract_msgidcomments, setmsgidcomment)

    def __str__(self):
        # Serialise by wrapping this unit in a temporary single-unit pofile.
        pf = pofile()
        pf.addunit(self)
        return str(pf)

    def getlocations(self):
        """Return "file" or "file:line" location strings for this message."""
        locations = []
        i = 0
        location = gpo.po_message_filepos(self._gpo_message, i)
        while location:
            locname = gpo.po_filepos_file(location)
            locline = gpo.po_filepos_start_line(location)
            # -1 means the C library has no line number for this location.
            if locline == -1:
                locstring = locname
            else:
                locstring = locname + ":" + str(locline)
            locations.append(locstring)
            i += 1
            location = gpo.po_message_filepos(self._gpo_message, i)
        return locations

    def addlocation(self, location):
        """Add one or more whitespace-separated "file[:line]" locations."""
        for loc in location.split():
            parts = loc.split(":")
            file = parts[0]
            if len(parts) == 2:
                line = int(parts[1] or "0")
            else:
                line = -1
            gpo.po_message_add_filepos(self._gpo_message, file, line)

    def getcontext(self):
        """Return the msgctxt, falling back to a KDE-style msgid comment."""
        msgctxt = gpo.po_message_msgctxt(self._gpo_message)
        if msgctxt:
            return msgctxt.decode(self._encoding)
        else:
            msgidcomment = self._extract_msgidcomments()
            return msgidcomment

    def buildfromunit(cls, unit):
        """Build a native unit from a foreign unit, preserving as much
        information as possible."""
        if type(unit) == cls and hasattr(unit, "copy") and callable(unit.copy):
            return unit.copy()
        elif isinstance(unit, pocommon.pounit):
            newunit = cls(unit.source)
            newunit.target = unit.target
            #context
            newunit.msgidcomment = unit._extract_msgidcomments()
            context = unit.getcontext()
            if not newunit.msgidcomment and context:
                gpo.po_message_set_msgctxt(newunit._gpo_message, context)

            locations = unit.getlocations()
            if locations:
                newunit.addlocations(locations)
            notes = unit.getnotes("developer")
            if notes:
                newunit.addnote(notes, "developer")
            notes = unit.getnotes("translator")
            if notes:
                newunit.addnote(notes, "translator")
            if unit.isobsolete():
                newunit.makeobsolete()
            newunit.markfuzzy(unit.isfuzzy())
            for tc in ['python-format', 'c-format', 'php-format']:
                if unit.hastypecomment(tc):
                    newunit.settypecomment(tc)
                    # We assume/guess/hope that there will only be one
                    break
            return newunit
        else:
            # NOTE(review): ``base`` is not imported in this module, so this
            # fallback would raise NameError -- confirm against upstream.
            return base.TranslationUnit.buildfromunit(unit)
    buildfromunit = classmethod(buildfromunit)
class pofile(pocommon.pofile):
    """A PO store backed by libgettextpo.

    Native state lives in ``_gpo_memory_file`` (the in-memory PO file) and
    ``_gpo_message_iterator`` (an insertion cursor into it); the Python-side
    ``self.units`` list mirrors the native messages.
    """
    UnitClass = pounit
    def __init__(self, inputfile=None, encoding=None, unitclass=pounit):
        # Initialise the native handles first so __del__/_free_iterator are
        # safe even if the base constructor raises.
        self._gpo_memory_file = None
        self._gpo_message_iterator = None
        super(pofile, self).__init__(inputfile=inputfile, encoding=encoding)
        if inputfile is None:
            # No input: start from a fresh native file with an iterator
            # positioned for appending messages (parse() builds these
            # itself when an input file is given).
            self._gpo_memory_file = gpo.po_file_create()
            self._gpo_message_iterator = gpo.po_message_iterator(self._gpo_memory_file, None)
    def addunit(self, unit, new=True):
        # new=True also inserts the native message at the iterator position;
        # parse() passes new=False because the message is already in the
        # native file.
        if new:
            gpo.po_message_insert(self._gpo_message_iterator, unit._gpo_message)
        super(pofile, self).addunit(unit)
    def removeduplicates(self, duplicatestyle="merge"):
        """make sure each msgid is unique ; merge comments etc from duplicates into original"""
        # TODO: can we handle consecutive calls to removeduplicates()? What
        # about files already containing msgctxt? - test
        id_dict = {}
        uniqueunits = []
        # TODO: this is using a list as the pos aren't hashable, but this is slow.
        # probably not used frequently enough to worry about it, though.
        markedpos = []
        def addcomment(thepo):
            # Disambiguate by recording the unit's locations as a KDE-style
            # msgid comment.
            thepo.msgidcomment = " ".join(thepo.getlocations())
            markedpos.append(thepo)
        for thepo in self.units:
            id = thepo.getid()
            if thepo.isheader() and not thepo.getlocations():
                # header msgids shouldn't be merged...
                uniqueunits.append(thepo)
            elif id in id_dict:
                # Duplicate of an earlier unit.
                if duplicatestyle == "merge":
                    if id:
                        id_dict[id].merge(thepo)
                    else:
                        addcomment(thepo)
                        uniqueunits.append(thepo)
                elif duplicatestyle == "msgctxt":
                    # Disambiguate both units with location-based msgctxt
                    # (the original only the first time we see a clash).
                    origpo = id_dict[id]
                    if origpo not in markedpos:
                        gpo.po_message_set_msgctxt(origpo._gpo_message, " ".join(origpo.getlocations()))
                        # NOTE(review): this appends thepo, not origpo, so
                        # origpo may be re-marked on a later clash — confirm
                        # whether that is intentional.
                        markedpos.append(thepo)
                    gpo.po_message_set_msgctxt(thepo._gpo_message, " ".join(thepo.getlocations()))
                    uniqueunits.append(thepo)
            else:
                # First occurrence; empty msgids still get disambiguated.
                if not id:
                    if duplicatestyle == "merge":
                        addcomment(thepo)
                    else:
                        gpo.po_message_set_msgctxt(thepo._gpo_message, " ".join(thepo.getlocations()))
                id_dict[id] = thepo
                uniqueunits.append(thepo)
        # Rebuild the native file so it contains only the surviving units,
        # then swap it (and a fresh iterator) in.
        new_gpo_memory_file = gpo.po_file_create()
        new_gpo_message_iterator = gpo.po_message_iterator(new_gpo_memory_file, None)
        for unit in uniqueunits:
            gpo.po_message_insert(new_gpo_message_iterator, unit._gpo_message)
        gpo.po_message_iterator_free(self._gpo_message_iterator)
        self._gpo_message_iterator = new_gpo_message_iterator
        self._gpo_memory_file = new_gpo_memory_file
        self.units = uniqueunits
    def __str__(self):
        # Serialise by writing the native file to a temporary file on disk
        # and reading the result back (libgettextpo writes to real files).
        def obsolete_workaround():
            # Remove all items that are not output by msgmerge when a unit is obsolete. This is a work
            # around for bug in libgettextpo
            # FIXME Do version test in case they fix this bug
            for unit in self.units:
                if unit.isobsolete():
                    gpo.po_message_set_extracted_comments(unit._gpo_message, "")
                    location = gpo.po_message_filepos(unit._gpo_message, 0)
                    while location:
                        gpo.po_message_remove_filepos(unit._gpo_message, 0)
                        location = gpo.po_message_filepos(unit._gpo_message, 0)
        outputstring = ""
        if self._gpo_memory_file:
            obsolete_workaround()
            f, fname = tempfile.mkstemp(prefix='translate', suffix='.po')
            os.close(f)
            self._gpo_memory_file = gpo.po_file_write_v2(self._gpo_memory_file, fname, xerror_handler)
            f = open(fname)
            outputstring = f.read()
            f.close()
            os.remove(fname)
        return outputstring
    def isempty(self):
        """Returns True if the object doesn't contain any translation units."""
        if len(self.units) == 0:
            return True
        # Skip the first unit if it is a header.
        if self.units[0].isheader():
            units = self.units[1:]
        else:
            units = self.units
        # Any non-blank, non-obsolete unit makes the store non-empty.
        for unit in units:
            if not unit.isblank() and not unit.isobsolete():
                return False
        return True
    def parse(self, input):
        # ``input`` may be a file object, a filename, or a string of PO
        # source; strings are round-tripped through a temporary file because
        # libgettextpo only reads real files.
        if hasattr(input, 'name'):
            self.filename = input.name
        elif not getattr(self, 'filename', ''):
            self.filename = ''
        if hasattr(input, "read"):
            posrc = input.read()
            input.close()
            input = posrc
        needtmpfile = not os.path.isfile(input)
        if needtmpfile:
            # This is not a file - we write the string to a temporary file
            fd, fname = tempfile.mkstemp(prefix='translate', suffix='.po')
            os.write(fd, input)
            input = fname
            os.close(fd)
        self._gpo_memory_file = gpo.po_file_read_v3(input, xerror_handler)
        if self._gpo_memory_file is None:
            print >> sys.stderr, "Error:"
        if needtmpfile:
            os.remove(input)
        # Handle xerrors here
        # Pick up the declared charset from the PO header, if present.
        self._header = gpo.po_file_domain_header(self._gpo_memory_file, None)
        if self._header:
            charset = gpo.po_header_field(self._header, "Content-Type")
            if charset:
                charset = re.search("charset=([^\\s]+)", charset).group(1)
            self._encoding = encodingToUse(charset)
        # Wrap every native message in a pounit; new=False because the
        # messages already live in the native file.
        self._gpo_message_iterator = gpo.po_message_iterator(self._gpo_memory_file, None)
        newmessage = gpo.po_next_message(self._gpo_message_iterator)
        while newmessage:
            newunit = pounit(gpo_message=newmessage, encoding=self._encoding)
            self.addunit(newunit, new=False)
            newmessage = gpo.po_next_message(self._gpo_message_iterator)
        self._free_iterator()
    def __del__(self):
        # We currently disable this while we still get segmentation faults.
        # Note that this is definitely leaking memory because of this.
        return
        self._free_iterator()
        if self._gpo_memory_file is not None:
            gpo.po_file_free(self._gpo_memory_file)
            self._gpo_memory_file = None
    def _free_iterator(self):
        # We currently disable this while we still get segmentation faults.
        # Note that this is definitely leaking memory because of this.
        return
        if self._gpo_message_iterator is not None:
            gpo.po_message_iterator_free(self._gpo_message_iterator)
            self._gpo_message_iterator = None
| |
from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlparse,
urlencode as original_urlencode)
# Matches one ETag, optionally weak ("W/" prefix); group 1 is the value
# between the double quotes.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Building blocks for the three HTTP date formats (RFC2616 section 3.3.1).
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'  # asctime pads one-digit days with a space
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'  # RFC850 uses two-digit years
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
    """
    Unicode-aware version of urllib.quote(): the url is UTF-8 encoded
    before quoting, and the result can safely be handed to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    quoted = quote(force_str(url), force_str(safe))
    return force_text(quoted)
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
    """
    Unicode-aware version of urllib.quote_plus(): the url is UTF-8 encoded
    before quoting, and the result can safely be handed to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    quoted = quote_plus(force_str(url), force_str(safe))
    return force_text(quoted)
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
    """
    Inverse of django.utils.http.urlquote(): unquote and return text.
    """
    unquoted = unquote(force_str(quoted_url))
    return force_text(unquoted)
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
    """
    Inverse of django.utils.http.urlquote_plus(): unquote and return text.
    """
    unquoted = unquote_plus(force_str(quoted_url))
    return force_text(unquoted)
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
    """
    Unicode-aware version of urllib.urlencode(): every key and value is
    coerced to a UTF-8 encoded bytestring before normal encoding.
    """
    # Normalise the input to an iterable of (key, value) pairs.
    if isinstance(query, MultiValueDict):
        pairs = query.lists()
    elif hasattr(query, 'items'):
        pairs = query.items()
    else:
        pairs = query
    encoded = []
    for key, value in pairs:
        if isinstance(value, (list, tuple)):
            value = [force_str(item) for item in value]
        else:
            value = force_str(value)
        encoded.append((force_str(key), value))
    return original_urlencode(encoded, doseq)
def cookie_date(epoch_seconds=None):
    """
    Format a timestamp for Netscape's cookie standard:
    'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.

    epoch_seconds is a float of seconds since the epoch in UTC (as from
    time.time()); None means the current time.
    """
    stamp = formatdate(epoch_seconds)
    # Re-join the RFC 2822 date pieces with dashes and a GMT suffix.
    return '{0}-{1}-{2} GMT'.format(stamp[:7], stamp[8:11], stamp[12:25])
def http_date(epoch_seconds=None):
    """
    Format a timestamp per RFC1123 (HTTP RFC2616 section 3.3.1):
    'Wdy, DD Mon YYYY HH:MM:SS GMT'.

    epoch_seconds is a float of seconds since the epoch in UTC (as from
    time.time()); None means the current time.
    """
    # formatdate() emits 'Wdy, DD Mon YYYY HH:MM:SS -0000'; swap the
    # numeric offset for the literal 'GMT' RFC1123 expects.
    return '{0} GMT'.format(formatdate(epoch_seconds)[:25])
def parse_http_date(date):
    """
    Parse a date in any of the three formats allowed by RFC2616 section
    3.3.1 and return an integer of seconds since the epoch, in UTC.

    Raises ValueError for unparseable input.
    """
    # email.utils.parsedate only handles RFC1123, but RFC2616 mandates
    # RFC850 and asctime support too, so match all three ourselves.
    for pattern in (RFC1123_DATE, RFC850_DATE, ASCTIME_DATE):
        matched = pattern.match(date)
        if matched is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(matched.group('year'))
        # Two-digit years (RFC850): pivot at 70, POSIX style.
        if year < 100:
            year += 2000 if year < 70 else 1900
        parsed = datetime.datetime(
            year,
            MONTHS.index(matched.group('mon').lower()) + 1,
            int(matched.group('day')),
            int(matched.group('hour')),
            int(matched.group('min')),
            int(matched.group('sec')),
        )
        return calendar.timegm(parsed.utctimetuple())
    except Exception:
        six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        return None
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Convert a base 36 string to an ``int``. Raise ``ValueError`` if the
    input won't fit into an int.
    """
    # 13 base36 digits suffice for any 64-bit integer; reject longer input
    # up front so a huge string cannot burn server CPU in int().
    too_large = len(s) > 13
    if not too_large:
        value = int(s, 36)
        # On Python 2, also refuse values that would silently become a
        # long (#15067); Python 3 has no separate long type.
        too_large = six.PY2 and value > sys.maxint
    if too_large:
        raise ValueError("Base36 input too large")
    return value
def int_to_base36(i):
    """
    Convert a non-negative integer to its base36 string representation.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if six.PY2:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    # Find the largest power of 36 that does not exceed i.
    factor = 0
    while 36 ** (factor + 1) <= i:
        factor += 1
    # Peel off one base36 digit per power, most significant first.
    chars = []
    while factor >= 0:
        power = 36 ** factor
        chars.append(digits[i // power])
        i %= power
        factor -= 1
    return ''.join(chars)
def urlsafe_base64_encode(s):
    """
    Encode a bytestring in URL-safe base64, stripping trailing padding
    ('=') and newline characters.
    """
    encoded = base64.urlsafe_b64encode(s)
    return encoded.rstrip(b'\n=')
def urlsafe_base64_decode(s):
    """
    Decode a base64 encoded string, adding back any trailing '=' padding
    that urlsafe_base64_encode stripped. Raises ValueError on bad input.
    """
    data = s.encode('utf-8')  # base64encode should only return ASCII.
    padded = data.ljust(len(data) + len(data) % 4, b'=')
    try:
        return base64.urlsafe_b64decode(padded)
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)
def parse_etags(etag_str):
    """
    Parse a string containing one or more etags (as found in If-None-Match
    and If-Match headers, per RFC 2616). Returns the etags without their
    surrounding double quotes (") and with \\<CHAR> escapes resolved.
    """
    found = ETAG_MATCH.findall(etag_str)
    if not found:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    return [tag.encode('ascii').decode('unicode_escape') for tag in found]
def quote_etag(etag):
    """
    Wrap a string in double quotes, escaping embedded backslashes and
    double quotes.
    """
    escaped = etag.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    return '"%s"' % escaped
def same_origin(url1, url2):
    """
    Return True when the two URLs share scheme, hostname and port.
    """
    first, second = urlparse(url1), urlparse(url2)
    try:
        # .port can raise ValueError for a malformed port, hence the guard.
        return ((first.scheme, first.hostname, first.port) ==
                (second.scheme, second.hostname, second.port))
    except ValueError:
        return False
def is_safe_url(url, host=None):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host and uses a safe scheme).

    Always returns ``False`` on an empty url.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    # Chrome treats \ completely as /
    url = url.replace('\\', '/')
    # Chrome considers any URL with more than two leading slashes to be
    # absolute, but urlparse is not so flexible: treat such URLs as unsafe.
    if url.startswith('///'):
        return False
    parts = urlparse(url)
    # Forbid URLs like http:///example.com - a scheme without a hostname.
    # urlparse sees example.com as a path component there, but Chrome would
    # still treat it as the hostname, so this syntax must not be allowed.
    if parts.scheme and not parts.netloc:
        return False
    # Forbid URLs starting with control characters: some browsers (like
    # Chrome) skip them and could interpret the URL as scheme-relative.
    if unicodedata.category(url[0])[0] == 'C':
        return False
    host_ok = not parts.netloc or parts.netloc == host
    scheme_ok = not parts.scheme or parts.scheme in ['http', 'https']
    return host_ok and scheme_ok
| |
# -*- coding: utf-8 -*-
"""
Copyright 2018 Dispel, LLC
Apache 2.0 License, see https://github.com/dispel/jak/blob/master/LICENSE for details.
"""
import os
import click
from . import helpers
from . import outputs
from . import __version_full__
from . import diff as diff_logic
from . import decorators
from . import start as start_logic
from . import crypto_services as cs
from .exceptions import JakException
# Let click answer to both -h and --help for every command.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
class JakGroup(click.Group):
    """click.Group subclass that overrides list_commands so the help text
    shows commands in a usability-driven order instead of alphabetically."""

    def list_commands(self, ctx):
        """Return command names: the priority commands first, everything
        else after them in alphabetical order."""
        # These are the ones we care about having first for usability reasons
        priority = ['start', 'keygen', 'encrypt', 'decrypt', 'stomp', 'shave', 'diff']
        remainder = set(self.commands) - set(priority)
        return priority + sorted(remainder)
@click.group(invoke_without_command=True,
             context_settings=CONTEXT_SETTINGS,
             no_args_is_help=True,
             cls=JakGroup)
@click.option('-v', '--version', is_flag=True)
def main(version):
    """(c) Dispel LLC (Apache-2.0)
    Jak is a CLI tool for securely encrypting files.
    To get started I recommend typing "jak start" (preferably while your
    working directory is a git repository).
    Jaks intended use is for secret files in git repos that developers do
    not want to enter their permanent git history. But nothing prevents
    jak from being used outside of git.
    \b
    For more information about a certain command use:
    $> jak COMMAND --help
    For full documentation see https://github.com/dispel/jak
    """
    # -v/--version just prints the version; with no arguments at all click
    # shows the help text instead (no_args_is_help=True).
    if version:
        click.echo(__version_full__)
@main.command()
def start():
    """Initializes jak in your working directory."""
    click.echo('''- - - Welcome to jak - - -
"jak start" does a couple of things:
1. jakfile: File with per working directory settings for jak.
2. keyfile: Holds the key used to encrypt files.
''')
    # Create the jakfile in the jak working directory.
    jwd = helpers.get_jak_working_directory()
    click.echo(start_logic.create_jakfile(jwd=jwd))
    # git integration: make sure the ".jak" keyfile ends up in .gitignore
    # so the encryption key is never committed.
    if not os.path.exists('{}/.git'.format(jwd)):
        msg = helpers.two_column('Is this a git repository?', 'Nope!')
        msg += '\n jak says: I work great with git, but you do you.'
        click.echo(msg)
    else:
        click.echo(helpers.two_column('Is this a git repository?', 'Yep!'))
        if helpers.does_jwd_have_gitignore(cwd=jwd):
            start_logic.add_keyfile_to_gitignore(filepath=jwd + '/.gitignore')
            click.echo(helpers.two_column(' Adding ".jak" to .gitignore', 'Done'))
        else:
            # No .gitignore yet: create one that ignores the keyfile.
            helpers.create_or_overwrite_file(filepath=jwd + '/.gitignore',
                                             content='# Jak KeyFile\n .jak \n')
            click.echo(helpers.two_column(' Creating ./.gitignore', 'Done'))
            click.echo(helpers.two_column(' Adding ".jak" to .gitignore', 'Done'))
        # Optionally install a pre-commit hook that encrypts before commits.
        if start_logic.want_to_add_pre_commit_encrypt_hook():
            click.echo('\n' + start_logic.add_pre_commit_encrypt_hook(jwd))
    click.echo(outputs.FINAL_START_MESSAGE.format(version=__version_full__))
@main.command()
@click.option('-m', '--minimal', is_flag=True)
def keygen(minimal):
    """Generate a strong key for use with jak.
    You can keep the key wherever, but I would recommend putting it
    in a .gitignored keyfile that your jakfile points to.
    Do not add this key to your git repository. Nor should you ever give it
    to anyone who should not have access. Remember, if you give someone a key
    they can look at your git history and encrypt files encrypted with that key
    that happened in the past. If your current or past keys get out, I would
    recommend cycling your secrets and your keys.
    In fact I would recommend cycling your keys every so often (3-6 months)
    anyway, just as a standard best practice. But in reality very few developers
    actually do this. =(
    """
    key = helpers.generate_256bit_key().decode('utf-8')
    # --minimal prints only the key (script-friendly); the default wraps it
    # in a human-friendly explanation.
    if minimal:
        output = key
    else:
        output = outputs.KEYGEN_RESPONSE.format(key=key)
    click.echo(output)
@decorators.attach_jwd
@decorators.read_jakfile
@decorators.select_key
@decorators.select_files
def encrypt_inner(files, key, **kwargs):
    """Encrypt every selected file; the decorators resolve the working
    directory, jakfile, key and file selection before we run."""
    for target in files:
        # One failing file (JakException) is reported but does not stop
        # the remaining files from being processed.
        try:
            outcome = cs.encrypt_file(filepath=target, key=key, **kwargs)
        except JakException as error:
            click.echo(error)
        else:
            click.echo(outcome)
@main.command(help='jak encrypt <file>')
@click.argument('filepaths', nargs=-1)
@click.option('-k', '--key', default=None, metavar='<string>')
@click.option('-kf', '--keyfile', default=None, metavar='<file_path>')
def encrypt(filepaths, key, keyfile):
    """Encrypt file(s)"""
    # Each file is encrypted independently so one failure (surfaced as a
    # JakException) does not stop the rest.
    for filepath in filepaths:
        try:
            encrypt_inner(all_or_filepath=filepath, key=key, keyfile=keyfile)
        except JakException as je:
            click.echo(je)
@decorators.attach_jwd
@decorators.read_jakfile
@decorators.select_key
@decorators.select_files
def decrypt_inner(files, key, **kwargs):
    """Decrypt every selected file; the decorators resolve the working
    directory, jakfile, key and file selection before we run."""
    for target in files:
        # One failing file (JakException) is reported but does not stop
        # the remaining files from being processed.
        try:
            outcome = cs.decrypt_file(filepath=target, key=key, **kwargs)
        except JakException as error:
            click.echo(error)
        else:
            click.echo(outcome)
@main.command(help='jak decrypt <file>')
@click.argument('filepaths', nargs=-1)
@click.option('-k', '--key', default=None, metavar='<string>')
@click.option('-kf', '--keyfile', default=None, metavar='<file_path>')
def decrypt(filepaths, key, keyfile):
    """Decrypt file(s)"""
    # Each file is decrypted independently so one failure (surfaced as a
    # JakException) does not stop the rest.
    for filepath in filepaths:
        try:
            decrypt_inner(all_or_filepath=filepath, key=key, keyfile=keyfile)
        except JakException as je:
            click.echo(je)
@main.command()
@click.option('-k', '--key', default=None, metavar='<string>')
@click.option('-kf', '--keyfile', default=None, metavar='<file_path>')
def stomp(key, keyfile):
    """Alias for 'jak encrypt all'"""
    # 'all' makes the select_files decorator pick every file in the jakfile.
    try:
        encrypt_inner(all_or_filepath='all', key=key, keyfile=keyfile)
    except JakException as je:
        click.echo(je)
@main.command()
@click.option('-k', '--key', default=None, metavar='<string>')
@click.option('-kf', '--keyfile', default=None, metavar='<file_path>')
def shave(key, keyfile):
    """Alias for 'jak decrypt all'"""
    # 'all' makes the select_files decorator pick every file in the jakfile.
    try:
        decrypt_inner(all_or_filepath='all', key=key, keyfile=keyfile)
    except JakException as je:
        click.echo(je)
@main.command(options_metavar='<options>')
@click.argument('conflicted_file', metavar='<conflicted_file>')
@click.option('-k', '--key', default=None, metavar='<string>')
@click.option('-kf', '--keyfile', default=None, metavar='<file_path>')
def diff(conflicted_file, key, keyfile):
    """Decrypt conflicted file for an easier merge.
    \b
    Supported merge tools:
    plain: Just decrypted and you can sort it out in a text editor. (default)
    opendiff: macOS built in FileMerge GUI tool.
    vimdiff: I decrypt and give you the vimdiff command to run to finish the merge.
    """
    # A JakException is shown to the user like a normal result, not a traceback.
    try:
        result = diff_logic.diff(filepath=conflicted_file, key=key, keyfile=keyfile)
    except JakException as je:
        result = je
    click.echo(result)
| |
"""
Kind of like htmlgen, only much simpler. The only important symbol
that is exported is ``html``.
This builds ElementTree nodes, but with some extra useful methods.
(Open issue: should it use ``ElementTree`` more, and the raw
``Element`` stuff less?)
You create tags with attribute access. I.e., the ``A`` anchor tag is
``html.a``. The attributes of the HTML tag are done with keyword
arguments. The contents of the tag are the non-keyword arguments
(concatenated). You can also use the special ``c`` keyword, passing a
list, tuple, or single tag, and it will make up the contents (this is
useful because keywords have to come after all non-keyword arguments,
which is non-intuitive). Or you can chain them, adding the keywords
with one call, then the body with a second call, like::
>>> str(html.a(href='http://yahoo.com')('<Yahoo>'))
'<a href="http://yahoo.com"><Yahoo></a>'
Note that strings will be quoted; only tags given explicitly will
remain unquoted.
If the value of an attribute is None, then no attribute
will be inserted. So::
>>> str(html.a(href='http://www.yahoo.com', name=None,
... c='Click Here'))
'<a href="http://www.yahoo.com">Click Here</a>'
If the value is None, then the empty string is used. Otherwise str()
is called on the value.
``html`` can also be called, and it will produce a special list from
its arguments, which adds a ``__str__`` method that does ``html.str``
(which handles quoting, flattening these lists recursively, and using
'' for ``None``).
``html.comment`` will generate an HTML comment, like
``html.comment('comment text')`` -- note that it cannot take keyword
arguments (because they wouldn't mean anything).
Examples::
>>> str(html.html(
... html.head(html.title("Page Title")),
... html.body(
... bgcolor='#000066',
... text='#ffffff',
... c=[html.h1('Page Title'),
... html.p('Hello world!')],
... )))
'<html><head><title>Page Title</title></head><body bgcolor="#000066" text="#ffffff"><h1>Page Title</h1><p>Hello world!</p></body></html>'
>>> str(html.a(href='#top')('return to top'))
'<a href="#top">return to top</a>'
"""
from __future__ import absolute_import
import xml.etree.ElementTree as ET
import six
from six.moves import map
try:
from html import escape
except ImportError: # Python < 3.2
from cgi import escape
__all__ = ['html']
default_encoding = 'utf-8'
class _HTML:
    """Tag factory: attribute access (``html.div``) yields an empty
    ``Element``; calling the object wraps its arguments in an
    ``ElementList``. Also hosts the quoting/flattening helpers."""
    def __getattr__(self, attr):
        # Private-looking names are real attribute errors, not tags.
        if attr.startswith('_'):
            raise AttributeError
        attr = attr.lower()
        # A trailing underscore lets callers spell reserved words (class_).
        if attr.endswith('_'):
            attr = attr[:-1]
        # Double underscore becomes a namespace colon (xml__lang -> xml:lang).
        if '__' in attr:
            attr = attr.replace('__', ':')
        if attr == 'comment':
            return Element(ET.Comment, {})
        else:
            return Element(attr, {})
    def __call__(self, *args):
        return ElementList(args)
    def quote(self, arg):
        # HTML-escape arg; None becomes the empty string.
        if arg is None:
            return ''
        if six.text_type is not str:  # Python 2
            arg = six.text_type(arg).encode(default_encoding)
        return escape(arg, True)
    def str(self, arg, encoding=None):
        # Flatten arg (string, None, list/tuple, or Element) into a native
        # str, recursing through sequences; None renders as ''.
        if isinstance(arg, six.string_types):
            if not isinstance(arg, str):
                arg = arg.encode(default_encoding)
            return arg
        elif arg is None:
            return ''
        elif isinstance(arg, (list, tuple)):
            return ''.join(map(self.str, arg))
        elif isinstance(arg, Element):
            return str(arg)
        else:
            # Anything else: coerce through text first, then to native str.
            arg = six.text_type(arg)
            if not isinstance(arg, str):  # Python 2
                arg = arg.encode(default_encoding)
            return arg
# The module's public entry point (see the module docstring).
html = _HTML()
class Element(ET.Element
              if isinstance(ET.Element, type) else ET._ElementInterface):
    """ElementTree element with call chaining: calling an element returns a
    copy with extra attributes (keyword args) and contents (positional args
    or the ``c`` keyword) merged in. See the module docstring."""
    def __call__(self, *args, **kw):
        el = self.__class__(self.tag, self.attrib)
        # 'c' is an alternative way to pass the contents (useful because
        # keywords must come after positional arguments).
        if 'c' in kw:
            if args:
                raise ValueError(
                    "You may either provide positional arguments or a "
                    "'c' keyword argument, but not both")
            args = kw.pop('c')
            if not isinstance(args, (list, tuple)):
                args = (args,)
        for name, value in list(kw.items()):
            # A None value means "omit this attribute entirely".
            if value is None:
                del kw[name]
                continue
            kw[name] = six.text_type(value)
            # Same renaming rules as _HTML.__getattr__: trailing underscore
            # for reserved words, double underscore for namespace colons.
            if name.endswith('_'):
                kw[name[:-1]] = value
                del kw[name]
            if '__' in name:
                new_name = name.replace('__', ':')
                kw[new_name] = value
                del kw[name]
        el.attrib.update(kw)
        el.text = self.text
        last = None
        for item in list(self):
            last = item
            el.append(item)
        # Append the new contents: per the ElementTree model, strings go
        # into .text (before any child) or the preceding child's .tail,
        # while elements become children.
        for arg in flatten(args):
            if arg is None:
                continue
            if not ET.iselement(arg):
                if last is None:
                    if el.text is None:
                        el.text = six.text_type(arg)
                    else:
                        el.text += six.text_type(arg)
                else:
                    if last.tail is None:
                        last.tail = six.text_type(arg)
                    else:
                        last.tail += six.text_type(arg)
            else:
                last = arg
                el.append(last)
        return el
    if six.text_type is str:  # Python 3
        def __str__(self):
            return ET.tostring(
                self, default_encoding).decode(default_encoding)
    else:
        def __str__(self):
            return ET.tostring(self, default_encoding)
        def __unicode__(self):
            # This is lame!
            return str(self).decode(default_encoding)
    def __repr__(self):
        content = str(self)
        # Truncate long serialisations so reprs stay readable.
        if len(content) > 25:
            content = repr(content[:25]) + '...'
        else:
            content = repr(content)
        return '<Element %r>' % content
class ElementList(list):
    """A list subclass whose ``str()`` flattens and renders its contents
    via ``html.str`` (handling quoting, nesting, and None)."""

    def __str__(self):
        return html.str(self)

    def __repr__(self):
        return 'ElementList(%s)' % super(ElementList, self).__repr__()
def flatten(items):
    """Yield the leaves of arbitrarily nested lists/tuples in order."""
    for entry in items:
        if not isinstance(entry, (list, tuple)):
            yield entry
        else:
            # Recurse into nested sequences and forward their leaves.
            for leaf in flatten(entry):
                yield leaf
| |
# The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# What is piece-wise key generation?
#
# Using this method, it is possible for an untrusted source to generate
# addresses.
#
# For example, Alice could use the following to generate a vanity address (an
# address that has a specific format) recruiting assistance from outside
# users. Let us assume she wants a bitcoin address beginning with 1Alice.
#
# 1. Alice generates a private key PrivKeyA (with public key PubKeyA)
# 2. Alice then provides the Public Key PubKeyA to Bob, the owner of a large
# vanity address farm
# 3. Bob then uses his farm to find a private key PrivKeyB such that:
# get_address(PubKeyA, PrivKeyB).startswith('1Alice')
# 4. Bob can then give PrivKeyB to Alice
# 5. Alice computes the new private key PrivKeyVanity:
# combine_private_keys(PrivKeyA, PrivKeyB)
#
# The new private key PrivKeyVanity has an address beginning 1Alice, without
# Bob knowing what the new private key is. Only Alice, with PrivKeyVanity, can
# spend the address' funds.
import base64
from .base58 import decode_check, encode_check
from .hash import sha256d
from .ecdsa import SECP256k1 as curve
from .ecdsa import ellipticcurve
from .ecdsa.util import randrange, string_to_number, number_to_string
from .key import privkey_from_wif, privkey_to_wif, publickey_to_address
__all__ = ['get_address', 'combine_private_keys']
def get_address(public_key, private_key, version = chr(0)):
    'Returns the address generated by combining a public key and private key.'

    # Only uncompressed (0x04-prefixed, 65-byte) public keys are supported.
    if len(public_key) != 65 or public_key[0] != chr(0x04):
        raise ValueError('public key must be decompressed')

    # Recover the elliptic curve point behind the supplied public key...
    px = string_to_number(public_key[1:1 + curve.baselen])
    py = string_to_number(public_key[1 + curve.baselen:])
    point_a = ellipticcurve.Point(curve.curve, px, py, curve.order)

    # ...and the point for the private key's own public key.
    secexp = string_to_number(privkey_from_wif(private_key))
    point_b = curve.generator * secexp

    # The combined public key is the sum of the two curve points.
    summed = point_a + point_b
    key_combined = (chr(0x04) +
                    number_to_string(summed.x(), curve.order) +
                    number_to_string(summed.y(), curve.order))

    # return the combined public key's address
    return publickey_to_address(key_combined, version = version)
def combine_private_keys(private_keys):
    'Returns the private key generated by combining two private keys.'

    # WIF -> binary -> secret exponent for every key
    secexps = [string_to_number(privkey_from_wif(wif)) for wif in private_keys]

    # sum the exponents modulo the curve order
    summed = sum(secexps) % curve.order

    # re-encode the combined exponent as a WIF private key
    return privkey_to_wif(number_to_string(summed, curve.order))
def split_private_key(private_key, count = 2):
    '''Splits a private key up into count private keys, all of which are
       required to be combined back into the original key.'''

    # decode the key into its secret exponent
    secexp = string_to_number(privkey_from_wif(private_key))

    # pick (count - 1) random exponents, then solve for the final one so
    # that all of them sum (mod curve order) to the original exponent
    parts = [randrange(curve.order) for _ in xrange(count - 1)]
    parts.append((secexp - sum(parts)) % curve.order)

    # re-encode every share as a WIF private key
    return [privkey_to_wif(number_to_string(p, curve.order)) for p in parts]
# Experimental Idea - Partial Key-Sets
#
# Nothing beyond this point should be considered anything more than me
# thinking out loud.
#
# A private key P is broken up into N key-sets (each of m keys). Any m of N
# sets is sufficient to recreate P.
#
# Use case - Redundant Key:
# Alice could take a private key and break it into a 3-of-5 partial
# key-set. Now she can place 4 of these 5 key-sets in 4 separate
# safety deposit boxes in 4 separate banks and keep one at her home.
#
# To steal the key, a robber would need to steal 3 of the key-sets from
# 3 separate locations; upon hearing a bank was broken into and 1 of her
# key-sets stolen, she could quickly visit 2 of the other banks and
# recover the key-sets before the robber.
#
# If two of the banks catch fire, flood or become bankrupt and close,
# Alice may visit the remaining 2 banks to recover her key.
#
# Use case - Easter Egg Hunt
# Bob wishes to host a bitcoin Easter Egg hunt. So he creates a partial
# key-set, requiring 12 of 250 key-sets. Printing the 250 key-sets on small
# sheets (each with 12 QR codes) he hides the sheets around a park.
#
# Contestants may then hunt to find them, the first to acquire 12 of the
# sheets wins, and is able to claim the funds.
# Future consideration - Larger m range
#
# Currently, m is restricted to a maximum of 255. Future versions could use
# a different prefix (eg. 0x1002) that would maintain the 6C prefix, but would
# indicate to use 2-bytes for the required count, stealing a byte from the
# checksum, to maintain the length.
#
# Future Consideration - Compressed key-sets
#
# For the above Easter Egg Hunt example, a new format could be used to more
# efficiently encode a key-set into a QR code, since the checksum and required
# are identical for all keys within it. This could be done at the application
# level.
# Valid prefix is in the range [0x0ff7, 0x1002] for prefix 6C
# Valid prefix is in the range [0x113c, 0x1148] for prefix 6c
def partial_split_private_key(private_key, required, total):
    '''Returns a partial split set of addresses, which needs required of the
    total keys to recreate the original key.
    The result is a list of sets of private keys, such that:
    len(result) = total
    len(result[n]) = required
    result[n][i].startswith('6C') # (ie. each key has the prefix 6C)
    To recreate the original key, the keys from the required number of sets
    must be passed into partial_combine_private_keys.
    This is EXPERIMENTAL, and may change in the future.'''
    if required > total:
        raise ValueError('required cannot be larger than total')
    # calculate a checksum: first 4 bytes of sha256d over the raw key, so
    # the combiner can verify that a recombined key matches the original
    checksum = sha256d(privkey_from_wif(private_key))[:4]
    # encode the private key with extra information embedded
    # (ie. this key's index, the required number of keys and a checksum)
    # Layout: 2-byte prefix \x10\x01, 1-byte split index, 1-byte required
    # count, 4-byte checksum, then the raw key material.
    def partial_encode_key(k, index):
        pk = privkey_from_wif(k)
        pk = '\x10\x01' + chr(index) + chr(required) + checksum + pk
        return encode_check(pk)
    # get a random set of keys that combine to the private key
    # (each call to split_private_key yields a fresh, independent split)
    def get_keys(index):
        keys = split_private_key(private_key, required)
        return [partial_encode_key(k, index) for k in keys]
    # total sets of required keys
    keys = [get_keys(i) for i in xrange(0, total)]
    # create linearly independent immutable sets of keys
    # (group i holds the j-th key of split number (i + j) % total, so
    # consecutive groups share no key yet together complete whole splits)
    groups = []
    for i in xrange(0, total):
        group = []
        for j in xrange(0, required):
            group.append(keys[(i + j) % total][j])
        groups.append(frozenset(group))
    return tuple(groups)
def partial_combine_private_keys(private_keys, ignore_errors = False):
    '''Returns the combined private key from the relevant private keys in
    private_keys, or None if insufficient private keys are provided.
    Unless ignore_errors is set (default is False), any key that does not
    fit with the rest of the keys raises a ValueError; with ignore_errors
    such keys are silently skipped.
    This is EXPERIMENTAL, and may change in the future.'''
    # maps split index -> set of raw key fragments collected so far
    parts = dict()
    required = None
    checksum = None
    # for each key...
    for key in private_keys:
        # ...convert private keys to binary form
        private_key = decode_check(key)
        # bytes 0-1 must carry the partial-key prefix \x10\x01
        if not private_key.startswith('\x10\x01'):
            raise ValueError('invalid combined key: %s' % key)
        # ...verify the required number of keys (byte 3)
        r = ord(private_key[3])
        if required is None:
            required = r
        elif required != r:
            if ignore_errors:
                continue
            raise ValueError('key does not match set: %s' % key)
        # ...verify the checksum (bytes 4-7)
        c = private_key[4:8]
        if checksum is None:
            checksum = c
        elif checksum != c:
            if ignore_errors:
                continue
            raise ValueError('key checksum does not match set: %s' % key)
        # ...add this key to the correct key-set (split index is byte 2)
        index = ord(private_key[2])
        if index not in parts: parts[index] = set()
        parts[index].add(private_key[8:])
    # find (if any) a complete key-set
    for group in parts.values():
        if len(group) == required:
            # combine the private keys and wif encode it
            # (fragments recombine by summing modulo the curve order)
            secexp = sum(string_to_number(k) for k in group) % curve.order
            private_key = number_to_string(secexp, curve.order)
            # guard against a wrong recombination (e.g. mixed splits)
            if sha256d(private_key)[:4] != checksum:
                raise ValueError('checksum does not match')
            return privkey_to_wif(private_key)
    return None
def partial_split_qr_encode(private_keys):
    'Encode a partial key-set appropriate for a QR code.'
    # All keys must share the \x10\x01 prefix, the same required count and
    # the same checksum; those common fields are emitted once in the header.
    required = None
    checksum = None
    binary = []
    for private_key in map(decode_check, private_keys):
        if private_key[0:2] != '\x10\x01':
            raise ValueError('invalid combined key')
        if required is None:
            required = ord(private_key[3])
        elif ord(private_key[3]) != required:
            raise ValueError('unmatched private keys')
        if checksum is None:
            checksum = private_key[4:8]
        elif private_key[4:8] != checksum:
            raise ValueError('unmatched private keys')
        # keep (split index, raw fragment); the index sorts the fragments
        binary.append((ord(private_key[2]), private_key[8:]))
    binary.sort()
    # find the first gap in the sorted split indexes; the decoder re-assigns
    # indexes sequentially and skips this one.
    # NOTE(review): only a single missing index can be represented — assumes
    # a key-set has at most one gap; confirm against the splitter's output.
    missing = len(binary)
    for i in xrange(0, len(binary)):
        if binary[i][0] != i:
            missing = i
            break
    # header: 3 magic bytes, missing index, required count, 4-byte checksum
    qr = '\x84\x7c\x20' + chr(missing) + chr(required) + checksum
    for private_key in binary:
        qr += private_key[1]
    # base32 output; trailing '=' padding is stripped (the decoder restores it)
    return base64.b32encode(qr).strip('=')
def partial_split_qr_decode(qr_code):
    '''Decode a partial key-set QR code produced by partial_split_qr_encode.

    Returns a set of check-encoded partial private keys.'''
    # Restore the base32 '=' padding that the encoder stripped: a valid
    # base32 string must be a multiple of 8 characters long.
    # Bug fix: the previous formula `(len(qr_code) * 5 - 9 * 8) % 32`
    # produced wrong pad lengths (e.g. a 9-byte header alone strips to 15
    # characters and needs 1 '=', but the formula yielded 3), making
    # b32decode reject every stripped code.
    padding = (8 - len(qr_code) % 8) % 8
    binary = base64.b32decode(qr_code + ('=' * padding))
    # check the header (3 magic bytes)
    if not binary.startswith('\x84\x7c\x20'):
        raise ValueError('invalid header')
    # the missing index for this set
    missing = ord(binary[3])
    required = ord(binary[4])
    checksum = binary[5:9]
    # extract each 32-byte fragment and recompose a full partial key,
    # re-assigning sequential split indexes and skipping `missing`
    keys = set()
    start = 9
    index = 0
    while start + 32 <= len(binary):
        if index == missing: index += 1
        key = '\x10\x01' + chr(index) + chr(required) + checksum + binary[start:start + 32]
        keys.add(encode_check(key))
        start += 32
        index += 1
    return keys
| |
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tacker.sol_refactored.infra_drivers.openstack import userdata_utils
from tacker.tests import base
# Identifiers of the bundled "sample1" VNFD test package used by these tests.
SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d7000000"
SAMPLE_FLAVOUR_ID = "simple"
class TestUserDataUtils(base.BaseTestCase):
    """Unit tests for the openstack userdata_utils helper functions."""

    def setUp(self):
        super(TestUserDataUtils, self).setUp()
        # Load the sample VNFD shipped with the test sample data.
        cur_dir = os.path.dirname(__file__)
        sample_dir = os.path.join(cur_dir, "../..", "samples")
        self.vnfd_1 = userdata_utils.get_vnfd(SAMPLE_VNFD_ID,
                                              os.path.join(sample_dir,
                                                           "sample1"))

    def test_init_nfv_dict(self):
        # init_nfv_dict should build a skeleton parameter dict (all values
        # None) from the nfv get_param references in the base HOT template.
        hot_dict = self.vnfd_1.get_base_hot(SAMPLE_FLAVOUR_ID)
        top_hot = hot_dict['template']
        expected_result = {
            'VDU': {
                'VDU1': {'computeFlavourId': None},
                'VirtualStorage': {'vcImageId': None},
                'VDU2': {'computeFlavourId': None, 'vcImageId': None}
            },
            'CP': {
                'VDU1_CP1': {'network': None},
                'VDU1_CP2': {'network': None,
                             'fixed_ips': {0: {'subnet': None}}},
                'VDU2_CP1': {'network': None,
                             'fixed_ips': {0: {'ip_address': None}}},
                'VDU2_CP2': {'network': None,
                             'fixed_ips': {0: {'ip_address': None,
                                               'subnet': None}}}
            }
        }
        result = userdata_utils.init_nfv_dict(top_hot)
        self.assertEqual(expected_result, result)

    def test_get_param_flavor(self):
        flavor = 'm1.large'
        grant = {
            'vimAssets': {
                'computeResourceFlavours': [
                    {'vnfdVirtualComputeDescId': 'VDU1',
                     'vimFlavourId': flavor}
                ]
            }
        }
        result = userdata_utils.get_param_flavor('VDU1', SAMPLE_FLAVOUR_ID,
                                                 self.vnfd_1, grant)
        self.assertEqual(flavor, result)
        # if not exist in grant, get from VNFD
        result = userdata_utils.get_param_flavor('VDU2', SAMPLE_FLAVOUR_ID,
                                                 self.vnfd_1, grant)
        self.assertEqual('m1.tiny', result)

    def test_get_param_image(self):
        image_id = 'f30e149d-b3c7-497a-8b19-a092bc81e47b'
        grant = {
            'vimAssets': {
                'softwareImages': [
                    {'vnfdSoftwareImageId': 'VDU2',
                     'vimSoftwareImageId': image_id},
                    {'vnfdSoftwareImageId': 'VirtualStorage',
                     'vimSoftwareImageId': 'image-1.0.0-x86_64-disk'}
                ]
            }
        }
        result = userdata_utils.get_param_image('VDU2', SAMPLE_FLAVOUR_ID,
                                                self.vnfd_1, grant)
        self.assertEqual(image_id, result)

    def test_get_param_zone(self):
        grant_req = {
            'addResources': [
                {'id': 'dd60c89a-29a2-43bc-8cff-a534515523df',
                 'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'}
            ]
        }
        grant = {
            'zones': [
                {'id': '717f6ae9-3094-46b6-b070-89ede8337571',
                 'zoneId': 'nova'}
            ],
            'addResources': [
                {'resourceDefinitionId':
                    'dd60c89a-29a2-43bc-8cff-a534515523df',
                 'zoneId': '717f6ae9-3094-46b6-b070-89ede8337571'}
            ]
        }
        result = userdata_utils.get_param_zone('VDU1', grant_req, grant)
        self.assertEqual('nova', result)

    def test_get_param_capacity(self):
        # test get_current_capacity at the same time
        grant_req = {
            'addResources': [
                {'id': 'dd60c89a-29a2-43bc-8cff-a534515523df',
                 'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'},
                {'id': '49b99140-c897-478c-83fa-ba3698912b18',
                 'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'},
                {'id': 'b03c4b75-ca17-4773-8a50-9a53df78a007',
                 'type': 'COMPUTE', 'resourceTemplateId': 'VDU2'}
            ],
            'removeResources': [
                {'id': '0837249d-ac2a-4963-bf98-bc0755eec663',
                 'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'},
                {'id': '3904e9d1-c0ec-4c3c-b29e-c8942a20f866',
                 'type': 'COMPUTE', 'resourceTemplateId': 'VDU2'}
            ]
        }
        inst = {
            'instantiatedVnfInfo': {
                'vnfcResourceInfo': [
                    {'id': 'cdf36e11-f6ca-4c80-aaf1-0d2e764a2f3a',
                     'vduId': 'VDU2'},
                    {'id': 'c8cb522d-ddf8-4136-9c85-92bab8f2993d',
                     'vduId': 'VDU1'}
                ]
            }
        }
        # VDU1: 1 current + 2 added - 1 removed = 2
        result = userdata_utils.get_param_capacity('VDU1', inst, grant_req)
        self.assertEqual(2, result)
        # VDU2: 1 current + 1 added - 1 removed = 1
        result = userdata_utils.get_param_capacity('VDU2', inst, grant_req)
        self.assertEqual(1, result)

    # NOTE: renamed from test_get_parama_network (typo in the method name).
    def test_get_param_network(self):
        res_id = "8fe7cc1a-e4ac-41b9-8b89-ed14689adb9c"
        req = {
            "extVirtualLinks": [
                {
                    "id": "acf5c23a-02d3-42e6-801b-fba0314bb6aa",
                    "resourceId": res_id,
                    "extCps": [
                        {
                            "cpdId": "VDU1_CP1",
                            "cpConfig": {}  # omit
                        }
                    ]
                }
            ]
        }
        result = userdata_utils.get_param_network('VDU1_CP1', {}, req)
        self.assertEqual(res_id, result)

    def test_get_param_fixed_ips(self):
        ip_address = "10.10.1.101"
        subnet_id = "9defebca-3e9c-4bd2-9fa0-c4210c56ece6"
        ext_cp = {
            "cpdId": "VDU2_CP2",
            "cpConfig": {
                "VDU2_CP2_1": {
                    "cpProtocolData": [
                        {
                            "layerProtocol": "IP_OVER_ETHERNET",
                            "ipOverEthernet": {
                                "ipAddresses": [
                                    {
                                        "type": "IPV4",
                                        "fixedAddresses": [
                                            ip_address
                                        ],
                                        "subnetId": subnet_id
                                    }
                                ]
                            }
                        }
                    ]
                }
            }
        }
        req = {
            "extVirtualLinks": [
                {
                    "id": "8b49f4b6-1ff9-4a03-99cf-ff445b788436",
                    "resourceId": "4c54f742-5f1d-4287-bb81-37bf2e6ddc3e",
                    "extCps": [ext_cp]
                }
            ]
        }
        expected_result = [{'ip_address': ip_address, 'subnet': subnet_id}]
        result = userdata_utils.get_param_fixed_ips('VDU2_CP2', {}, req)
        self.assertEqual(expected_result, result)

    def test_apply_ext_managed_vls(self):
        hot_dict = self.vnfd_1.get_base_hot(SAMPLE_FLAVOUR_ID)
        top_hot = hot_dict['template']
        res_id = "c738c2bb-1d24-4883-a2d8-a5c7c4ee8879"
        vl = "internalVL1"
        vl_subnet = "internalVL1_subnet"
        req = {
            "extManagedVirtualLinks": [
                {
                    "id": "1c7825cf-b883-4281-b8fc-ee006df8b2ba",
                    "vnfVirtualLinkDescId": vl,
                    "resourceId": res_id
                }
            ]
        }
        # make sure before apply
        self.assertEqual({'get_resource': vl},
            top_hot['resources']['VDU1_scale_group']['properties']
            ['resource']['properties']['net3'])
        self.assertEqual({'get_resource': vl},
            top_hot['resources']['VDU2_CP3']['properties']['network'])
        self.assertIn(vl, top_hot['resources'])
        self.assertIn(vl_subnet, top_hot['resources'])
        userdata_utils.apply_ext_managed_vls(top_hot, req, {})
        # check after
        # replaced to resource id
        self.assertEqual(res_id,
            top_hot['resources']['VDU1_scale_group']['properties']
            ['resource']['properties']['net3'])
        self.assertEqual(res_id,
            top_hot['resources']['VDU2_CP3']['properties']['network'])
        # removed
        self.assertNotIn(vl, top_hot['resources'])
        self.assertNotIn(vl_subnet, top_hot['resources'])
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
import frappe.permissions
import frappe.async
from frappe import _
from frappe.utils.csvutils import getlink
from frappe.utils.dateutils import parse_date
from frappe.utils import cint, cstr, flt
from frappe.core.page.data_import_tool.data_import_tool import get_data_keys
#@frappe.async.handler
# Bug fix: the decorator was written as a bare statement `frappe.whitelist()`
# (missing the '@'), so `upload` was never actually whitelisted for RPC.
@frappe.whitelist()
def upload(rows = None, submit_after_import=None, ignore_encoding_errors=False, overwrite=None,
	ignore_links=False, pre_process=None):
	"""Upload CSV data rows and insert/update documents from them.

	Reads rows either from the `rows` argument or from the uploaded CSV
	file, builds documents (including child-table rows) and inserts or
	overwrites them. Returns {"messages": [...], "error": bool}."""
	frappe.flags.mute_emails = True
	# extra input params
	params = json.loads(frappe.form_dict.get("params") or '{}')
	if params.get("submit_after_import"):
		submit_after_import = True
	if params.get("ignore_encoding_errors"):
		ignore_encoding_errors = True
	from frappe.utils.csvutils import read_csv_content_from_uploaded_file
	def get_data_keys_definition():
		# marker keys (data separator, doctype row, column row, ...)
		return get_data_keys()
	def bad_template():
		frappe.throw(_("Please do not change the rows above {0}").format(get_data_keys_definition().data_separator))
	def check_data_length():
		# allow at most 5000 data rows per import
		max_rows = 5000
		if not data:
			frappe.throw(_("No data found"))
		elif len(data) > max_rows:
			frappe.throw(_("Only allowed {0} rows in one import").format(max_rows))
	def get_start_row():
		# first row after the data separator marker
		for i, row in enumerate(rows):
			if row and row[0]==get_data_keys_definition().data_separator:
				return i+1
		bad_template()
	def get_header_row(key):
		return get_header_row_and_idx(key)[0]
	def get_header_row_and_idx(key):
		# returns ([], -1) when the key row is absent
		for i, row in enumerate(header):
			if row and row[0]==key:
				return row, i
		return [], -1
	def filter_empty_columns(columns):
		empty_cols = filter(lambda x: x in ("", None), columns)
		if empty_cols:
			if columns[-1*len(empty_cols):] == empty_cols:
				# filter empty columns if they exist at the end
				columns = columns[:-1*len(empty_cols)]
			else:
				frappe.msgprint(_("Please make sure that there are no empty columns in the file."),
					raise_exception=1)
		return columns
	def make_column_map():
		# build column-index -> fieldname/fieldtype maps per doctype
		doctype_row, row_idx = get_header_row_and_idx(get_data_keys_definition().doctype)
		if row_idx == -1: # old style
			return
		dt = None
		for i, d in enumerate(doctype_row[1:]):
			if d not in ("~", "-"):
				if d: # value in doctype_row
					if doctype_row[i]==dt:
						# prev column is doctype (in case of parentfield)
						doctype_parentfield[dt] = doctype_row[i+1]
					else:
						dt = d
						doctypes.append(d)
						column_idx_to_fieldname[dt] = {}
						column_idx_to_fieldtype[dt] = {}
				if dt:
					column_idx_to_fieldname[dt][i+1] = rows[row_idx + 2][i+1]
					column_idx_to_fieldtype[dt][i+1] = rows[row_idx + 4][i+1]
	def get_doc(start_idx):
		# assemble a document dict starting at row `start_idx`, consuming
		# following rows that belong to it as child-table entries
		if doctypes:
			doc = {}
			for idx in xrange(start_idx, len(rows)):
				if (not doc) or main_doc_empty(rows[idx]):
					for dt in doctypes:
						d = {}
						for column_idx in column_idx_to_fieldname[dt]:
							try:
								fieldname = column_idx_to_fieldname[dt][column_idx]
								fieldtype = column_idx_to_fieldtype[dt][column_idx]
								d[fieldname] = rows[idx][column_idx]
								if fieldtype in ("Int", "Check"):
									d[fieldname] = cint(d[fieldname])
								elif fieldtype in ("Float", "Currency", "Percent"):
									d[fieldname] = flt(d[fieldname])
								elif fieldtype == "Date":
									d[fieldname] = parse_date(d[fieldname]) if d[fieldname] else None
							except IndexError:
								pass
						# scrub quotes from name and modified
						if d.get("name") and d["name"].startswith('"'):
							d["name"] = d["name"][1:-1]
						if sum([0 if not val else 1 for val in d.values()]):
							d['doctype'] = dt
							if dt == doctype:
								doc.update(d)
							else:
								if not overwrite:
									d['parent'] = doc["name"]
								d['parenttype'] = doctype
								d['parentfield'] = doctype_parentfield[dt]
								doc.setdefault(d['parentfield'], []).append(d)
				else:
					break
			return doc
		else:
			doc = frappe._dict(zip(columns, rows[start_idx][1:]))
			doc['doctype'] = doctype
			return doc
	def main_doc_empty(row):
		return not (row and ((len(row) > 1 and row[1]) or (len(row) > 2 and row[2])))
	users = frappe.db.sql_list("select name from tabUser")
	def prepare_for_insert(doc):
		# don't block data import if user is not set
		# migrating from another system
		if not doc.owner in users:
			doc.owner = frappe.session.user
		if not doc.modified_by in users:
			doc.modified_by = frappe.session.user
	# header
	if not rows:
		rows = read_csv_content_from_uploaded_file(ignore_encoding_errors)
	start_row = get_start_row()
	header = rows[:start_row]
	data = rows[start_row:]
	doctype = get_header_row(get_data_keys_definition().main_table)[1]
	columns = filter_empty_columns(get_header_row(get_data_keys_definition().columns)[1:])
	doctypes = []
	doctype_parentfield = {}
	column_idx_to_fieldname = {}
	column_idx_to_fieldtype = {}
	if submit_after_import and not cint(frappe.db.get_value("DocType",
			doctype, "is_submittable")):
		submit_after_import = False
	parenttype = get_header_row(get_data_keys_definition().parent_table)
	if len(parenttype) > 1:
		parenttype = parenttype[1]
	# check permissions
	if not frappe.permissions.can_import(parenttype or doctype):
		frappe.flags.mute_emails = False
		return {"messages": [_("Not allowed to Import") + ": " + _(doctype)], "error": True}
	# allow limit rows to be uploaded
	check_data_length()
	make_column_map()
	if overwrite==None:
		overwrite = params.get('overwrite')
	# delete child rows (if parenttype)
	parentfield = None
	if parenttype:
		parentfield = get_parent_field(doctype, parenttype)
		if overwrite:
			delete_child_rows(data, doctype)
	ret = []
	error = False
	total = len(data)
	for i, row in enumerate(data):
		# bypass empty rows
		if main_doc_empty(row):
			continue
		row_idx = i + start_row
		doc = None
		# publish task_update
		frappe.publish_realtime("data_import_progress", {"progress": [i, total]},
			user=frappe.session.user, now=True)
		try:
			doc = get_doc(row_idx)
			if pre_process:
				pre_process(doc)
			if parentfield:
				# importing child-table rows into an existing parent
				parent = frappe.get_doc(parenttype, doc["parent"])
				doc = parent.append(parentfield, doc)
				parent.save()
				ret.append('Inserted row for %s at #%s' % (getlink(parenttype,
					doc.parent), unicode(doc.idx)))
			else:
				if overwrite and doc["name"] and frappe.db.exists(doctype, doc["name"]):
					original = frappe.get_doc(doctype, doc["name"])
					original.update(doc)
					original.flags.ignore_links = ignore_links
					original.save()
					ret.append('Updated row (#%d) %s' % (row_idx + 1, getlink(original.doctype, original.name)))
					doc = original
				else:
					doc = frappe.get_doc(doc)
					prepare_for_insert(doc)
					doc.flags.ignore_links = ignore_links
					doc.insert()
					ret.append('Inserted row (#%d) %s' % (row_idx + 1, getlink(doc.doctype, doc.name)))
				if submit_after_import:
					doc.submit()
					ret.append('Submitted row (#%d) %s' % (row_idx + 1, getlink(doc.doctype, doc.name)))
		except Exception as e:
			error = True
			if doc:
				frappe.errprint(doc if isinstance(doc, dict) else doc.as_dict())
			err_msg = frappe.local.message_log and "\n\n".join(frappe.local.message_log) or cstr(e)
			ret.append('Error for row (#%d) %s : %s' % (row_idx + 1,
				len(row)>1 and row[1] or "", err_msg))
			frappe.errprint(frappe.get_traceback())
		finally:
			frappe.local.message_log = []
	# all-or-nothing: roll back the whole import if any row failed
	if error:
		frappe.db.rollback()
	else:
		frappe.db.commit()
	frappe.flags.mute_emails = False
	return {"messages": ret, "error": error}
def get_parent_field(doctype, parenttype):
	"""Return the fieldname of the child-table field in `parenttype` whose
	options point at `doctype`; raises if no such field exists."""
	parentfield = None
	# get parentfield
	if parenttype:
		for d in frappe.get_meta(parenttype).get_table_fields():
			if d.options==doctype:
				parentfield = d.fieldname
				break
		if not parentfield:
			# Bug fix: the message used "{0} for {0} ({1})" with three
			# format arguments, repeating "parentfield" and dropping the
			# doctype entirely.
			frappe.msgprint(_("Did not find {0} for {1} ({2})").format("parentfield", parenttype, doctype))
			raise Exception
	return parentfield
def delete_child_rows(rows, doctype):
	"""delete child rows for all parents"""
	# column 1 of each data row holds the parent name; delete every existing
	# child row of `doctype` for each distinct, non-empty parent
	for p in list(set([r[1] for r in rows])):
		if p:
			frappe.db.sql("""delete from `tab{0}` where parent=%s""".format(doctype), p)
| |
#!/usr/bin/env python
"""Calculates the spin and density structure factors.
This script crawls down a directory finding all the estimator file, i.e. those
whose name is 'estimators.dat'. It reads each file and store the estimator data
in a database. Then you calculate the structure factors reading the data you
ned from the database with all the info. The structure factor data are saved
into a bunch of files, one per parameter set.
The estimator files must have metadata included.
Usage:
calculate_structure_factors.py [--in=DIR, --out=DIR, --replot_from=DIR]
calculate_structure_factors.py -h | --help
Options:
-h --help Shows this screen.
--in=DIR Directory to crawl down for estimator files
[default: ./]
  --out=DIR           Output directory where structure factor data is saved
[default: ./]
--replot_from=DIR Replots from pickled data stored in DIR
"""
import os
import pickle
import math
import numpy as np
from docopt import docopt
import logging
from itertools import izip
# Temporary patch to avoid installing the dmrg_helpers package.
import inspect
import sys
script_full_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
sys.path.insert(0, os.path.dirname(os.path.dirname(script_full_path)))
# patch ends
from dmrg_helpers.extract.extract import create_db_from_dir
from dmrg_helpers.analyze.structure_factors import (
calculate_spin_struct_factor, calculate_density_struct_factor)
from dmrg_helpers.view.xy_data import XYDataDict
# Let's import the stuff to make plot look good for publication in APS.
import matplotlib.pyplot as plt
import matplotlib as mpl
from dmrg_helpers.view.matplotlib_APS_rc_params import aps
mpl.rcParams.update(aps['params'])
# Band structure calculations for the system of free fermions
# -----------------------------------------------------------
#
# These are a few functions to help you determine the band structure and the
# chemical potential at half-filling.
def theta(dispersion, k, mu):
    """Zero-temperature occupation factor.

    Returns 1.0 when the band energy ``dispersion(k)`` is at or below the
    chemical potential ``mu``, and 0.0 otherwise.
    """
    return 1.0 if dispersion(k) <= mu else 0.0
def calculate_number_of_electrons_for_mu(mu, dispersion, length):
    # Each of the `length` allowed momenta 2*pi*k/length contributes two
    # electrons (spin up and down) when its band energy is at or below mu.
    return 2*sum([theta(dispersion, 2*math.pi*k/length, mu)
                  for k in xrange(length)])
def determine_mu_at_half_filling(dispersion, k_max, k_min, length):
    # Scan 100 candidate chemical potentials between the band minimum and
    # maximum energies and return the first one whose electron count exceeds
    # length - 1 (i.e. reaches half-filling).
    # NOTE(review): `next` raises StopIteration if no candidate qualifies —
    # assumed unreachable for a physical dispersion; confirm.
    tmp = [(mu, calculate_number_of_electrons_for_mu(mu, dispersion, length))
           for mu in np.linspace(dispersion(k_min), dispersion(k_max), 100)]
    return next(x[0] for x in tmp if x[1] > length -1)
def find_fermi_momenta(mu, dispersion, length):
    # The occupation theta(...) changes value exactly at the Fermi points,
    # so the nonzero entries of np.diff(...) mark the crossing momenta.
    momenta = [2*math.pi*k/length for k in xrange(length)]
    indexes = np.nonzero(
        np.diff([theta(dispersion, k, mu) for k in momenta]))[0].tolist()
    return [momenta[i] for i in indexes]
def find_fermi_momenta_at_half_filling(dispersion, k_max, k_min,
                                       number_of_sites):
    """Return the Fermi momenta of `dispersion` at half-filling.

    Convenience wrapper: first determines the chemical potential at
    half-filling, then locates the corresponding Fermi momenta.
    """
    chemical_potential = determine_mu_at_half_filling(
        dispersion, k_max, k_min, number_of_sites)
    return find_fermi_momenta(chemical_potential, dispersion, number_of_sites)
def calculate_K_over_t(estimator):
    """Calculates the value of :math:`K/t` for an estimator.
    You can get estimators from calling the `get_estimator` function on the
    Database class, or as the result of calling the functions that calculate
    structure factors.
    Parameters
    ----------
    estimator: an XYDataDict object.
        The thing you want to calculate the parameter from.
    Returns
    -------
    A dictionary with the same keys as the estimator and :math:`K/t` as values.
    """
    k_over_t = []
    for k in estimator.data.iterkeys():
        params = estimator.get_metadata_as_dict(k)
        try:
            tmp = str(float(params['Kring'])/float(params['t']))
        except ZeroDivisionError:
            # t == 0: label the ratio as infinite (LaTeX for plot legends)
            tmp = r"$\infty$"
        k_over_t.append(tmp)
    # NOTE: relies on the two iterkeys() passes yielding the same order,
    # which holds because the dict is not modified in between.
    return dict(izip(estimator.data.iterkeys(), k_over_t))
def plot_structure_factor(structure_factor, k_fs, y_label, selected_keys=None):
    """Pretty plots the structure factor.

    Parameters
    ----------
    structure_factor: XYDataDict-like object holding the curves to plot.
    k_fs: sequence of Fermi momenta, used to place the extra x-ticks.
    y_label: LaTeX string for the y-axis label.
    selected_keys: optional collection of keys; when given, only curves
        whose key is in it are drawn. By default all curves are drawn.

    Returns
    -------
    The matplotlib figure containing the plot.
    """
    # Make the figure nice
    fig = plt.figure()
    ax = plt.axes(aps['axes'])
    ax.set_xlabel(r'$q$')
    ax.set_ylabel(y_label)
    ax.yaxis.set_label_coords(-0.08, 0.5)
    ax.set_xlim((0.0, math.pi))
    ax.set_xticks([0.0, math.pi/2, math.pi, (k_fs[2] - k_fs[1]) % math.pi,
                   (k_fs[0] - k_fs[3]) % math.pi,
                   (k_fs[2] - k_fs[0]) % math.pi])
    # Bug fix: the r'$\pi' label was missing its closing '$', which breaks
    # mathtext rendering.
    ax.set_xticklabels(['0', r'$\pi/2$', r'$\pi$', r'$2k_{F1}$', r'$2k_{F2}$',
                        r'$k_{F2}-k_{F1}$'])
    max_y = structure_factor.get_max_y()
    min_y = structure_factor.get_min_y()
    ax.set_yticks([0.0, max_y - min_y])
    ax.set_yticklabels(['0', "{0:.2f}".format(max_y-min_y)])
    ax.set_ylim([0.0, 1.1 * (max_y - min_y)])
    # Get the data and plot
    data = structure_factor.get_data_for_plots(calculate_K_over_t)
    for k, v in data.iteritems():
        # Bug fix: the original attached the plotting branch as the `else`
        # of `if selected_keys is not None`, so passing selected_keys
        # suppressed *every* curve. Skip unselected keys instead.
        if selected_keys is not None and k not in selected_keys:
            continue
        ax.plot(v[1], v[2] - v[2][0], lw=0.5)
    return fig
def main(args):
    """Compute (or reload) the structure factors and write the plots.

    `args` is the docopt dictionary parsed from the module docstring.
    """
    output_dir = args['--out']
    if not args['--replot_from']:
        # Create a database with all the files under dir
        db = create_db_from_dir(args['--in'])
        # Calculate the structure factors
        logging.info('Calculating spin structure factors')
        spin_struct_factor = calculate_spin_struct_factor(db)
        spin_struct_factor.save('spin_structure_factor.p', output_dir)
        logging.info('Calculating charge structure factors')
        charge_struct_factor = calculate_density_struct_factor(db)
        charge_struct_factor.save('charge_structure_factor.p', output_dir)
        # Find the Fermi momenta to use in the structure factor plots.
        # You need the band dispersion and a few band-structure parameters.
        def two_bands(k, t_p=0.75):
            return -2*math.cos(k)-2*t_p*math.cos(2*k)
        # hard-coded parameters for the 96-site system studied here
        number_of_sites = 96
        k_max = math.acos(-1.0/3)
        k_min = 0.0
        k_fs = find_fermi_momenta_at_half_filling(two_bands, k_max, k_min,
                                                  number_of_sites)
        # persist the Fermi momenta so --replot_from can reuse them
        pickle.dump(k_fs, open(os.path.join(output_dir, 'k_fs.p'), "wb"))
    else:
        # replot mode: load the previously pickled data instead of crawling
        replot_from = args['--replot_from']
        spin_struct_factor = XYDataDict.load('spin_structure_factor.p',
                                             replot_from)
        charge_struct_factor = XYDataDict.load('charge_structure_factor.p',
                                               replot_from)
        f = os.path.join(os.path.abspath(replot_from), 'k_fs.p')
        k_fs = pickle.load(open(f, "rb"))
    # Plot the structure factors
    y_label = r'$\langle \vec{S}_{q}\cdot\vec{S}_{-q}\rangle$'
    spin_struct_plot = plot_structure_factor(spin_struct_factor, k_fs, y_label)
    f = os.path.join(os.path.abspath(output_dir), 'spin_struct_factor.pdf')
    spin_struct_plot.savefig(f)
    y_label = r'$\langle \delta n_{q}\delta n_{-q}\rangle$'
    charge_struct_plot = plot_structure_factor(charge_struct_factor, k_fs,
                                               y_label)
    f = os.path.join(os.path.abspath(output_dir), 'charge_struct_factor.pdf')
    charge_struct_plot.savefig(f)
# Script entry point: parse the CLI options declared in the module
# docstring (docopt) and run the calculation/replot.
if __name__ == '__main__':
    args = docopt(__doc__, version = 0.1)
    main(args)
| |
# Many pieces shamelessly borrowed from scikit-learn
# Licence: BSD
from abc import abstractmethod, abstractproperty
from abc import ABCMeta
import inspect
import warnings
import numpy as np
import six
###############################################################################
class ModelMixin(object):
    """ Base mixin class for models and algorithms
    All RL/IRL algorithms should specify their required parameters in their
    respective ``__init__`` methods with explicit keyword arguments only
    """
    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator"""
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        # introspect the constructor arguments to find the model parameters
        # to represent
        args, varargs, kw, default = inspect.getargspec(init)
        if varargs is not None:
            raise RuntimeError("skirl algorithms should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s doesn't follow this convention."
                               % (cls, ))
        # Remove 'self'
        # XXX: This is going to fail if the init is a staticmethod, but
        # who would do this?
        args.pop(0)
        args.sort()
        return args
    def get_params(self, deep=True):
        """Get parameters for this estimator.
        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.
        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        out = dict()
        for key in self._get_param_names():
            # force deprecation warnings on so a deprecated parameter can
            # be detected (and skipped) when its value is read below
            warnings.simplefilter("always", DeprecationWarning)
            try:
                with warnings.catch_warnings(record=True) as w:
                    value = getattr(self, key, None)
                if len(w) and w[0].category == DeprecationWarning:
                    # if the parameter is deprecated, don't show it
                    continue
            finally:
                # undo the simplefilter installed above
                warnings.filters.pop(0)
            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out
    def set_params(self, **params):
        """Set the parameters of this estimator.
        The method works on simple estimators as well as on nested objects
        (such as pipelines). The former have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.
        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        for key, value in six.iteritems(params):
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s' %
                                     (name, self))
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if key not in valid_params:
                    raise ValueError('Invalid parameter %s ' 'for estimator %s'
                                     % (key, self.__class__.__name__))
                setattr(self, key, value)
        return self
    def __repr__(self):
        # e.g. "MyModel(alpha=0.1, beta=2)" with params pretty-printed
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                               offset=len(class_name),),)
###############################################################################
def _pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'
    Parameters
    ----------
    params: dict
        The dictionary to pretty print
    offset: int
        The offset in characters to add at the begin of each line.
    printer:
        The function to convert entries to strings, typically
        the builtin str or repr
    """
    # Do a multi-line justified repr:
    options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset // 2) * ' '
    for i, (k, v) in enumerate(sorted(six.iteritems(params))):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        # truncate very long value reprs so one huge parameter cannot
        # dominate the output
        if len(this_repr) > 500:
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            # wrap to a new line when over ~75 chars or on embedded newlines
            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)
    # restore the caller's global numpy print options
    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
########################################################################
# Reward model
class MDPReward(six.with_metaclass(ABCMeta, ModelMixin)):
    """ Reward function base class

    Subclasses define their feature functions as class attributes whose
    names start with ``_feature_``; :attr:`dim` counts them.
    """
    # naming prefix used to discover feature attributes on subclasses
    _template = '_feature_'

    def __init__(self, world, kind='linfa'):
        # keep a reference to parent MDP to get access to S, A
        self._world = world
        self.kind = kind

    @abstractmethod
    def __call__(self, state, action):
        """ Evaluate the reward function for the (state, action) pair
        Compute :math:`r(s, a) = f(s, a, w)` where :math:`f` is a function
        approximator for the reward parameterized by :math:`w`
        """
        raise NotImplementedError('Abstract method')

    @property
    def dim(self):
        """ Dimension of the reward function """
        # Count the class attributes named '_feature_*'.
        # Bug fix: the original iterated the class __dict__ (which yields
        # attribute-name strings) but tested ``f[0].startswith(...)`` --
        # i.e. only the *first character* of each name -- so the count was
        # always 0. Test the full attribute name instead.
        return sum(1 for name in self.__class__.__dict__
                   if name.startswith(self._template))
# Reward Loss Functions
class RewardLoss(six.with_metaclass(ABCMeta, ModelMixin)):
    """Reward loss function """
    def __init__(self, name):
        # short identifier for the loss (e.g. 'tqloss')
        self.name = name
    @abstractmethod
    def __call__(self, r1, r2):
        """ Reward loss between ``r1`` and ``r2`` """
        raise NotImplementedError('Abstract')
class TrajQualityLoss(RewardLoss):
    """ Trajectory quality loss :math:`||Q(s) - Q(s)||_p` """
    def __init__(self, p=1, name='tqloss'):
        super(TrajQualityLoss, self).__init__(name)
        # order of the p-norm used in the loss
        self.p = p
    def __call__(self, QE, QPi):
        # sum of p-th-power differences between the expert Q-values `QE`
        # and each candidate Q-value sequence in `QPi`
        ql = sum([sum((Qe - Qp)**self.p
                  for Qe, Qp in zip(QE, Q_i))
                  for Q_i in QPi])
        return ql
########################################################################
class LocalController(six.with_metaclass(ABCMeta, ModelMixin)):
    """ GraphMDP local controller """
    def __init__(self, world, kind='abstract'):
        # reference to the world/MDP the controller operates in
        self._world = world
        self.kind = kind
    @abstractmethod
    def __call__(self, state, action, duration):
        """ Execute a local controller at `state` using `action`
        for period lasting `duration`
        """
        raise NotImplementedError('Abstract method')
    @abstractmethod
    def trajectory(self, source, target):
        """ Generate a trajectory by executing the local controller
        Execute the local controller between the given two states to generate
        a local trajectory which encapsulates the meta-action
        """
        raise NotImplementedError('Abstract method')
########################################################################
class MDP(six.with_metaclass(ABCMeta, ModelMixin)):
    """ Markov Decision Process Model
    Parameters
    ------------
    discount : float
        MDP discount factor, must lie in [0, 1)
    reward : `SocialNavReward` object
        Reward function for social navigation task
    Attributes
    -----------
    gamma : float
        MDP discount factor
    _reward : :class:`SocialNavReward` object
        Reward function for social navigation task
    """
    def __init__(self, discount, reward):
        # Bug fix: the original guard `if 0.0 > discount >= 1.0` can never
        # be true (nothing is both below 0 and at least 1), so invalid
        # discount factors were silently accepted. Reject anything outside
        # the documented range [0, 1).
        if not 0.0 <= discount < 1.0:
            raise ValueError('The `discount` must be in [0, 1)')
        self.gamma = discount
        self.reward = reward
    @abstractmethod
    def terminal(self, state):
        """ Check if a state is terminal (goal state) """
        raise NotImplementedError('Abstract method')
    @abstractproperty
    def state_dimension(self):
        """ Dimension of the state space """
        return 0
    @abstractproperty
    def start_states(self):
        """ Possible start states of the MDP """
        return None
    @abstractproperty
    def goal_state(self):
        """ Goal state of the MDP """
        return None
# An interface for MDP representation
class MDPRepresentation(six.with_metaclass(ABCMeta, ModelMixin)):
    """ MDP Representation.

    Abstract interface wrapping an :class:`MDP`; concrete subclasses define
    the representation ``kind`` (e.g. tabular, graph-based).
    """

    def __init__(self, mdp):
        # the wrapped MDP instance, exposed read-only via the `mdp` property
        self._mdp = mdp

    @abstractproperty
    def kind(self):
        """ Identifier for the kind of representation """
        raise NotImplementedError('Abstract property')

    @property
    def mdp(self):
        # read-only access to the wrapped MDP
        return self._mdp
########################################################################
class Environment(six.with_metaclass(ABCMeta, ModelMixin)):
    """ The environment that the MDP is defined on

    This is largely a data container for all the things in the environment
    that the MDP should care about, for use in computing reward functions
    etc

    Also contains limits of the environment
    """

    def __init__(self, start, goal):
        # start and goal configurations of the task
        self.start = start
        self.goal = goal

    @abstractmethod
    def in_world(self, state):
        """ Check whether `state` lies within the environment's limits """
        raise NotImplementedError('Abstract')
| |
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.rename_doc import get_link_fields
from frappe.model.dynamic_links import dynamic_link_queries
from frappe.permissions import reset_perms
def execute():
    """Patch: merge the legacy Comment and Feed doctypes into Communication.

    Copies all existing Comment and Feed rows into `tabCommunication`, then
    deletes the old DocType records.  The underlying tables are deliberately
    NOT dropped (see the commented-out DDL) so data can be recovered.
    """
    frappe.reload_doctype("DocType")
    frappe.reload_doctype("Communication")
    # re-apply standard permissions on the (reloaded) Communication doctype
    reset_perms("Communication")

    migrate_comments()
    frappe.delete_doc("DocType", "Comment")
    # frappe.db.sql_ddl("drop table `tabComment`")

    migrate_feed()
    frappe.delete_doc("DocType", "Feed")
    # frappe.db.sql_ddl("drop table `tabFeed`")

    update_timeline_doc_for("Blogger")
def migrate_comments():
    """Copy every row of `tabComment` into `tabCommunication`.

    Two passes: document comments become Communications of type 'Comment';
    rows filed under the 'Message' / 'My Company' pseudo-doctypes (legacy chat
    and assignment notifications) become 'Chat' / 'Notification'
    Communications.  `insert ignore` keeps the patch re-runnable.
    """
    from_fields = ""
    to_fields = ""
    # Newer schemas carry reference_doctype/reference_name on tabComment;
    # map them onto link_doctype/link_name only when the columns exist.
    if "reference_doctype" in frappe.db.get_table_columns("Comment"):
        from_fields = "reference_doctype as link_doctype, reference_name as link_name,"
        to_fields = "link_doctype, link_name,"

    # comments
    frappe.db.sql("""insert ignore into `tabCommunication` (
            subject,
            content,
            sender,
            sender_full_name,
            comment_type,
            communication_date,
            reference_doctype,
            reference_name,
            {to_fields}
            name,
            user,
            owner,
            creation,
            modified_by,
            modified,
            status,
            sent_or_received,
            communication_type,
            seen
        )
        select
            substring(comment, 1, 100) as subject,
            comment as content,
            comment_by as sender,
            comment_by_fullname as sender_full_name,
            comment_type,
            ifnull(timestamp(comment_date, comment_time), creation) as communication_date,
            comment_doctype as reference_doctype,
            comment_docname as reference_name,
            {from_fields}
            name,
            owner as user,
            owner,
            creation,
            modified_by,
            modified,
            'Linked' as status,
            'Sent' as sent_or_received,
            'Comment' as communication_type,
            1 as seen
        from `tabComment` where comment_doctype is not null and comment_doctype not in ('Message', 'My Company')"""
        .format(to_fields=to_fields, from_fields=from_fields))

    # chat and assignment notifications (stored under the 'Message' /
    # 'My Company' pseudo-doctypes); assignment subjects get a translated
    # label passed in as the %(assignment)s parameter
    frappe.db.sql("""insert ignore into `tabCommunication` (
            subject,
            content,
            sender,
            sender_full_name,
            comment_type,
            communication_date,
            reference_doctype,
            reference_name,
            {to_fields}
            name,
            user,
            owner,
            creation,
            modified_by,
            modified,
            status,
            sent_or_received,
            communication_type,
            seen
        )
        select
            case
                when parenttype='Assignment' then %(assignment)s
                else substring(comment, 1, 100)
            end
            as subject,
            comment as content,
            comment_by as sender,
            comment_by_fullname as sender_full_name,
            comment_type,
            ifnull(timestamp(comment_date, comment_time), creation) as communication_date,
            'User' as reference_doctype,
            comment_docname as reference_name,
            {from_fields}
            name,
            owner as user,
            owner,
            creation,
            modified_by,
            modified,
            'Linked' as status,
            'Sent' as sent_or_received,
            case
                when parenttype='Assignment' then 'Notification'
                else 'Chat'
            end
            as communication_type,
            1 as seen
        from `tabComment` where comment_doctype in ('Message', 'My Company')"""
        .format(to_fields=to_fields, from_fields=from_fields), {"assignment": _("Assignment")})
def migrate_feed():
    """Copy every row of `tabFeed` into `tabCommunication`.

    Two passes: 'Deleted' feed entries first (grouped per doctype so the
    subject can be rebuilt with a translated doctype label), then login and
    generic feed entries.  `insert ignore` keeps the patch re-runnable.
    """
    # migrate delete feed
    # NOTE(review): frappe.db.sql returns row tuples, so `doctype` here looks
    # like a 1-tuple rather than a plain string -- verify whether
    # frappe.db.sql_list was intended.
    for doctype in frappe.db.sql("""select distinct doc_type from `tabFeed` where subject=%(deleted)s""", {"deleted": _("Deleted")}):
        frappe.db.sql("""insert ignore into `tabCommunication` (
                subject,
                sender,
                sender_full_name,
                comment_type,
                communication_date,
                reference_doctype,
                name,
                user,
                owner,
                creation,
                modified_by,
                modified,
                status,
                sent_or_received,
                communication_type,
                seen
            )
            select
                concat_ws(" ", %(_doctype)s, doc_name) as subject,
                owner as sender,
                full_name as sender_full_name,
                'Deleted' as comment_type,
                creation as communication_date,
                doc_type as reference_doctype,
                name,
                owner as user,
                owner,
                creation,
                modified_by,
                modified,
                'Linked' as status,
                'Sent' as sent_or_received,
                'Comment' as communication_type,
                1 as seen
            from `tabFeed` where subject=%(deleted)s and doc_type=%(doctype)s""", {
                "deleted": _("Deleted"),
                "doctype": doctype,
                "_doctype": _(doctype)
            })

    # migrate feed type login or empty
    frappe.db.sql("""insert ignore into `tabCommunication` (
            subject,
            sender,
            sender_full_name,
            comment_type,
            communication_date,
            reference_doctype,
            reference_name,
            name,
            user,
            owner,
            creation,
            modified_by,
            modified,
            status,
            sent_or_received,
            communication_type,
            seen
        )
        select
            subject,
            owner as sender,
            full_name as sender_full_name,
            case
                when feed_type='Login' then 'Info'
                else 'Updated'
            end as comment_type,
            creation as communication_date,
            doc_type as reference_doctype,
            doc_name as reference_name,
            name,
            owner as user,
            owner,
            creation,
            modified_by,
            modified,
            'Linked' as status,
            'Sent' as sent_or_received,
            'Comment' as communication_type,
            1 as seen
        from `tabFeed` where (feed_type in ('Login', '') or feed_type is null)""")
def update_timeline_doc_for(timeline_doctype):
    """NOTE: This method may be used by other apps for patching. It also has COMMIT after each update.

    Stamps timeline_doctype/timeline_name onto existing Communications for
    every doctype that links to ``timeline_doctype``, covering both regular
    Link fields and Dynamic Link fields.
    """
    # find linked doctypes

    # link fields
    update_for_linked_docs(timeline_doctype)

    # dynamic link fields
    update_for_dynamically_linked_docs(timeline_doctype)
def update_for_linked_docs(timeline_doctype):
    """Stamp timeline info onto Communications of every doctype that links to
    ``timeline_doctype`` through a regular Link field."""
    for link_field in get_link_fields(timeline_doctype):
        # single doctypes have no table rows to walk
        if link_field.issingle:
            continue

        parent_doctype = link_field.parent
        if not is_valid_timeline_doctype(parent_doctype, timeline_doctype):
            continue

        rows = frappe.get_all(parent_doctype, fields=["name", link_field.fieldname])
        for row in rows:
            update_communication(timeline_doctype, row.get(link_field.fieldname),
                parent_doctype, row.name)
def update_for_dynamically_linked_docs(timeline_doctype):
    """Stamp timeline info onto Communications of every doctype that links to
    ``timeline_doctype`` through a Dynamic Link field."""
    dynamic_link_fields = []
    for query in dynamic_link_queries:
        dynamic_link_fields.extend(frappe.db.sql(query, as_dict=True))

    for df in dynamic_link_fields:
        reference_doctype = df.parent
        if not is_valid_timeline_doctype(reference_doctype, timeline_doctype):
            continue

        try:
            docs = frappe.get_all(reference_doctype, fields=["name", df.fieldname],
                filters={ df.options: timeline_doctype })
        # BUG FIX: `except frappe.SQLError, e:` is Python-2-only syntax; the
        # `as e` form works on Python 2.6+ and Python 3.
        except frappe.SQLError as e:
            # MySQL error 1146 ("table doesn't exist"): a Single doctype,
            # which has no table -- skip it
            if e.args and e.args[0] == 1146:
                continue
            else:
                raise

        for doc in docs:
            timeline_name = doc.get(df.fieldname)
            update_communication(timeline_doctype, timeline_name, reference_doctype, doc.name)
def update_communication(timeline_doctype, timeline_name, reference_doctype, reference_name):
    """Set timeline_doctype/timeline_name on all Communications that reference
    the given document and do not have a timeline set yet.  Commits after the
    update so partial progress survives a crash."""
    if not timeline_name:
        return

    values = {
        "timeline_doctype": timeline_doctype,
        "timeline_name": timeline_name,
        "reference_doctype": reference_doctype,
        "reference_name": reference_name
    }
    frappe.db.sql("""update `tabCommunication` set timeline_doctype=%(timeline_doctype)s, timeline_name=%(timeline_name)s
        where (reference_doctype=%(reference_doctype)s and reference_name=%(reference_name)s)
        and (timeline_doctype is null or timeline_doctype='')
        and (timeline_name is null or timeline_name='')""", values)
    frappe.db.commit()
def is_valid_timeline_doctype(reference_doctype, timeline_doctype):
    """Return True if ``reference_doctype``'s timeline_field links to
    ``timeline_doctype``."""
    # reload so the meta reflects any freshly added timeline_field
    frappe.reload_doctype(reference_doctype)

    meta = frappe.get_meta(reference_doctype)
    if not meta.timeline_field:
        return False

    # the timeline field's link target must be the timeline doctype itself
    return meta.get_link_doctype(meta.timeline_field) == timeline_doctype
| |
"""Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
           err_style="ci_band", ci=68, interpolate=True, color=None,
           estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
           legend=True, ax=None, **kwargs):
    """Plot one or more timeseries with flexible representation of uncertainty.

    This function can take data specified either as a long-form (tidy)
    DataFrame or as an ndarray with dimensions for sampling unit, time, and
    (optionally) condition. The interpretation of some of the other parameters
    changes depending on the type of object passed as data.

    Parameters
    ----------
    data : DataFrame or ndarray
        Data for the plot. Should either be a "long form" dataframe or an
        array with dimensions (unit, time, condition). In both cases, the
        condition field/dimension is optional. The type of this argument
        determines the interpretation of the next few parameters.
    time : string or series-like
        Either the name of the field corresponding to time in the data
        DataFrame or x values for a plot when data is an array. If a Series,
        the name will be used to label the x axis.
    unit : string
        Field in the data DataFrame identifying the sampling unit (e.g.
        subject, neuron, etc.). The error representation will collapse over
        units at each time/condition observation. This has no role when data
        is an array.
    value : string
        Either the name of the field corresponding to the data values in
        the data DataFrame (i.e. the y coordinate) or a string that forms
        the y axis label when data is an array.
    condition : string or Series-like
        Either the name of the field identifying the condition an observation
        falls under in the data DataFrame, or a sequence of names with a length
        equal to the size of the third dimension of data. There will be a
        separate trace plotted for each condition. If condition is a Series
        with a name attribute, the name will form the title for the plot
        legend (unless legend is set to False).
    err_style : string or list of strings or None
        Names of ways to plot uncertainty across units from set of
        {ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
        Can use one or more than one method.
    ci : float or list of floats in [0, 100]
        Confidence interval size(s). If a list, it will stack the error
        plots for each confidence interval. Only relevant for error styles
        with "ci" in the name.
    interpolate : boolean
        Whether to do a linear interpolation between each timepoint when
        plotting. The value of this parameter also determines the marker
        used for the main plot traces, unless marker is specified as a keyword
        argument.
    color : seaborn palette or matplotlib color name or dictionary
        Palette or color for the main plots and error representation (unless
        plotting by unit, which can be separately controlled with err_palette).
        If a dictionary, should map condition name to color spec.
    estimator : callable
        Function to determine central tendency and to pass to bootstrap
        must take an ``axis`` argument.
    n_boot : int
        Number of bootstrap iterations.
    err_palette : seaborn palette
        Palette name or list of colors used when plotting data for each unit.
    err_kws : dict, optional
        Keyword argument dictionary passed through to matplotlib function
        generating the error plot.
    ax : axis object, optional
        Plot in given axis; if None creates a new figure
    kwargs :
        Other keyword arguments are passed to main plot() call

    Returns
    -------
    ax : matplotlib axis
        axis with plot data

    """
    # Sort out default values for the parameters
    if ax is None:
        ax = plt.gca()

    if err_kws is None:
        err_kws = {}

    # Handle different types of input data
    if isinstance(data, pd.DataFrame):

        xlabel = time
        ylabel = value

        # Condition is optional
        if condition is None:
            condition = pd.Series(np.ones(len(data)))
            legend = False
            legend_name = None
            n_cond = 1
        else:
            legend = True and legend
            legend_name = condition
            n_cond = len(data[condition].unique())

    else:
        data = np.asarray(data)

        # Data can be a timecourse from a single unit or
        # several observations in one condition
        if data.ndim == 1:
            data = data[np.newaxis, :, np.newaxis]
        elif data.ndim == 2:
            data = data[:, :, np.newaxis]
        n_unit, n_time, n_cond = data.shape

        # Units are experimental observations. Maybe subjects, or neurons
        if unit is None:
            units = np.arange(n_unit)
        unit = "unit"
        units = np.repeat(units, n_time * n_cond)
        ylabel = None

        # Time forms the xaxis of the plot
        if time is None:
            times = np.arange(n_time)
        else:
            times = np.asarray(time)
        xlabel = None
        if hasattr(time, "name"):
            xlabel = time.name
        time = "time"
        times = np.tile(np.repeat(times, n_cond), n_unit)

        # Conditions split the timeseries plots
        if condition is None:
            conds = range(n_cond)
            legend = False
            if isinstance(color, dict):
                err = "Must have condition names if using color dict."
                raise ValueError(err)
        else:
            conds = np.asarray(condition)
            legend = True and legend
            if hasattr(condition, "name"):
                legend_name = condition.name
            else:
                legend_name = None
        condition = "cond"
        conds = np.tile(conds, n_unit * n_time)

        # Value forms the y value in the plot
        if value is None:
            ylabel = None
        else:
            ylabel = value
        value = "value"

        # Convert to long-form DataFrame
        data = pd.DataFrame(dict(value=data.ravel(),
                                 time=times,
                                 unit=units,
                                 cond=conds))

    # Set up the err_style and ci arguments for the loop below
    if isinstance(err_style, string_types):
        err_style = [err_style]
    elif err_style is None:
        err_style = []
    if not hasattr(ci, "__iter__"):
        ci = [ci]

    # Set up the color palette
    if color is None:
        # NOTE(review): "axes.color_cycle" was removed in matplotlib 2.0 in
        # favour of "axes.prop_cycle" -- confirm the supported mpl versions.
        current_palette = mpl.rcParams["axes.color_cycle"]
        if len(current_palette) < n_cond:
            colors = color_palette("husl", n_cond)
        else:
            colors = color_palette(n_colors=n_cond)
    elif isinstance(color, dict):
        colors = [color[c] for c in data[condition].unique()]
    else:
        try:
            colors = color_palette(color, n_cond)
        except ValueError:
            color = mpl.colors.colorConverter.to_rgb(color)
            colors = [color] * n_cond

    # Do a groupby with condition and plot each trace
    for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):

        df_c = df_c.pivot(unit, time, value)
        # use the builtin float: the np.float alias was removed in numpy 1.24
        x = df_c.columns.values.astype(float)

        # Bootstrap the data for confidence intervals
        boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
                                   axis=0, func=estimator)
        cis = [utils.ci(boot_data, v, axis=0) for v in ci]
        central_data = estimator(df_c.values, axis=0)

        # Get the color for this condition
        color = colors[c]

        # Use subroutines to plot the uncertainty
        for style in err_style:

            # Allow for null style (only plot central tendency)
            if style is None:
                continue

            # Grab the function from the global environment
            try:
                plot_func = globals()["_plot_%s" % style]
            except KeyError:
                raise ValueError("%s is not a valid err_style" % style)

            # Possibly set up to plot each observation in a different color
            if err_palette is not None and "unit" in style:
                orig_color = color
                color = color_palette(err_palette, len(df_c.values))

            # Pass all parameters to the error plotter as keyword args
            plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
                               boot_data=boot_data,
                               central_data=central_data,
                               color=color, err_kws=err_kws)

            # Plot the error representation, possibly for multiple cis
            for ci_i in cis:
                plot_kwargs["ci"] = ci_i
                plot_func(**plot_kwargs)

            if err_palette is not None and "unit" in style:
                color = orig_color

        # Plot the central trace
        kwargs.setdefault("marker", "" if interpolate else "o")
        ls = kwargs.pop("ls", "-" if interpolate else "")
        kwargs.setdefault("linestyle", ls)
        label = cond if legend else "_nolegend_"
        ax.plot(x, central_data, color=color, label=label, **kwargs)

    # Pad the sides of the plot only when not interpolating
    ax.set_xlim(x.min(), x.max())
    if not interpolate and len(x) > 1:
        # BUG FIX: x_diff was computed unconditionally, raising IndexError
        # for single-timepoint data; it is only needed (and defined) when
        # there are at least two timepoints to pad by.
        x_diff = x[1] - x[0]
        ax.set_xlim(x.min() - x_diff, x.max() + x_diff)

    # Add the plot labels
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if legend:
        ax.legend(loc=0, title=legend_name)

    return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
    """Plot translucent error bands around the central tendancy."""
    low, high = ci
    # default to a light band unless the caller chose an alpha
    err_kws.setdefault("alpha", 0.2)
    ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
    """Draw a vertical error bar through each data point."""
    for x_pos, _, (low, high) in zip(x, central_data, ci.T):
        ax.plot([x_pos, x_pos], [low, high], color=color,
                solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
    """Overlay the individual bootstrap resample traces."""
    err_kws.setdefault("alpha", 0.25)
    err_kws.setdefault("linewidth", 0.25)
    # honor the matplotlib "lw" alias, letting it win over the default
    if "lw" in err_kws:
        err_kws["linewidth"] = err_kws.pop("lw")
    ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
    """Plot a trace for each observation in the original data."""
    if isinstance(color, list):
        # one color per observation
        err_kws.setdefault("alpha", .5)
        for obs_color, obs in zip(color, data):
            ax.plot(x, obs, color=obs_color, label="_nolegend_", **err_kws)
    else:
        # single shared color for all observations
        err_kws.setdefault("alpha", .2)
        ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
    """Plot each original data point discretely."""
    if isinstance(color, list):
        # one color per observation
        for obs_color, obs in zip(color, data):
            ax.plot(x, obs, "o", color=obs_color, alpha=0.8, markersize=4,
                    label="_nolegend_", **err_kws)
    else:
        ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
                label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
    """Plot the kernal density estimate of the bootstrap distribution."""
    # the KDE is built from the bootstrap distribution, not the raw data
    kwargs.pop("data")
    _ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
    """Plot the kernal density estimate over the sample."""
    _ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
    """Upsample over time and plot a KDE of the bootstrap distribution."""
    n_pts = 100
    y_min, y_max = data.min(), data.max()
    y_vals = np.linspace(y_min, y_max, n_pts)

    # linearly upsample the traces onto a dense time grid
    upsampler = interpolate.interp1d(x, data)
    data_upsample = upsampler(np.linspace(x.min(), x.max(), n_pts))

    # one KDE per (upsampled) timepoint, evaluated on the shared y grid
    kde_data = np.transpose([stats.kde.gaussian_kde(pt_data)(y_vals)
                             for pt_data in data_upsample.T])

    # build an RGBA image whose alpha channel is the (normalized) density
    rgb = mpl.colors.ColorConverter().to_rgb(color)
    img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
    img[:, :, :3] = rgb
    kde_data /= kde_data.max(axis=0)
    kde_data[kde_data > 1] = 1
    img[:, :, 3] = kde_data
    ax.imshow(img, interpolation="spline16", zorder=2,
              extent=(x.min(), x.max(), y_min, y_max),
              aspect="auto", origin="lower")
| |
#!/usr/bin/env python
# encoding: utf-8
import json
import base64
from threading import Timer
from observable import Observable
from .events import Events
from ..core import tostr, clean_decrypted
RENEW_HANDICAP = 60
stripped = lambda s: ''.join([c for c in s if ord(c) > 31 or ord(c) == 9])
class Subscription(Observable):
    """Server-side event subscription delivered over PubNub.

    Wraps the ``/restapi/v1.0/subscription`` resource: creates, renews and
    removes it, keeps an auto-renewal timer running, and relays (optionally
    AES-encrypted) PubNub notifications to listeners via ``Observable``.
    """

    def __init__(self, platform):
        Observable.__init__(self)
        self._platform = platform
        self._event_filters = []
        self._timeout = None  # threading.Timer driving auto-renewal
        self._subscription = {
            'eventFilters': [],
            'expirationTime': '',  # 2014-03-12T19:54:35.613Z
            'expiresIn': 0,
            'deliveryMode': {
                'transportType': 'PubNub',
                'encryption': False,
                'address': '',
                'subscriberKey': '',
                'secretKey': ''
            },
            'id': '',
            'creationTime': '',  # 2014-03-12T19:54:35.613Z
            'status': '',  # Active
            'uri': ''
        }
        self._pubnub = None

    def pubnub(self):
        """Return the underlying PubNub client (None until subscribed)."""
        return self._pubnub

    def register(self, events=None):
        """Renew the subscription if it is alive, otherwise create it."""
        if self.alive():
            return self.renew(events=events)
        else:
            return self.subscribe(events=events)

    def add_events(self, events):
        """Append event filters (takes effect on the next register call)."""
        self._event_filters += events

    def set_events(self, events):
        """Replace the list of event filters."""
        self._event_filters = events

    def subscribe(self, events=None):
        """Create the server-side subscription and connect to PubNub.

        Triggers ``subscribeSuccess`` or ``subscribeError``; resets local
        state and re-raises on failure.
        """
        if events:
            self.set_events(events)

        if not self._event_filters or len(self._event_filters) == 0:
            raise Exception('Events are undefined')

        try:
            response = self._platform.post('/restapi/v1.0/subscription', body={
                'eventFilters': self._get_full_events_filter(),
                'deliveryMode': {
                    'transportType': 'PubNub'
                }
            })
            self.set_subscription(response.json_dict())
            self._subscribe_at_pubnub()
            self.trigger(Events.subscribeSuccess, response)
            return response
        except Exception as e:
            self.reset()
            self.trigger(Events.subscribeError, e)
            raise

    def renew(self, events=None):
        """Extend the lifetime of the existing subscription."""
        if events:
            self.set_events(events)

        if not self.alive():
            raise Exception('Subscription is not alive')

        if not self._event_filters or len(self._event_filters) == 0:
            raise Exception('Events are undefined')

        self._clear_timeout()

        try:
            response = self._platform.put('/restapi/v1.0/subscription/' + self._subscription['id'], body={
                'eventFilters': self._get_full_events_filter()
            })
            self.set_subscription(response.json_dict())
            self.trigger(Events.renewSuccess, response)
            return response
        except Exception as e:
            self.reset()
            self.trigger(Events.renewError, e)
            raise

    def remove(self):
        """Delete the server-side subscription and reset local state."""
        if not self.alive():
            raise Exception('Subscription is not alive')

        try:
            response = self._platform.delete('/restapi/v1.0/subscription/' + self._subscription['id'])
            self.reset()
            self.trigger(Events.removeSuccess, response)
            return response
        except Exception as e:
            self.reset()
            self.trigger(Events.removeError, e)
            raise

    def alive(self):
        """True if we hold subscription data with a usable PubNub address."""
        s = self._subscription
        return s and \
            ('deliveryMode' in s and s['deliveryMode']) and \
            ('subscriberKey' in s['deliveryMode'] and s['deliveryMode']['subscriberKey']) and \
            ('address' in s['deliveryMode'] and s['deliveryMode']['address'])

    def subscription(self):
        return self._subscription

    def set_subscription(self, data):
        """Store new subscription data and restart the renewal timer."""
        self._clear_timeout()
        self._subscription = data
        self._set_timeout()

    def reset(self):
        """Drop the timer, the PubNub connection, and the subscription data."""
        self._clear_timeout()
        self._unsubscribe_at_pubnub()
        self._subscription = None

    def destroy(self):
        """Reset and detach all registered listeners."""
        self.reset()
        self.off()

    def _subscribe_at_pubnub(self):
        """Connect to the PubNub channel named in the delivery mode."""
        if not self.alive():
            raise Exception('Subscription is not alive')

        # imported lazily so pubnub is only required when actually subscribing
        from pubnub.pubnub import PubNub
        from pubnub.pnconfiguration import PNConfiguration
        from pubnub.callbacks import SubscribeCallback
        from pubnub.enums import PNStatusCategory

        pnconf = PNConfiguration()
        pnconf.subscribe_key = self._subscription['deliveryMode']['subscriberKey']
        self._pubnub = PubNub(pnconf)

        subscription = self

        class SubscribeCallbackImpl(SubscribeCallback):
            def presence(self, pubnub, presence):
                pass  # handle incoming presence data

            def status(self, pubnub, status):
                if status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:
                    subscription.trigger(Events.connectionError, 'Connectivity loss')

            def message(self, pubnub, pnmessage):  # instance of PNMessageResult
                subscription._notify(pnmessage.message)

        self._pubnub.add_listener(SubscribeCallbackImpl())
        self._pubnub.subscribe().channels(self._subscription['deliveryMode']['address']).execute()

    def _notify(self, message):
        """Decrypt (if configured) and broadcast an incoming message."""
        message = self._decrypt(message)
        self.trigger(Events.notification, message)

    def _decrypt(self, message):
        """Return the decoded message payload, decrypting when configured."""
        if not self.alive():
            raise Exception('Subscription is not alive')

        from Crypto.Cipher import AES

        delivery_mode = self._subscription['deliveryMode']
        # NOTE(review): only key *presence* is checked, not the value of the
        # 'encryption' flag -- presumably encryptionKey is sent only when
        # encryption is enabled; confirm against the API.
        is_encrypted = ('encryption' in delivery_mode) and ('encryptionKey' in delivery_mode)

        if is_encrypted:
            key = base64.b64decode(self._subscription['deliveryMode']['encryptionKey'])
            data = base64.b64decode(message)
            cipher = AES.new(key, AES.MODE_ECB)
            decrypted = clean_decrypted(tostr(cipher.decrypt(data)))
            # BUG FIX: parse the control-character-stripped text.  The
            # original computed stripped(decrypted) and then parsed the raw
            # `decrypted` string, silently discarding the stripped copy.
            message = json.loads(stripped(decrypted))

        return message

    def _unsubscribe_at_pubnub(self):
        if self._pubnub and self.alive():
            self._pubnub.unsubscribe().channels(self._subscription['deliveryMode']['address']).execute()

    def _get_full_events_filter(self):
        """Expand each relative event filter into an absolute URL."""
        return [self._platform.create_url(e) for e in self._event_filters]

    def _set_timeout(self):
        # schedule a renewal RENEW_HANDICAP seconds before expiration
        time_to_expiration = self._subscription['expiresIn'] - RENEW_HANDICAP
        self._timeout = Timer(time_to_expiration, self.renew)
        self._timeout.start()

    def _clear_timeout(self):
        if self._timeout:
            self._timeout.cancel()
if __name__ == '__main__':
    # Module is meant to be imported; no standalone behaviour.
    pass
| |
#!/usr/bin/env python
# Copyright (c) 2013 Shane Quigley, < shane at softwareontheside.info >
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys
PY3 = sys.version_info[0] == 3
class PdfParser:
    """Extract a John-the-Ripper '$pdf$' hash line from an encrypted PDF.

    Reads the whole file, locates the trailer and the encryption dictionary,
    and prints ``<file>:$pdf$V*R*Length*P*meta*idlen*id*passwords`` to stdout.
    """

    def __init__(self, file_name):
        self.file_name = file_name
        f = open(file_name, 'rb')
        self.encrypted = f.read()
        f.close()
        self.process = True
        # sanity-check the PDF version marker (e.g. b'PDF-1.6')
        psr = re.compile(b'PDF-\d\.\d')
        try:
            self.pdf_spec = psr.findall(self.encrypted)[0]
        except IndexError:
            sys.stderr.write("%s is not a PDF file!\n" % file_name)
            self.process = False

    def parse(self):
        """Parse the PDF and print the JtR hash line to stdout."""
        if not self.process:
            return
        try:
            trailer = self.get_trailer()
        except RuntimeError:
            e = sys.exc_info()[1]
            sys.stdout.write("%s : %s\n" % (self.file_name, str(e)))
            return
        object_id = self.get_encrypted_object_id(trailer)
        encryption_dictionary = self.get_encryption_dictionary(object_id)
        dr = re.compile(b'\d+')
        vr = re.compile(b'\/V \d')
        rr = re.compile(b'\/R \d')
        # /V (algorithm version) and /R (revision) values
        v = dr.findall(vr.findall(encryption_dictionary)[0])[0]
        r = dr.findall(rr.findall(encryption_dictionary)[0])[0]
        lr = re.compile(b'\/Length \d+')
        longest = 0
        # BUG FIX: must default to bytes -- a str default crashes later on
        # length.decode('ascii') under Python 3 when no /Length is present.
        length = b''
        # pick the largest /Length entry (key length in bits)
        for le in lr.findall(encryption_dictionary):
            if int(dr.findall(le)[0]) > longest:
                longest = int(dr.findall(le)[0])
                length = dr.findall(le)[0]
        pr = re.compile(b'\/P -\d+')
        p = pr.findall(encryption_dictionary)[0]
        pr = re.compile(b'-\d+')
        p = pr.findall(p)[0]
        meta = self.is_meta_data_encrypted(encryption_dictionary)
        idr = re.compile(b'\/ID\s*\[\s*<\w+>\s*<\w+>\s*\]')
        try:
            i_d = idr.findall(trailer)[0]  # id key word
        except IndexError:
            # some pdf files use () instead of <>
            idr = re.compile(b'\/ID\s*\[\s*\(\w+\)\s*\(\w+\)\s*\]')
            i_d = idr.findall(trailer)[0]  # id key word
        idr = re.compile(b'<\w+>')
        try:
            i_d = idr.findall(trailer)[0]
        except IndexError:
            idr = re.compile(b'\(\w+\)')
            i_d = idr.findall(trailer)[0]
        i_d = i_d.replace(b'<', b'')
        i_d = i_d.replace(b'>', b'')
        i_d = i_d.lower()
        passwords = self.get_passwords_for_JtR(encryption_dictionary)
        output = self.file_name+':$pdf$'+v.decode('ascii')+'*'+r.decode('ascii')+'*'+length.decode('ascii')+'*'
        output += p.decode('ascii')+'*'+meta+'*'
        output += str(int(len(i_d)/2))+'*'+i_d.decode('ascii')+'*'+passwords
        sys.stdout.write("%s\n" % output)

    def get_passwords_for_JtR(self, encryption_dictionary):
        """Extract the /U, /O (and for PDF 1.7 /UE, /OE) password strings."""
        output = ""
        letters = [b"U", b"O"]
        if b"1.7" in self.pdf_spec:
            letters = [b"U", b"O", b"UE", b"OE"]
        for let in letters:
            pr_str = b'\/' + let + b'\s*\([^)]+\)'
            pr = re.compile(pr_str)
            pas = pr.findall(encryption_dictionary)
            if len(pas) > 0:
                pas = pr.findall(encryption_dictionary)[0]
                # Grow the pattern while the match ends on an escaped ')'
                # (the regex alone cannot tell escaped from closing parens).
                while pas[-2] == b'\\':
                    pr_str += b'[^)]+\)'
                    pr = re.compile(pr_str)
                    pas = pr.findall(encryption_dictionary)[0]
                output += self.get_password_from_byte_string(pas)+"*"
            else:
                # hex-string form: /U <...>
                pr = re.compile(let + b'\s*<\w+>')
                pas = pr.findall(encryption_dictionary)[0]
                pr = re.compile(b'<\w+>')
                pas = pr.findall(pas)[0]
                pas = pas.replace(b"<", b"")
                pas = pas.replace(b">", b"")
                if PY3:
                    output += str(int(len(pas)/2))+'*'+str(pas.lower(), 'ascii')+'*'
                else:
                    output += str(int(len(pas)/2))+'*'+pas.lower()+'*'
        return output[:-1]

    def is_meta_data_encrypted(self, encryption_dictionary):
        """Return "0" if /EncryptMetadata is explicitly false, else "1"."""
        mr = re.compile(b'\/EncryptMetadata\s\w+')
        if len(mr.findall(encryption_dictionary)) > 0:
            wr = re.compile(b'\w+')
            is_encrypted = wr.findall(mr.findall(encryption_dictionary)[0])[-1]
            # BUG FIX: the regex result is bytes; comparing with the str
            # "false" was always False on Python 3, so metadata was always
            # reported as encrypted.
            if is_encrypted == b"false":
                return "0"
            else:
                return "1"
        else:
            # metadata is encrypted by default when the flag is absent
            return "1"

    def get_encryption_dictionary(self, object_id):
        """Return the body of the indirect object holding /Encrypt data."""
        encryption_dictionary = \
            self.get_data_between(object_id+b" obj", b"endobj")
        # the captured span may contain several objects; keep the right one
        for o in encryption_dictionary.split(b"endobj"):
            if object_id+b" obj" in o:
                encryption_dictionary = o
        return encryption_dictionary

    def get_encrypted_object_id(self, trailer):
        """Return the object number+generation (e.g. b'5 0') of /Encrypt."""
        oir = re.compile(b'\/Encrypt\s\d+\s\d\sR')
        object_id = oir.findall(trailer)[0]
        oir = re.compile(b'\d+ \d')
        object_id = oir.findall(object_id)[0]
        return object_id

    def get_trailer(self):
        """Locate the trailer (or fallback) section referencing /Encrypt."""
        trailer = self.get_data_between(b"trailer", b">>")
        if trailer == b"":
            # cross-reference-stream PDFs have no classic trailer keyword
            trailer = self.get_data_between(b"DecodeParms", b"stream")
            # BUG FIX: get_data_between returns bytes, so the original str
            # comparisons ('' / "") were always False/True under Python 3.
            if trailer == b"":
                raise RuntimeError("Can't find trailer")
        if trailer != b"" and trailer.find(b"Encrypt") == -1:
            print(trailer)
            raise RuntimeError("File not encrypted")
        return trailer

    def get_data_between(self, s1, s2):
        """Return the concatenated lines from the one containing ``s1``
        through the first subsequent line containing ``s2``."""
        output = b""
        inside_first = False
        lines = self.encrypted.split(b'\n')
        for line in lines:
            inside_first = inside_first or line.find(s1) != -1
            if inside_first:
                output += line
                if line.find(s2) != -1:
                    break
        return output

    def get_hex_byte(self, o_or_u, i):
        """Return the two-nibble-at-most hex string of byte ``i``."""
        if PY3:
            # indexing bytes yields an int on Python 3
            return hex(o_or_u[i]).replace('0x', '')
        else:
            return hex(ord(o_or_u[i])).replace('0x', '')

    def get_password_from_byte_string(self, o_or_u):
        """Convert a '/O (...)'-style literal byte string to '<len>*<hex>',
        resolving PDF escape sequences along the way."""
        pas = ""
        escape_seq = False
        escapes = 0
        # skip the leading "/X(" marker bytes
        excluded_indexes = [0, 1, 2]
        # For UE & OE in 1.7 spec the marker is one byte longer
        if not PY3:
            if o_or_u[2] != '(':
                excluded_indexes.append(3)
        else:
            if o_or_u[2] != 40:  # 40 == ord('(')
                excluded_indexes.append(3)
        for i in range(len(o_or_u)):
            if i not in excluded_indexes:
                if(len(self.get_hex_byte(o_or_u, i)) == 1 \
                        and o_or_u[i] != "\\"[0]):
                    pas += "0"  # need to be 2 digit hex numbers
                # NOTE(review): despite the name, is_back_slash is True when
                # the byte is NOT a backslash -- kept as-is to preserve the
                # original control flow.
                is_back_slash = True
                if not PY3:
                    is_back_slash = o_or_u[i] != "\\"[0]
                else:
                    is_back_slash = o_or_u[i] != 92
                if is_back_slash or escape_seq:
                    if escape_seq:
                        if not PY3:
                            esc = "\\"+o_or_u[i]
                        else:
                            esc = "\\"+chr(o_or_u[i])
                        esc = self.unescape(esc)
                        if len(hex(ord(esc[0])).replace('0x', '')) == 1:
                            pas += "0"
                        pas += hex(ord(esc[0])).replace('0x', '')
                        escape_seq = False
                    else:
                        pas += self.get_hex_byte(o_or_u, i)
                else:
                    escape_seq = True
                    escapes += 1
        output = len(o_or_u)-(len(excluded_indexes)+1)-escapes
        return str(output)+'*'+pas[:-2]

    def unescape(self, esc):
        """Map a two-character PDF escape sequence onto its character."""
        escape_seq_map = {'\\n': "\n", '\\s': "\s", '\\e': "\e",
                          '\\r': "\r", '\\t': "\t", '\\v': "\v", '\\f': "\f",
                          '\\b': "\b", '\\a': "\a", "\\)": ")",
                          "\\(": "(", "\\\\": "\\"}
        return escape_seq_map[esc]
if __name__ == "__main__":
    # Require at least one PDF path on the command line.
    if len(sys.argv) < 2:
        sys.stderr.write("Usage: %s <PDF file(s)>\n" % \
            sys.argv[0])
        sys.exit(-1)
    # Parse each file in order; bad files report to stderr and are skipped.
    for pdf_path in sys.argv[1:]:
        PdfParser(pdf_path).parse()
| |
import shutil
import sys
from pathlib import Path
import pytest
from porcupine import get_tab_manager
from porcupine.plugins import directory_tree as plugin_module
from porcupine.plugins.directory_tree import _focus_treeview, _stringify_path, get_path
def test_adding_nested_projects(tree, tmp_path):
    """Nested projects may coexist; the most recently added comes first."""
    def project_paths():
        return [get_path(project) for project in tree.get_children()]

    (tmp_path / "a" / "b").mkdir(parents=True)
    assert project_paths() == []

    tree.add_project(tmp_path / "a")
    assert project_paths() == [tmp_path / "a"]

    tree.add_project(tmp_path / "a" / "b")
    assert project_paths() == [tmp_path / "a" / "b", tmp_path / "a"]

    tree.add_project(tmp_path)
    assert project_paths() == [tmp_path, tmp_path / "a" / "b", tmp_path / "a"]
@pytest.mark.skipif(sys.platform == "win32", reason="rmtree can magically fail on windows")
def test_deleting_project(tree, tmp_path, tabmanager):
    """A project whose directory was deleted on disk drops out of the tree."""
    def get_project_names():
        return [get_path(project).name for project in tree.get_children()]
    (tmp_path / "a").mkdir(parents=True)
    (tmp_path / "b").mkdir(parents=True)
    (tmp_path / "a" / "README").touch()
    (tmp_path / "b" / "README").touch()
    a_tab = tabmanager.open_file(tmp_path / "a" / "README")
    assert get_project_names() == ["a"]
    tabmanager.close_tab(a_tab)
    # "a" no longer exists on disk; opening a file in "b" leaves only "b".
    shutil.rmtree(tmp_path / "a")
    tabmanager.open_file(tmp_path / "b" / "README")
    assert get_project_names() == ["b"]
def test_autoclose(tree, tmp_path, tabmanager, monkeypatch):
    """With _MAX_PROJECTS patched to 2, the least recently used project is
    closed when a third project would be added."""
    def get_project_names():
        return [get_path(project).name for project in tree.get_children()]
    (tmp_path / "a").mkdir(parents=True)
    (tmp_path / "b").mkdir(parents=True)
    (tmp_path / "c").mkdir(parents=True)
    (tmp_path / "a" / "README").touch()
    (tmp_path / "b" / "README").touch()
    (tmp_path / "c" / "README").touch()
    monkeypatch.setattr(plugin_module, "_MAX_PROJECTS", 2)
    assert get_project_names() == []
    a_tab = tabmanager.open_file(tmp_path / "a" / "README")
    assert get_project_names() == ["a"]
    b_tab = tabmanager.open_file(tmp_path / "b" / "README")
    assert get_project_names() == ["b", "a"]
    # Three projects open: "a" has no open tab... it still survives here,
    # only the over-limit cleanup below removes "b" (the LRU with no tab).
    c_tab = tabmanager.open_file(tmp_path / "c" / "README")
    assert get_project_names() == ["c", "b", "a"]
    tabmanager.close_tab(b_tab)
    assert get_project_names() == ["c", "a"]
    # Once within the limit, closing remaining tabs keeps the projects open.
    tabmanager.close_tab(c_tab)
    assert get_project_names() == ["c", "a"]
    tabmanager.close_tab(a_tab)
    assert get_project_names() == ["c", "a"]
def open_as_if_user_clicked(tree, item):
    """Select *item* and expand it, generating the same <<TreeviewOpen>>
    event sequence a real user interaction would produce."""
    tree.selection_set(item)
    tree.item(item, open=True)
    tree.event_generate("<<TreeviewOpen>>")
    tree.update()
def test_select_file(tree, tmp_path, tabmanager):
    """Selecting a tab highlights the file in the tree, or its project
    while the containing directory is still collapsed."""
    (tmp_path / "a").mkdir(parents=True)
    (tmp_path / "b").mkdir(parents=True)
    (tmp_path / "a" / "README").touch()
    (tmp_path / "b" / "README").touch()
    (tmp_path / "b" / "file1").touch()
    (tmp_path / "b" / "file2").touch()
    a_readme = tabmanager.open_file(tmp_path / "a" / "README")
    b_file1 = tabmanager.open_file(tmp_path / "b" / "file1")
    b_file2 = tabmanager.open_file(tmp_path / "b" / "file2")
    tree.update()
    tabmanager.select(a_readme)
    tree.update()
    assert get_path(tree.selection()[0]) == tmp_path / "a"
    # Directory "b" is collapsed, so only the project itself gets selected.
    tabmanager.select(b_file1)
    tree.update()
    assert get_path(tree.selection()[0]) == tmp_path / "b"
    # After expanding, the individual files become selectable.
    open_as_if_user_clicked(tree, tree.selection()[0])
    tabmanager.select(b_file1)
    tree.update()
    assert get_path(tree.selection()[0]) == tmp_path / "b" / "file1"
    tabmanager.select(b_file2)
    tree.update()
    assert get_path(tree.selection()[0]) == tmp_path / "b" / "file2"
    # Saving under a new name moves the selection to the new file.
    b_file2.save_as(tmp_path / "b" / "file3")
    tree.update()
    assert get_path(tree.selection()[0]) == tmp_path / "b" / "file3"
    tabmanager.close_tab(a_readme)
    tabmanager.close_tab(b_file1)
    tabmanager.close_tab(b_file2)
def test_focusing_treeview_with_keyboard_updates_selection(tree, tmp_path):
    """_focus_treeview() leaves some item selected in a non-empty tree."""
    (tmp_path / "README").touch()
    (tmp_path / "hello.py").touch()
    tree.add_project(tmp_path, refresh=False)
    _focus_treeview(tree)
    assert tree.selection()
def test_all_files_deleted(tree, tmp_path, tabmanager):
    """When every file in a project is deleted, the expanded project shows
    a dummy placeholder item instead of real children."""
    (tmp_path / "README").touch()
    (tmp_path / "hello.py").touch()
    tree.add_project(tmp_path)
    project_id = tree.get_children()[0]
    tree.selection_set(project_id)
    # Simulate user opening selected item
    tree.item(tree.selection()[0], open=True)
    tree.event_generate("<<TreeviewOpen>>")
    tree.update()
    assert len(tree.get_children(project_id)) == 2
    (tmp_path / "README").unlink()
    (tmp_path / "hello.py").unlink()
    # <<FileSystemChanged>> triggers a rescan of the project contents.
    get_tab_manager().event_generate("<<FileSystemChanged>>")
    assert tree.contains_dummy(project_id)
def test_nested_projects(tree, tmp_path, tabmanager):
    """A subdirectory opened as its own project is replaced, inside the
    outer project, by an '(open as a separate project)' dummy item."""
    (tmp_path / "README").touch()
    (tmp_path / "subdir").mkdir()
    (tmp_path / "subdir" / "README").touch()
    tree.add_project(tmp_path)
    [outer_project_id] = [id for id in tree.get_children("") if get_path(id) == tmp_path]
    open_as_if_user_clicked(tree, outer_project_id)
    [subdir_inside_other_project] = [
        item_id
        for item_id in tree.get_children(outer_project_id)
        if get_path(item_id) == tmp_path / "subdir"
    ]
    open_as_if_user_clicked(tree, subdir_inside_other_project)
    assert not tree.contains_dummy(subdir_inside_other_project)
    # Opening subdir as a separate project collapses the nested copy
    # into a dummy item.
    tree.add_project(tmp_path / "subdir")
    assert tree.contains_dummy(subdir_inside_other_project)
    dummy_id = tree.get_children(subdir_inside_other_project)[0]
    assert tree.item(dummy_id, "text") == "(open as a separate project)"
    # select_file() targets the separate project, not the nested dummy.
    [subdir_id] = [id for id in tree.get_children("") if get_path(id) == tmp_path / "subdir"]
    tree.select_file(tmp_path / "subdir" / "README")
    assert tree.selection() == (subdir_id,)
    open_as_if_user_clicked(tree, subdir_id)
    tree.select_file(tmp_path / "subdir" / "README")
    assert get_path(tree.selection()[0]) == tmp_path / "subdir" / "README"
def test_home_folder_displaying():
    """Paths inside the home directory are abbreviated with a leading "~"."""
    home = Path.home()
    assert _stringify_path(home) == "~"
    assert _stringify_path(home / "lol") in ("~/lol", r"~\lol")
    assert "~" not in _stringify_path(home.parent / "asdfggg")
def test_cycling_through_items(tree, tmp_path, tabmanager):
    """Pressing a letter key selects the next visible item whose name
    starts with that letter, cycling when there are several matches."""
    (tmp_path / "README").touch()
    (tmp_path / "foo.txt").touch()
    (tmp_path / "bar.txt").touch()
    (tmp_path / "baz.txt").touch()
    tree.add_project(tmp_path)
    [project_id] = [id for id in tree.get_children("") if get_path(id) == tmp_path]
    open_as_if_user_clicked(tree, project_id)
    open_as_if_user_clicked(tree, tree.get_children(project_id)[0])
    tree.update()
    tree.focus_force()
    tree.event_generate("f")
    assert get_path(tree.selection()[0]) == tmp_path / "foo.txt"
    # Two "b" names: repeated presses cycle bar.txt -> baz.txt -> bar.txt.
    tree.event_generate("b")
    assert get_path(tree.selection()[0]) == tmp_path / "bar.txt"
    tree.event_generate("b")
    assert get_path(tree.selection()[0]) == tmp_path / "baz.txt"
    tree.event_generate("b")
    assert get_path(tree.selection()[0]) == tmp_path / "bar.txt"
    # Only one "R" match, so repeating the key keeps the same selection.
    tree.event_generate("R")
    assert get_path(tree.selection()[0]) == tmp_path / "README"
    tree.event_generate("R")
    assert get_path(tree.selection()[0]) == tmp_path / "README"
    # No item starts with "x": the selection stays where it was.
    tree.event_generate("x")
    assert get_path(tree.selection()[0]) == tmp_path / "README"
def test_empty_directory_refreshing(tree, tmp_path):
    """refresh() picks up a file created inside a previously-empty,
    already-expanded directory."""
    new_dir_path = tmp_path / "a"
    new_dir_path.mkdir()
    tree.add_project(tmp_path)
    [project_id] = [id for id in tree.get_children("") if get_path(id) == tmp_path]
    open_as_if_user_clicked(tree, project_id)
    new_dir_id = tree.get_id_from_path(new_dir_path, project_id)
    open_as_if_user_clicked(tree, new_dir_id)
    # Create the file only after the directory is expanded, then refresh.
    new_file_path = new_dir_path / "foo.txt"
    new_file_path.touch()
    tree.refresh()
    new_dir_children = tree.get_children(new_dir_id)
    assert len(new_dir_children) == 1 and new_dir_children[0] == tree.get_id_from_path(
        new_file_path, project_id
    )
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import units
import six
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LW
from cinder import objects
from cinder.objects import fields
from cinder import policy
from cinder import quota
from cinder import quota_utils
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
# Policy action / task addon name for the volume-create flow.
ACTION = 'volume:create'
CONF = cfg.CONF
# One gibibyte in bytes; used to round image sizes up to whole GBs.
GB = units.Gi
QUOTAS = quota.QUOTAS
# Only in these 'sources' status can we attempt to create a volume from a
# source volume or a source snapshot, other status states we can not create
# from, 'error' being the common example.
SNAPSHOT_PROCEED_STATUS = (fields.SnapshotStatus.AVAILABLE,)
SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
REPLICA_PROCEED_STATUS = ('active', 'active-stopped',)
CG_PROCEED_STATUS = ('available', 'creating',)
CGSNAPSHOT_PROCEED_STATUS = ('available',)
GROUP_PROCEED_STATUS = ('available', 'creating',)
class ExtractVolumeRequestTask(flow_utils.CinderTask):
    """Processes an api request values into a validated set of values.

    This tasks responsibility is to take in a set of inputs that will form
    a potential volume request and validates those values against a set of
    conditions and/or translates those values into a valid set and then returns
    the validated/translated values for use by other tasks.

    Reversion strategy: N/A
    """
    # This task will produce the following outputs (said outputs can be
    # saved to durable storage in the future so that the flow can be
    # reconstructed elsewhere and continued).
    default_provides = set(['availability_zone', 'size', 'snapshot_id',
                            'source_volid', 'volume_type', 'volume_type_id',
                            'encryption_key_id', 'source_replicaid',
                            'consistencygroup_id', 'cgsnapshot_id',
                            'qos_specs', 'group_id'])
    def __init__(self, image_service, availability_zones, **kwargs):
        super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
                                                       **kwargs)
        self.image_service = image_service
        self.availability_zones = availability_zones
    @staticmethod
    def _extract_resource(resource, allowed_vals, exc, resource_name,
                          props=('status',)):
        """Extracts the resource id from the provided resource.

        This method validates the input resource dict and checks that the
        properties which names are passed in `props` argument match
        corresponding lists in `allowed` argument. In case of mismatch
        exception of type exc is raised.

        :param resource: Resource dict.
        :param allowed_vals: Tuple of allowed values lists.
        :param exc: Exception type to raise.
        :param resource_name: Name of resource - used to construct log message.
        :param props: Tuple of resource properties names to validate.
        :return: Id of a resource.
        """
        resource_id = None
        if resource:
            for prop, allowed_states in zip(props, allowed_vals):
                if resource[prop] not in allowed_states:
                    msg = _("Originating %(res)s %(prop)s must be one of "
                            "'%(vals)s' values")
                    msg = msg % {'res': resource_name,
                                 'prop': prop,
                                 'vals': ', '.join(allowed_states)}
                    # TODO(harlowja): what happens if the status changes after
                    # this initial resource status check occurs??? Seems like
                    # someone could delete the resource after this check passes
                    # but before the volume is officially created?
                    raise exc(reason=msg)
            resource_id = resource['id']
        return resource_id
    def _extract_consistencygroup(self, consistencygroup):
        """Returns the CG id if its status allows creating a volume in it."""
        return self._extract_resource(consistencygroup, (CG_PROCEED_STATUS,),
                                      exception.InvalidConsistencyGroup,
                                      'consistencygroup')
    def _extract_group(self, group):
        """Returns the group id if its status allows creating a volume."""
        return self._extract_resource(group, (GROUP_PROCEED_STATUS,),
                                      exception.InvalidGroup,
                                      'group')
    def _extract_cgsnapshot(self, cgsnapshot):
        """Returns the cgsnapshot id if its status allows creating from it."""
        return self._extract_resource(cgsnapshot, (CGSNAPSHOT_PROCEED_STATUS,),
                                      exception.InvalidCgSnapshot,
                                      'CGSNAPSHOT')
    def _extract_snapshot(self, snapshot):
        """Returns the snapshot id if its status allows creating from it."""
        return self._extract_resource(snapshot, (SNAPSHOT_PROCEED_STATUS,),
                                      exception.InvalidSnapshot, 'snapshot')
    def _extract_source_volume(self, source_volume):
        """Returns the source volume id if its status allows cloning."""
        return self._extract_resource(source_volume, (SRC_VOL_PROCEED_STATUS,),
                                      exception.InvalidVolume, 'source volume')
    def _extract_source_replica(self, source_replica):
        """Returns the replica id if both its status and replication_status
        allow creating a clone from it."""
        return self._extract_resource(source_replica, (SRC_VOL_PROCEED_STATUS,
                                                       REPLICA_PROCEED_STATUS),
                                      exception.InvalidVolume,
                                      'replica', ('status',
                                                  'replication_status'))
    @staticmethod
    def _extract_size(size, source_volume, snapshot):
        """Extracts and validates the volume size.

        This function will validate or when not provided fill in the provided
        size variable from the source_volume or snapshot and then does
        validation on the size that is found and returns said validated size.
        """
        def validate_snap_size(size):
            # Reject sizes smaller than the originating snapshot.
            if snapshot and size < snapshot.volume_size:
                msg = _("Volume size '%(size)s'GB cannot be smaller than"
                        " the snapshot size %(snap_size)sGB. "
                        "They must be >= original snapshot size.")
                msg = msg % {'size': size,
                             'snap_size': snapshot.volume_size}
                raise exception.InvalidInput(reason=msg)
        def validate_source_size(size):
            # Reject sizes smaller than the volume being cloned.
            if source_volume and size < source_volume['size']:
                msg = _("Volume size '%(size)s'GB cannot be smaller than "
                        "original volume size %(source_size)sGB. "
                        "They must be >= original volume size.")
                msg = msg % {'size': size,
                             'source_size': source_volume['size']}
                raise exception.InvalidInput(reason=msg)
        def validate_int(size):
            # Size must be a positive integer.
            if not isinstance(size, six.integer_types) or size <= 0:
                msg = _("Volume size '%(size)s' must be an integer and"
                        " greater than 0") % {'size': size}
                raise exception.InvalidInput(reason=msg)
        # Figure out which validation functions we should be applying
        # on the size value that we extract.
        validator_functors = [validate_int]
        if source_volume:
            validator_functors.append(validate_source_size)
        elif snapshot:
            validator_functors.append(validate_snap_size)
        # If the size is not provided then try to provide it.
        if not size and source_volume:
            size = source_volume['size']
        elif not size and snapshot:
            size = snapshot.volume_size
        size = utils.as_int(size)
        LOG.debug("Validating volume '%(size)s' using %(functors)s" %
                  {'size': size,
                   'functors': ", ".join([common.make_pretty_name(func)
                                          for func in validator_functors])})
        for func in validator_functors:
            func(size)
        return size
    def _check_image_metadata(self, context, image_id, size):
        """Checks image existence and validates the image metadata.

        Raises InvalidInput when the image is inactive, larger than the
        requested volume size, or has a min_disk larger than the size.
        """
        # Check image existence
        if image_id is None:
            return
        # NOTE(harlowja): this should raise an error if the image does not
        # exist, this is expected as it signals that the image_id is missing.
        image_meta = self.image_service.show(context, image_id)
        # check whether image is active
        if image_meta['status'] != 'active':
            msg = _('Image %(image_id)s is not active.')\
                % {'image_id': image_id}
            raise exception.InvalidInput(reason=msg)
        # Check image size is not larger than volume size.
        image_size = utils.as_int(image_meta['size'], quiet=False)
        # Round the image size up to whole gigabytes before comparing.
        image_size_in_gb = (image_size + GB - 1) // GB
        if image_size_in_gb > size:
            msg = _('Size of specified image %(image_size)sGB'
                    ' is larger than volume size %(volume_size)sGB.')
            msg = msg % {'image_size': image_size_in_gb, 'volume_size': size}
            raise exception.InvalidInput(reason=msg)
        # Check image min_disk requirement is met for the particular volume
        min_disk = image_meta.get('min_disk', 0)
        if size < min_disk:
            msg = _('Volume size %(volume_size)sGB cannot be smaller'
                    ' than the image minDisk size %(min_disk)sGB.')
            msg = msg % {'volume_size': size, 'min_disk': min_disk}
            raise exception.InvalidInput(reason=msg)
    def _get_image_volume_type(self, context, image_id):
        """Get cinder_img_volume_type property from the image metadata."""
        # Check image existence
        if image_id is None:
            return None
        image_meta = self.image_service.show(context, image_id)
        # check whether image is active
        if image_meta['status'] != 'active':
            msg = (_('Image %(image_id)s is not active.') %
                   {'image_id': image_id})
            raise exception.InvalidInput(reason=msg)
        # Retrieve 'cinder_img_volume_type' property from glance image
        # metadata.
        image_volume_type = "cinder_img_volume_type"
        properties = image_meta.get('properties')
        if properties:
            try:
                img_vol_type = properties.get(image_volume_type)
                if img_vol_type is None:
                    return None
                volume_type = volume_types.get_volume_type_by_name(
                    context,
                    img_vol_type)
            except exception.VolumeTypeNotFoundByName:
                LOG.warning(_LW("Failed to retrieve volume_type from image "
                                "metadata. '%(img_vol_type)s' doesn't match "
                                "any volume types."),
                            {'img_vol_type': img_vol_type})
                return None
            LOG.debug("Retrieved volume_type from glance image metadata. "
                      "image_id: %(image_id)s, "
                      "image property: %(image_volume_type)s, "
                      "volume_type: %(volume_type)s." %
                      {'image_id': image_id,
                       'image_volume_type': image_volume_type,
                       'volume_type': volume_type})
            return volume_type
    def _extract_availability_zone(self, availability_zone, snapshot,
                                   source_volume, group):
        """Extracts and returns a validated availability zone.

        This function will extract the availability zone (if not provided) from
        the snapshot or source_volume and then performs a set of validation
        checks on the provided or extracted availability zone and then returns
        the validated availability zone.
        """
        # If the volume will be created in a group, it should be placed in
        # in same availability zone as the group.
        if group:
            try:
                availability_zone = group['availability_zone']
            except (TypeError, KeyError):
                pass
        # Try to extract the availability zone from the corresponding snapshot
        # or source volume if either is valid so that we can be in the same
        # availability zone as the source.
        if availability_zone is None:
            if snapshot:
                try:
                    availability_zone = snapshot['volume']['availability_zone']
                except (TypeError, KeyError):
                    pass
            if source_volume and availability_zone is None:
                try:
                    availability_zone = source_volume['availability_zone']
                except (TypeError, KeyError):
                    pass
        if availability_zone is None:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone
        if availability_zone not in self.availability_zones:
            if CONF.allow_availability_zone_fallback:
                original_az = availability_zone
                availability_zone = (
                    CONF.default_availability_zone or
                    CONF.storage_availability_zone)
                LOG.warning(_LW("Availability zone '%(s_az)s' "
                                "not found, falling back to "
                                "'%(s_fallback_az)s'."),
                            {'s_az': original_az,
                             's_fallback_az': availability_zone})
            else:
                msg = _("Availability zone '%(s_az)s' is invalid.")
                msg = msg % {'s_az': availability_zone}
                raise exception.InvalidInput(reason=msg)
        # If the configuration only allows cloning to the same availability
        # zone then we need to enforce that.
        if CONF.cloned_volume_same_az:
            snap_az = None
            try:
                snap_az = snapshot['volume']['availability_zone']
            except (TypeError, KeyError):
                pass
            if snap_az and snap_az != availability_zone:
                msg = _("Volume must be in the same "
                        "availability zone as the snapshot")
                raise exception.InvalidInput(reason=msg)
            source_vol_az = None
            try:
                source_vol_az = source_volume['availability_zone']
            except (TypeError, KeyError):
                pass
            if source_vol_az and source_vol_az != availability_zone:
                msg = _("Volume must be in the same "
                        "availability zone as the source volume")
                raise exception.InvalidInput(reason=msg)
        return availability_zone
    def _get_encryption_key_id(self, key_manager, context, volume_type_id,
                               snapshot, source_volume):
        """Returns the encryption key id for the new volume (or None).

        For encrypted volume types, clones the source/snapshot key into a
        new key, or creates a fresh key from the type's encryption spec.
        """
        encryption_key_id = None
        if volume_types.is_encrypted(context, volume_type_id):
            if snapshot is not None:  # creating from snapshot
                encryption_key_id = snapshot['encryption_key_id']
            elif source_volume is not None:  # cloning volume
                encryption_key_id = source_volume['encryption_key_id']
            # NOTE(joel-coffman): References to the encryption key should *not*
            # be copied because the key is deleted when the volume is deleted.
            # Clone the existing key and associate a separate -- but
            # identical -- key with each volume.
            if encryption_key_id is not None:
                encryption_key_id = key_manager.store(
                    context, key_manager.get(context, encryption_key_id))
            else:
                volume_type_encryption = (
                    volume_types.get_volume_type_encryption(context,
                                                            volume_type_id))
                cipher = volume_type_encryption.cipher
                length = volume_type_encryption.key_size
                # NOTE(kaitlin-farr): dm-crypt expects the cipher in a
                # hyphenated format (aes-xts-plain64). The algorithm needs
                # to be parsed out to pass to the key manager (aes).
                algorithm = cipher.split('-')[0] if cipher else None
                encryption_key_id = key_manager.create_key(context,
                                                           algorithm=algorithm,
                                                           length=length)
        return encryption_key_id
    def _get_volume_type_id(self, volume_type, source_volume, snapshot):
        """Resolves the volume type id from the type, source volume or
        snapshot; snapshots force the snapshot's original type.

        NOTE(review): if volume_type is None here with no source_volume and
        no snapshot, the final .get() would raise AttributeError; execute()
        appears to always supply a type first -- confirm before relying on
        calling this directly.
        """
        if not volume_type and source_volume:
            return source_volume['volume_type_id']
        elif snapshot is not None:
            if volume_type:
                current_volume_type_id = volume_type.get('id')
                if current_volume_type_id != snapshot['volume_type_id']:
                    msg = _LW("Volume type will be changed to "
                              "be the same as the source volume.")
                    LOG.warning(msg)
            return snapshot['volume_type_id']
        else:
            return volume_type.get('id')
    def execute(self, context, size, snapshot, image_id, source_volume,
                availability_zone, volume_type, metadata, key_manager,
                source_replica, consistencygroup, cgsnapshot, group):
        """Validates/translates the request inputs and returns refined values.

        Runs the exclusive-option and policy checks, extracts ids from the
        source resources, then resolves the size, availability zone, volume
        type, encryption key, qos specs and replication status.
        """
        utils.check_exclusive_options(snapshot=snapshot,
                                      imageRef=image_id,
                                      source_volume=source_volume)
        policy.enforce_action(context, ACTION)
        # TODO(harlowja): what guarantee is there that the snapshot or source
        # volume will remain available after we do this initial verification??
        snapshot_id = self._extract_snapshot(snapshot)
        source_volid = self._extract_source_volume(source_volume)
        source_replicaid = self._extract_source_replica(source_replica)
        size = self._extract_size(size, source_volume, snapshot)
        consistencygroup_id = self._extract_consistencygroup(consistencygroup)
        cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot)
        group_id = self._extract_group(group)
        self._check_image_metadata(context, image_id, size)
        availability_zone = self._extract_availability_zone(availability_zone,
                                                            snapshot,
                                                            source_volume,
                                                            group)
        # TODO(joel-coffman): This special handling of snapshots to ensure that
        # their volume type matches the source volume is too convoluted. We
        # should copy encryption metadata from the encrypted volume type to the
        # volume upon creation and propagate that information to each snapshot.
        # This strategy avoids any dependency upon the encrypted volume type.
        def_vol_type = volume_types.get_default_volume_type()
        if not volume_type and not source_volume and not snapshot:
            image_volume_type = self._get_image_volume_type(context, image_id)
            volume_type = (image_volume_type if image_volume_type else
                           def_vol_type)
        # When creating a clone of a replica (replication test), we can't
        # use the volume type of the replica, therefore, we use the default.
        # NOTE(ronenkat): this assumes the default type is not replicated.
        if source_replicaid:
            volume_type = def_vol_type
        volume_type_id = self._get_volume_type_id(volume_type,
                                                  source_volume, snapshot)
        encryption_key_id = self._get_encryption_key_id(key_manager,
                                                        context,
                                                        volume_type_id,
                                                        snapshot,
                                                        source_volume)
        specs = {}
        if volume_type_id:
            qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id)
            if qos_specs['qos_specs']:
                specs = qos_specs['qos_specs'].get('specs', {})
            # Determine default replication status
            extra_specs = volume_types.get_volume_type_extra_specs(
                volume_type_id)
        if not specs:
            # to make sure we don't pass empty dict
            specs = None
            extra_specs = None
        if vol_utils.is_replicated_spec(extra_specs):
            replication_status = fields.ReplicationStatus.ENABLED
        else:
            replication_status = fields.ReplicationStatus.DISABLED
        return {
            'size': size,
            'snapshot_id': snapshot_id,
            'source_volid': source_volid,
            'availability_zone': availability_zone,
            'volume_type': volume_type,
            'volume_type_id': volume_type_id,
            'encryption_key_id': encryption_key_id,
            'qos_specs': specs,
            'source_replicaid': source_replicaid,
            'consistencygroup_id': consistencygroup_id,
            'cgsnapshot_id': cgsnapshot_id,
            'group_id': group_id,
            'replication_status': replication_status,
        }
class EntryCreateTask(flow_utils.CinderTask):
    """Creates an entry for the given volume creation in the database.

    Reversion strategy: remove the volume_id created from the database.
    """
    default_provides = set(['volume_properties', 'volume_id', 'volume'])
    def __init__(self):
        # Inputs this task consumes from earlier tasks in the flow.
        requires = ['availability_zone', 'description', 'metadata',
                    'name', 'reservations', 'size', 'snapshot_id',
                    'source_volid', 'volume_type_id', 'encryption_key_id',
                    'source_replicaid', 'consistencygroup_id',
                    'cgsnapshot_id', 'multiattach', 'qos_specs',
                    'group_id', ]
        super(EntryCreateTask, self).__init__(addons=[ACTION],
                                              requires=requires)
    def execute(self, context, optional_args, **kwargs):
        """Creates a database entry for the given inputs and returns details.

        Accesses the database and creates a new entry for the to be created
        volume using the given volume properties which are extracted from the
        input kwargs (and associated requirements this task needs). These
        requirements should be previously satisfied and validated by a
        pre-cursor task.
        """
        src_volid = kwargs.get('source_volid')
        src_vol = None
        if src_volid is not None:
            src_vol = objects.Volume.get_by_id(context, src_volid)
        # A clone of a bootable volume is itself bootable.
        bootable = False
        if src_vol is not None:
            bootable = src_vol.bootable
        volume_properties = {
            'size': kwargs.pop('size'),
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': 'creating',
            'attach_status': fields.VolumeAttachStatus.DETACHED,
            'encryption_key_id': kwargs.pop('encryption_key_id'),
            # Rename these to the internal name.
            'display_description': kwargs.pop('description'),
            'display_name': kwargs.pop('name'),
            'multiattach': kwargs.pop('multiattach'),
            'bootable': bootable,
        }
        # Merge in the other required arguments which should provide the rest
        # of the volume property fields (if applicable).
        volume_properties.update(kwargs)
        volume = objects.Volume(context=context, **volume_properties)
        volume.create()
        # FIXME(dulek): We're passing this volume_properties dict through RPC
        # in request_spec. This shouldn't be needed, most data is replicated
        # in both volume and other places. We should make Newton read data
        # from just one correct place and leave just compatibility code.
        #
        # Right now - let's move it to versioned objects to be able to make
        # non-backward compatible changes.
        volume_properties = objects.VolumeProperties(**volume_properties)
        return {
            'volume_id': volume['id'],
            'volume_properties': volume_properties,
            # NOTE(harlowja): it appears like further usage of this volume
            # result actually depend on it being a sqlalchemy object and not
            # just a plain dictionary so that's why we are storing this here.
            #
            # In the future where this task results can be serialized and
            # restored automatically for continued running we will need to
            # resolve the serialization & recreation of this object since raw
            # sqlalchemy objects can't be serialized.
            'volume': volume,
        }
    def revert(self, context, result, optional_args, **kwargs):
        """Destroys the created volume entry unless quota was committed."""
        if isinstance(result, ft.Failure):
            # We never produced a result and therefore can't destroy anything.
            return
        if optional_args['is_quota_committed']:
            # If quota got committed we shouldn't rollback as the volume has
            # already been created and the quota has already been absorbed.
            return
        volume = result['volume']
        try:
            volume.destroy()
        except exception.CinderException:
            # We are already reverting, therefore we should silence this
            # exception since a second exception being active will be bad.
            #
            # NOTE(harlowja): Being unable to destroy a volume is pretty
            # bad though!!
            LOG.exception(_LE("Failed destroying volume entry %s"), volume.id)
class QuotaReserveTask(flow_utils.CinderTask):
    """Reserves a single volume with the given size & the given volume type.

    Reversion strategy: rollback the quota reservation.

    Warning Warning: if the process that is running this reserve and commit
    process fails (or is killed before the quota is rolled back or committed
    it does appear like the quota will never be rolled back). This makes
    software upgrades hard (inflight operations will need to be stopped or
    allowed to complete before the upgrade can occur). *In the future* when
    taskflow has persistence built-in this should be easier to correct via
    an automated or manual process.
    """
    default_provides = set(['reservations'])
    def __init__(self):
        super(QuotaReserveTask, self).__init__(addons=[ACTION])
    def execute(self, context, size, volume_type_id, optional_args):
        """Reserves quota for one volume of the given size and type.

        :raises VolumeSizeExceedsLimit: if size is over per_volume_gigabytes.
        Other over-quota conditions are delegated to quota_utils.
        """
        try:
            # Per-volume size limit check, independent of the reservations.
            values = {'per_volume_gigabytes': size}
            QUOTAS.limit_check(context, project_id=context.project_id,
                               **values)
        except exception.OverQuota as e:
            quotas = e.kwargs['quotas']
            raise exception.VolumeSizeExceedsLimit(
                size=size, limit=quotas['per_volume_gigabytes'])
        try:
            reserve_opts = {'volumes': 1, 'gigabytes': size}
            QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
            reservations = QUOTAS.reserve(context, **reserve_opts)
            return {
                'reservations': reservations,
            }
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context, e,
                                                   resource='volumes',
                                                   size=size)
    def revert(self, context, result, optional_args, **kwargs):
        """Rolls back the reservation unless it was already committed."""
        # We never produced a result and therefore can't destroy anything.
        if isinstance(result, ft.Failure):
            return
        if optional_args['is_quota_committed']:
            # The reservations have already been committed and can not be
            # rolled back at this point.
            return
        # We actually produced an output that we can revert so lets attempt
        # to use said output to rollback the reservation.
        reservations = result['reservations']
        try:
            QUOTAS.rollback(context, reservations)
        except exception.CinderException:
            # We are already reverting, therefore we should silence this
            # exception since a second exception being active will be bad.
            LOG.exception(_LE("Failed rolling back quota for"
                              " %s reservations"), reservations)
class QuotaCommitTask(flow_utils.CinderTask):
    """Commits the reservation.

    Reversion strategy: N/A (the rollback will be handled by the task that did
    the initial reservation (see: QuotaReserveTask).

    Warning Warning: if the process that is running this reserve and commit
    process fails (or is killed before the quota is rolled back or committed
    it does appear like the quota will never be rolled back). This makes
    software upgrades hard (inflight operations will need to be stopped or
    allowed to complete before the upgrade can occur). *In the future* when
    taskflow has persistence built-in this should be easier to correct via
    an automated or manual process.
    """
    def __init__(self):
        super(QuotaCommitTask, self).__init__(addons=[ACTION])
    def execute(self, context, reservations, volume_properties,
                optional_args):
        """Commits the quota reservations and marks them committed so later
        reverts (see QuotaReserveTask/EntryCreateTask) skip their rollback."""
        QUOTAS.commit(context, reservations)
        # updating is_quota_committed attribute of optional_args dictionary
        optional_args['is_quota_committed'] = True
        return {'volume_properties': volume_properties}
    def revert(self, context, result, **kwargs):
        """Re-reserves and commits negative usage to undo the committed
        quota for the volume that will not be created after all."""
        # We never produced a result and therefore can't destroy anything.
        if isinstance(result, ft.Failure):
            return
        volume = result['volume_properties']
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume['volume_type_id'])
            reservations = QUOTAS.reserve(context,
                                          project_id=context.project_id,
                                          **reserve_opts)
            if reservations:
                QUOTAS.commit(context, reservations,
                              project_id=context.project_id)
        except Exception:
            LOG.exception(_LE("Failed to update quota for deleting "
                              "volume: %s"), volume['id'])
class VolumeCastTask(flow_utils.CinderTask):
    """Performs a volume create cast to the scheduler or to the volume manager.

    This will signal a transition of the api workflow to another child and/or
    related workflow on another component.

    Reversion strategy: rollback source volume status and error out newly
    created volume.
    """

    def __init__(self, scheduler_rpcapi, volume_rpcapi, db):
        # Inputs this task consumes from the flow's shared store.
        requires = ['image_id', 'scheduler_hints', 'snapshot_id',
                    'source_volid', 'volume_id', 'volume', 'volume_type',
                    'volume_properties', 'source_replicaid',
                    'consistencygroup_id', 'cgsnapshot_id', 'group_id', ]
        super(VolumeCastTask, self).__init__(addons=[ACTION],
                                             requires=requires)
        self.volume_rpcapi = volume_rpcapi
        self.scheduler_rpcapi = scheduler_rpcapi
        self.db = db

    def _cast_create_volume(self, context, request_spec, filter_properties):
        """Route the create request.

        Goes through the scheduler unless the request is tied to an
        existing snapshot/source volume, in which case the volume manager
        on that host is called directly.
        """
        source_volume_ref = None
        # Either a clone source or a replica source pins the target host.
        source_volid = (request_spec['source_volid'] or
                        request_spec['source_replicaid'])
        volume = request_spec['volume']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']
        cgroup_id = request_spec['consistencygroup_id']
        cgsnapshot_id = request_spec['cgsnapshot_id']
        group_id = request_spec['group_id']
        if cgroup_id:
            # If cgroup_id existed, we should cast volume to the scheduler
            # to choose a proper pool whose backend is same as CG's backend.
            cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
            request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
        elif group_id:
            # If group_id exists, we should cast volume to the scheduler
            # to choose a proper pool whose backend is same as group's backend.
            group = objects.Group.get_by_id(context, group_id)
            # FIXME(wanghao): group_backend got added before request_spec was
            # converted to versioned objects. We should make sure that this
            # will be handled by object version translations once we add
            # RequestSpec object.
            request_spec['group_backend'] = vol_utils.extract_host(group.host)
        elif snapshot_id and CONF.snapshot_same_host:
            # NOTE(Rongze Zhu): A simple solution for bug 1008866.
            #
            # If snapshot_id is set and CONF.snapshot_same_host is True, make
            # the call create volume directly to the volume host where the
            # snapshot resides instead of passing it through the scheduler, so
            # snapshot can be copied to the new volume.
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            source_volume_ref = snapshot.volume
        elif source_volid:
            source_volume_ref = objects.Volume.get_by_id(context, source_volid)

        if not source_volume_ref:
            # Cast to the scheduler and let it handle whatever is needed
            # to select the target host for this volume.
            self.scheduler_rpcapi.create_volume(
                context,
                volume,
                snapshot_id=snapshot_id,
                image_id=image_id,
                request_spec=request_spec,
                filter_properties=filter_properties)
        else:
            # Bypass the scheduler and send the request directly to the volume
            # manager.
            volume.host = source_volume_ref.host
            volume.cluster_name = source_volume_ref.cluster_name
            volume.scheduled_at = timeutils.utcnow()
            volume.save()
            if not cgsnapshot_id:
                self.volume_rpcapi.create_volume(
                    context,
                    volume,
                    request_spec,
                    filter_properties,
                    allow_reschedule=False)

    def execute(self, context, **kwargs):
        """Assemble a RequestSpec from the flow data and cast it onward."""
        scheduler_hints = kwargs.pop('scheduler_hints', None)
        # Replace the raw DB volume-type dict with a VolumeType object.
        db_vt = kwargs.pop('volume_type')
        kwargs['volume_type'] = None
        if db_vt:
            kwargs['volume_type'] = objects.VolumeType()
            objects.VolumeType()._from_db_object(context,
                                                 kwargs['volume_type'], db_vt)
        request_spec = objects.RequestSpec(**kwargs)
        filter_properties = {}
        if scheduler_hints:
            filter_properties['scheduler_hints'] = scheduler_hints
        self._cast_create_volume(context, request_spec, filter_properties)

    def revert(self, context, result, flow_failures, volume, **kwargs):
        """Roll back source volume status and mark the new volume errored."""
        if isinstance(result, ft.Failure):
            return

        # Restore the source volume status and set the volume to error status.
        common.restore_source_status(context, self.db, kwargs)
        common.error_out(volume)
        LOG.error(_LE("Volume %s: create failed"), volume.id)
        # Attach the last failure's traceback to the log entry when the
        # full exc_info triple is available.
        exc_info = False
        if all(flow_failures[-1].exc_info):
            exc_info = flow_failures[-1].exc_info
        LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def get_flow(db_api, image_service_api, availability_zones, create_what,
             scheduler_rpcapi=None, volume_rpcapi=None):
    """Constructs and returns the api entrypoint flow.

    This flow will do the following:

    1. Inject keys & values for dependent tasks.
    2. Extracts and validates the input keys & values.
    3. Reserves the quota (reverts quota on any failures).
    4. Creates the database entry.
    5. Commits the quota.
    6. Casts to volume manager or scheduler for further processing.
    """
    flow_name = ACTION.replace(":", "_") + "_api"
    api_flow = linear_flow.Flow(flow_name)

    # Validation/extraction runs first; the rebinds map the raw user
    # inputs onto the task's expected argument names.
    extract_task = ExtractVolumeRequestTask(
        image_service_api,
        availability_zones,
        rebind={'size': 'raw_size',
                'availability_zone': 'raw_availability_zone',
                'volume_type': 'raw_volume_type'})
    api_flow.add(extract_task)
    api_flow.add(QuotaReserveTask(),
                 EntryCreateTask(),
                 QuotaCommitTask())

    if scheduler_rpcapi and volume_rpcapi:
        # This will cast it out to either the scheduler or volume manager
        # via the rpc apis provided.
        api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db_api))

    # Now load (but do not run) the flow using the provided initial data.
    return taskflow.engines.load(api_flow, store=create_what)
| |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# yardstick comment: this is a modified copy of
# rally/rally/benchmark/runners/base.py
import importlib
import logging
import multiprocessing
import subprocess
import time
import traceback
from oslo_config import cfg
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base as base_scenario
from yardstick.dispatcher.base import Base as DispatcherBase
log = logging.getLogger(__name__)
CONF = cfg.CONF
def _output_serializer_main(filename, queue):
    '''entrypoint for the singleton subprocess writing to outfile

    Use of this process enables multiple instances of a scenario without
    messing up the output file.
    '''
    dispatcher = DispatcherBase.get({
        "type": CONF.dispatcher.capitalize(),
        "file_path": filename,
    })

    while True:
        # blocks until data becomes available
        record = queue.get()
        if record == '_TERMINATE_':
            # Sentinel: flush whatever is buffered and exit the process.
            dispatcher.flush_result_data()
            break
        dispatcher.record_result_data(record)
def _execute_shell_command(command):
'''execute shell script with error handling'''
exitcode = 0
output = []
try:
output = subprocess.check_output(command, shell=True)
except Exception:
exitcode = -1
output = traceback.format_exc()
log.error("exec command '%s' error:\n " % command)
log.error(traceback.format_exc())
return exitcode, output
def _single_action(seconds, command, queue):
    '''entrypoint for the single action process'''
    log.debug("single action, fires after %d seconds (from now)", seconds)
    time.sleep(seconds)
    log.debug("single action: executing command: '%s'", command)
    status, output = _execute_shell_command(command)
    if status < 0:
        log.error("single action error! command:%s" % command)
    else:
        log.debug("single action data: \n%s" % output)
    # The command output is reported either way.
    queue.put({'single-action-data': output})
def _periodic_action(interval, command, queue):
    '''entrypoint for the periodic action process'''
    log.debug("periodic action, fires every: %d seconds", interval)
    total_time = 0
    while True:
        time.sleep(interval)
        total_time += interval
        log.debug("periodic action, executing command: '%s'", command)
        status, output = _execute_shell_command(command)
        if status < 0:
            # First failure stops the periodic loop entirely.
            log.error("periodic action error! command:%s", command)
            queue.put({'periodic-action-data': output})
            break
        log.debug("periodic action data: \n%s" % output)
        queue.put({'periodic-action-data': output})
class Runner(object):
    """Base class for scenario runners.

    Tracks all live runner instances in a class-level registry and owns a
    singleton "dumper" subprocess that serializes benchmark results from a
    shared queue to the output file.
    """

    # Shared queue feeding the dumper subprocess.
    queue = None
    # Singleton subprocess draining `queue` (see _output_serializer_main).
    dump_process = None
    # Registry of all currently active runner instances.
    runners = []

    @staticmethod
    def get_cls(runner_type):
        '''return class of specified type'''
        # Matches on the subclass-declared __execution_type__ marker.
        for runner in utils.itersubclasses(Runner):
            if runner_type == runner.__execution_type__:
                return runner
        raise RuntimeError("No such runner_type %s" % runner_type)

    @staticmethod
    def get_types():
        '''return a list of known runner type (class) names'''
        types = []
        for runner in utils.itersubclasses(Runner):
            types.append(runner)
        return types

    @staticmethod
    def get(config):
        """Returns instance of a scenario runner for execution type.
        """
        # if there is no runner, start the output serializer subprocess
        if len(Runner.runners) == 0:
            log.debug("Starting dump process file '%s'" %
                      config["output_filename"])
            Runner.queue = multiprocessing.Queue()
            Runner.dump_process = multiprocessing.Process(
                target=_output_serializer_main,
                name="Dumper",
                args=(config["output_filename"], Runner.queue))
            Runner.dump_process.start()

        return Runner.get_cls(config["type"])(config, Runner.queue)

    @staticmethod
    def release_dump_process():
        '''Release the dumper process'''
        log.debug("Stopping dump process")
        if Runner.dump_process:
            # Sentinel understood by _output_serializer_main; join waits
            # for the final flush before clearing the handle.
            Runner.queue.put('_TERMINATE_')
            Runner.dump_process.join()
            Runner.dump_process = None

    @staticmethod
    def release(runner):
        '''Release the runner'''
        if runner in Runner.runners:
            Runner.runners.remove(runner)

        # if this was the last runner, stop the output serializer subprocess
        if len(Runner.runners) == 0:
            Runner.release_dump_process()

    @staticmethod
    def terminate(runner):
        '''Terminate the runner'''
        if runner.process and runner.process.is_alive():
            runner.process.terminate()

    @staticmethod
    def terminate_all():
        '''Terminate all runners (subprocesses)'''
        log.debug("Terminating all runners")

        # release dumper process as some errors before any runner is created
        if len(Runner.runners) == 0:
            Runner.release_dump_process()
            return

        for runner in Runner.runners:
            log.debug("Terminating runner: %s", runner)
            if runner.process:
                runner.process.terminate()
                runner.process.join()
            if runner.periodic_action_process:
                log.debug("Terminating periodic action process")
                runner.periodic_action_process.terminate()
                runner.periodic_action_process = None
            Runner.release(runner)

    def __init__(self, config, queue):
        self.config = config
        self.periodic_action_process = None
        # Results are funneled to the dumper subprocess via this queue.
        self.result_queue = queue
        self.process = None
        # Cooperative cancellation flag checked by subclass run loops.
        self.aborted = multiprocessing.Event()
        Runner.runners.append(self)

    def run_post_stop_action(self):
        '''run a potentially configured post-stop action'''
        if "post-stop-action" in self.config:
            command = self.config["post-stop-action"]["command"]
            log.debug("post stop action: command: '%s'" % command)
            ret_code, data = _execute_shell_command(command)
            if ret_code < 0:
                log.error("post action error! command:%s", command)
                self.result_queue.put({'post-stop-action-data': data})
                return
            log.debug("post-stop data: \n%s" % data)
            self.result_queue.put({'post-stop-action-data': data})

    def run(self, scenario_cfg, context_cfg):
        """Resolve the scenario class, run optional hook actions, then
        delegate to the subclass-provided _run_benchmark.
        """
        scenario_type = scenario_cfg["type"]
        # Scenario.get returns a dotted path; import the module and pull
        # out the class object.
        class_name = base_scenario.Scenario.get(scenario_type)
        path_split = class_name.split(".")
        module_path = ".".join(path_split[:-1])
        module = importlib.import_module(module_path)
        cls = getattr(module, path_split[-1])

        self.config['object'] = class_name
        self.aborted.clear()

        # run a potentially configured pre-start action
        if "pre-start-action" in self.config:
            command = self.config["pre-start-action"]["command"]
            log.debug("pre start action: command: '%s'" % command)
            ret_code, data = _execute_shell_command(command)
            if ret_code < 0:
                log.error("pre-start action error! command:%s", command)
                self.result_queue.put({'pre-start-action-data': data})
                return
            log.debug("pre-start data: \n%s" % data)
            self.result_queue.put({'pre-start-action-data': data})

        if "single-shot-action" in self.config:
            # NOTE: this process handle is not retained, so it is never
            # joined/terminated by terminate_all().
            single_action_process = multiprocessing.Process(
                target=_single_action,
                name="single-shot-action",
                args=(self.config["single-shot-action"]["after"],
                      self.config["single-shot-action"]["command"],
                      self.result_queue))
            single_action_process.start()

        if "periodic-action" in self.config:
            self.periodic_action_process = multiprocessing.Process(
                target=_periodic_action,
                name="periodic-action",
                args=(self.config["periodic-action"]["interval"],
                      self.config["periodic-action"]["command"],
                      self.result_queue))
            self.periodic_action_process.start()

        self._run_benchmark(cls, "run", scenario_cfg, context_cfg)

    def abort(self):
        '''Abort the execution of a scenario'''
        self.aborted.set()

    def join(self, timeout=None):
        # Wait for the benchmark subprocess, stop any periodic action,
        # then run the post-stop hook and report the exit code.
        self.process.join(timeout)
        if self.periodic_action_process:
            self.periodic_action_process.terminate()
            self.periodic_action_process = None

        self.run_post_stop_action()
        return self.process.exitcode
| |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
#
#
# Parts of this code is from IPyVolume (24.05.2017), used here under
# this copyright and license with permission from the author
# (see https://github.com/jupyter-widgets/ipywidgets/pull/1387)
"""
Functions for generating embeddable HTML/javascript of a widget.
"""
import json
import re
from .widgets import Widget, DOMWidget
from .widgets.widget_link import Link
from .widgets.docutils import doc_subst
from ._version import __html_manager_version__
snippet_template = """
{load}
<script type="application/vnd.jupyter.widget-state+json">
{json_data}
</script>
{widget_views}
"""
load_template = """<script src="{embed_url}"{use_cors}></script>"""
load_requirejs_template = """
<!-- Load require.js. Delete this if your page already loads require.js -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" crossorigin="anonymous"></script>
<script src="{embed_url}"{use_cors}></script>
"""
requirejs_snippet_template = """
<script type="application/vnd.jupyter.widget-state+json">
{json_data}
</script>
{widget_views}
"""
html_template = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{title}</title>
</head>
<body>
{snippet}
</body>
</html>
"""
widget_view_template = """<script type="application/vnd.jupyter.widget-view+json">
{view_spec}
</script>"""
DEFAULT_EMBED_SCRIPT_URL = 'https://unpkg.com/@jupyter-widgets/html-manager@%s/dist/embed.js'%__html_manager_version__
DEFAULT_EMBED_REQUIREJS_URL = 'https://unpkg.com/@jupyter-widgets/html-manager@%s/dist/embed-amd.js'%__html_manager_version__
_doc_snippets = {}
_doc_snippets['views_attribute'] = """
views: widget or collection of widgets or None
The widgets to include views for. If None, all DOMWidgets are
included (not just the displayed ones).
"""
_doc_snippets['embed_kwargs'] = """
drop_defaults: boolean
Whether to drop default values from the widget states.
state: dict or None (default)
The state to include. When set to None, the state of all widgets
know to the widget manager is included. Otherwise it uses the
passed state directly. This allows for end users to include a
smaller state, under the responsibility that this state is
sufficient to reconstruct the embedded views.
indent: integer, string or None
The indent to use for the JSON state dump. See `json.dumps` for
full description.
embed_url: string or None
Allows for overriding the URL used to fetch the widget manager
for the embedded code. This defaults (None) to an `unpkg` CDN url.
requirejs: boolean (True)
Enables the requirejs-based embedding, which allows for custom widgets.
If True, the embed_url should point to an AMD module.
cors: boolean (True)
If True avoids sending user credentials while requesting the scripts.
When opening an HTML file from disk, some browsers may refuse to load
the scripts.
"""
def _find_widget_refs_by_state(widget, state):
    """Find references to other widgets in a widget's state"""
    # Iterate over a snapshot of the keys so the caller may mutate the
    # state dict while consuming this generator.
    for key in tuple(state.keys()):
        value = getattr(widget, key)
        if isinstance(value, Widget):
            # Trivial case: a direct reference to another widget.
            yield value
        elif isinstance(value, (list, tuple)):
            # Buried references in known, JSON-able sequence structures.
            # Note: more esoteric containers are not searched.
            for element in value:
                if isinstance(element, Widget):
                    yield element
        elif isinstance(value, dict):
            for element in value.values():
                if isinstance(element, Widget):
                    yield element
def _get_recursive_state(widget, store=None, drop_defaults=False):
    """Gets the embed state of a widget, and all other widgets it refers to as well"""
    if store is None:
        store = {}
    state = widget._get_embed_state(drop_defaults=drop_defaults)
    store[widget.model_id] = state

    # Follow references present in the serialized state only (excluded
    # values are never visited), recursing into unseen widgets.
    for ref in _find_widget_refs_by_state(widget, state['state']):
        if ref.model_id not in store:
            _get_recursive_state(ref, store, drop_defaults=drop_defaults)
    return store
def add_resolved_links(store, drop_defaults):
    """Adds the state of any link models between two models in store"""
    # Scan every known widget for Link models whose two endpoints are
    # both already included.
    for widget_id, widget in Widget.widgets.items():
        if not isinstance(widget, Link) or widget_id in store:
            continue
        source_included = widget.source[0].model_id in store
        target_included = widget.target[0].model_id in store
        if source_included and target_included:
            store[widget.model_id] = widget._get_embed_state(
                drop_defaults=drop_defaults)
def dependency_state(widgets, drop_defaults=True):
    """Get the state of all widgets specified, and their dependencies.

    This uses a simple dependency finder, including:
     - any widget directly referenced in the state of an included widget
     - any widget in a list/tuple attribute in the state of an included widget
     - any widget in a dict attribute in the state of an included widget
     - any jslink/jsdlink between two included widgets

    What this algorithm does not do:
     - Find widget references in nested list/dict structures
     - Find widget references in other types of attributes

    Note that this searches the state of the widgets for references, so if
    a widget reference is not included in the serialized state, it won't
    be considered as a dependency.

    Parameters
    ----------
    widgets: single widget or list of widgets.
        This function will return the state of every widget mentioned
        and of all their dependencies.
    drop_defaults: boolean
        Whether to drop default values from the widget states.

    Returns
    -------
    A dictionary with the state of the widgets and any widget they
    depend on.
    """
    if widgets is None:
        # Everything is requested, so no dependency resolution is needed.
        state = Widget.get_manager_state(drop_defaults=drop_defaults,
                                         widgets=None)['state']
    else:
        # Accept a single widget as well as a sequence of widgets.
        try:
            widgets[0]
        except (IndexError, TypeError):
            widgets = [widgets]
        state = {}
        for widget in widgets:
            _get_recursive_state(widget, state, drop_defaults)
    # Add any links between included widgets:
    add_resolved_links(state, drop_defaults)
    return state
@doc_subst(_doc_snippets)
def embed_data(views, drop_defaults=True, state=None):
    """Gets data for embedding.

    Use this to get the raw data for embedding if you have special
    formatting needs.

    Parameters
    ----------
    {views_attribute}
    drop_defaults: boolean
        Whether to drop default values from the widget states.
    state: dict or None (default)
        The state to include. When set to None, the state of all widgets
        know to the widget manager is included. Otherwise it uses the
        passed state directly. This allows for end users to include a
        smaller state, under the responsibility that this state is
        sufficient to reconstruct the embedded views.

    Returns
    -------
    A dictionary with the following entries:
        manager_state: dict of the widget manager state data
        view_specs: a list of widget view specs
    """
    if views is None:
        # Default to a view for every displayed-type widget.
        views = [w for w in Widget.widgets.values()
                 if isinstance(w, DOMWidget)]
    else:
        # Accept a single widget as well as a sequence.
        try:
            views[0]
        except (IndexError, TypeError):
            views = [views]

    if state is None:
        # Get state of all known widgets
        state = Widget.get_manager_state(drop_defaults=drop_defaults,
                                         widgets=None)['state']

    # Rely on ipywidget to get the default values,
    # but plug in our own state.
    json_data = Widget.get_manager_state(widgets=[])
    json_data['state'] = state

    return dict(manager_state=json_data,
                view_specs=[w.get_view_spec() for w in views])
# Matches the three openings that may not appear verbatim inside a
# <script> element: "<script", "</script" and "<!--".
script_escape_re = re.compile(r'<(script|/script|!--)', re.IGNORECASE)


def escape_script(s):
    """Escape a string that will be the content of an HTML script tag.

    The leading "<" of "<script", "</script" and "<!--" is replaced by the
    unicode escape "\\u003c", following the restrictions on script element
    contents in the HTML specification. Only these three openings are
    rewritten, so most other content involving "<" remains readable.
    """
    return script_escape_re.sub(lambda m: '\\u003c' + m.group(1), s)
@doc_subst(_doc_snippets)
def embed_snippet(views,
                  drop_defaults=True,
                  state=None,
                  indent=2,
                  embed_url=None,
                  requirejs=True,
                  cors=True
                  ):
    """Return a snippet that can be embedded in an HTML file.

    Parameters
    ----------
    {views_attribute}
    {embed_kwargs}

    Returns
    -------
    A unicode string with an HTML snippet containing several `<script>` tags.
    """
    data = embed_data(views, drop_defaults=drop_defaults, state=state)

    # One <script> tag per view spec; contents are escaped for safe
    # inclusion inside a script element.
    widget_views = '\n'.join(
        widget_view_template.format(view_spec=escape_script(json.dumps(spec)))
        for spec in data['view_specs']
    )

    if embed_url is None:
        embed_url = (DEFAULT_EMBED_REQUIREJS_URL if requirejs
                     else DEFAULT_EMBED_SCRIPT_URL)

    loader = load_requirejs_template if requirejs else load_template
    cors_attr = ' crossorigin="anonymous"' if cors else ''

    return snippet_template.format(
        load=loader.format(embed_url=embed_url, use_cors=cors_attr),
        json_data=escape_script(
            json.dumps(data['manager_state'], indent=indent)),
        widget_views=widget_views,
    )
@doc_subst(_doc_snippets)
def embed_minimal_html(fp, views, title='IPyWidget export', template=None, **kwargs):
    """Write a minimal HTML file with widget views embedded.

    Parameters
    ----------
    fp: filename or file-like object
        The file to write the HTML output to.
    {views_attribute}
    title: title of the html page.
    template: Template in which to embed the widget state.
        This should be a Python string with placeholders
        `{{title}}` and `{{snippet}}`. The `{{snippet}}` placeholder
        will be replaced by all the widgets.
    {embed_kwargs}
    """
    snippet = embed_snippet(views, **kwargs)

    values = {
        'title': title,
        'snippet': snippet,
    }
    if template is None:
        template = html_template

    html_code = template.format(**values)

    # Check if fp is writable:
    if hasattr(fp, 'write'):
        # File-like object: the caller controls the encoding.
        fp.write(html_code)
    else:
        # Assume fp is a filename. Write explicitly as UTF-8: the default
        # template declares <meta charset="UTF-8">, and relying on the
        # platform default encoding (e.g. cp1252 on Windows) would produce
        # a file that does not match its own declaration.
        with open(fp, "w", encoding="utf8") as f:
            f.write(html_code)
| |
# -*- coding: UTF-8 -*-
import sys
import time
from vulkan import *
from PySide2 import (QtGui, QtCore)
import numpy as np
from PIL import Image
import glm
# Khronos validation layer requested when enableValidationLayers is True.
validationLayers = [
    'VK_LAYER_LUNARG_standard_validation'
]

# Device-level extensions required by this app (swapchain for presentation).
deviceExtensions = [
    VK_KHR_SWAPCHAIN_EXTENSION_NAME
]

# Toggle for validation layers + debug report callback (disable for release).
enableValidationLayers = True
class InstanceProcAddr(object):
    """Decorator that resolves instance-level Vulkan extension functions.

    The decorated stub's __name__ is used as the entry-point name; the
    real function pointer is looked up lazily on every call via
    vkGetInstanceProcAddr against the handle stored in ``T``.
    """

    # VkInstance handle; assigned after vkCreateInstance succeeds.
    T = None

    def __init__(self, func):
        # Keep the stub only for its __name__.
        self.__func = func

    def __call__(self, *args, **kwargs):
        funcName = self.__func.__name__
        func = InstanceProcAddr.procfunc(funcName)
        if func:
            return func(*args, **kwargs)
        else:
            # Lookup failed: the extension is not present/enabled.
            return VK_ERROR_EXTENSION_NOT_PRESENT

    @staticmethod
    def procfunc(funcName):
        # Overridden in DeviceProcAddr for device-level functions.
        return vkGetInstanceProcAddr(InstanceProcAddr.T, funcName)
class DeviceProcAddr(InstanceProcAddr):
    """Decorator that resolves device-level Vulkan extension functions."""

    @staticmethod
    def procfunc(funcName):
        # Bug fix: look up through DeviceProcAddr.T, not InstanceProcAddr.T.
        # The application assigns ``DeviceProcAddr.T = device`` after
        # vkCreateDevice, which *shadows* the inherited attribute instead of
        # rebinding it on the base class. Reading InstanceProcAddr.T here
        # therefore returned the VkInstance, and vkGetDeviceProcAddr requires
        # a VkDevice handle.
        return vkGetDeviceProcAddr(DeviceProcAddr.T, funcName)
# instance ext functions
# NOTE: each stub below only supplies the Vulkan entry-point name via its
# __name__; the InstanceProcAddr / DeviceProcAddr decorators resolve and
# invoke the real extension function at call time, so the bodies are empty.


@InstanceProcAddr
def vkCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
    pass


@InstanceProcAddr
def vkDestroyDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
    pass


@InstanceProcAddr
def vkCreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator):
    pass


@InstanceProcAddr
def vkDestroySurfaceKHR(instance, surface, pAllocator):
    pass


@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface):
    pass


@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface):
    pass


@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface):
    pass


@InstanceProcAddr
def vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface):
    pass


# device ext functions
@DeviceProcAddr
def vkCreateSwapchainKHR(device, pCreateInfo, pAllocator):
    pass


@DeviceProcAddr
def vkDestroySwapchainKHR(device, swapchain, pAllocator):
    pass


@DeviceProcAddr
def vkGetSwapchainImagesKHR(device, swapchain):
    pass


@DeviceProcAddr
def vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence):
    pass


@DeviceProcAddr
def vkQueuePresentKHR(queue, pPresentInfo):
    pass
def debugCallback(*args):
    """Debug-report callback: print layer prefix and message, keep going.

    args[5] and args[6] are the pLayerPrefix and pMessage parameters of
    the VK_EXT_debug_report callback signature; returning 0 (VK_FALSE)
    tells the driver not to abort the triggering call.
    """
    layer_prefix, message = args[5], args[6]
    print('DEBUG: {} {}'.format(layer_prefix, message))
    return 0
class Win32misc(object):
    """Native Win32 helpers."""

    @staticmethod
    def getInstance(hWnd):
        """Return the HINSTANCE owning window *hWnd* (Windows only)."""
        from cffi import FFI as _FFI
        ffi = _FFI()
        ffi.cdef('long __stdcall GetWindowLongA(void* hWnd, int nIndex);')
        user32 = ffi.dlopen('User32.dll')
        # -6 is GWL_HINSTANCE
        return user32.GetWindowLongA(ffi.cast('void*', hWnd), -6)
class QueueFamilyIndices(object):
    """Queue family indices located on a physical device.

    An index of -1 means the corresponding family has not been found yet.
    """

    def __init__(self):
        self.graphicsFamily = -1
        self.presentFamily = -1

    @property
    def isComplete(self):
        # Both a graphics and a present family must have been discovered.
        return not (self.graphicsFamily < 0 or self.presentFamily < 0)
class SwapChainSupportDetails(object):
    """Container for swap-chain support queried from a surface.

    All three fields start as None and are filled in by the
    swap-chain-support query.
    """

    def __init__(self):
        self.capabilities = self.formats = self.presentModes = None
class Vertex(object):
    """Interleaved per-vertex layout: vec2 position followed by vec3 color."""

    # Templates used only for their nbytes/dtype (not actual vertex data).
    POS = np.array([0, 0], np.float32)
    COLOR = np.array([0, 0, 0], np.float32)

    @staticmethod
    def getBindingDescription():
        """Describe one vertex buffer binding with per-vertex stepping."""
        return VkVertexInputBindingDescription(
            binding=0,
            stride=Vertex.POS.nbytes + Vertex.COLOR.nbytes,
            inputRate=VK_VERTEX_INPUT_RATE_VERTEX
        )

    @staticmethod
    def getAttributeDescriptions():
        """Describe the two shader input attributes (position, color)."""
        return [
            VkVertexInputAttributeDescription(
                location=0,
                binding=0,
                format=VK_FORMAT_R32G32_SFLOAT,
                offset=0,
            ),
            VkVertexInputAttributeDescription(
                location=1,
                binding=0,
                format=VK_FORMAT_R32G32B32_SFLOAT,
                offset=Vertex.POS.nbytes,
            ),
        ]
class UniformBufferObject(object):
    """Model/view/projection matrices packed for upload to a uniform buffer."""

    def __init__(self):
        # Start from identity transforms; updated each frame before upload.
        self.model = np.identity(4, np.float32)
        self.view = np.identity(4, np.float32)
        self.proj = np.identity(4, np.float32)

    def toArray(self):
        """Return the matrices stacked in MVP order as one (12, 4) array."""
        matrices = (self.model, self.view, self.proj)
        return np.concatenate(matrices)

    @property
    def nbytes(self):
        """Total byte size of the packed matrix data."""
        return sum(m.nbytes for m in (self.model, self.view, self.proj))
class HelloTriangleApplication(QtGui.QWindow):
def __init__(self):
super(HelloTriangleApplication, self).__init__()
self.setWidth(1280)
self.setHeight(720)
self.setMinimumWidth(40)
self.setMinimumHeight(40)
self.setTitle("Vulkan Python - PySide2")
# self.setSurfaceType(self.OpenGLSurface)
self.__instance = None
self.__callbcak = None
self.__surface = None
self.__physicalDevice = None
self.__device = None
self.__graphicQueue = None
self.__presentQueue = None
self.__swapChain = None
self.__swapChainImages = []
self.__swapChainImageFormat = None
self.__swapChainExtent = None
self.__swapChainImageViews = []
self.__swapChainFramebuffers = []
self.__renderpass = None
self.__pipeline = None
self.__pipelineLayout = None
self.__commandPool = None
self.__commandBuffers = []
self.__imageAvailableSemaphore = None
self.__renderFinishedSemaphore = None
self.__textureImage = None
self.__textureImageMemory = None
self.__textureImageView = None
self.__textureSampler = None
self.__vertexBuffer = None
self.__vertexBufferMemory = None
self.__indexBuffer = None
self.__indexBufferMemory = None
self.__descriptorPool = None
self.__descriptorSet = None
self.__descriptorSetLayout = None
self.__uniformBuffer = None
self.__uniformBufferMemory = None
self.__vertices = np.array([
# pos color
-.5, -.5, 1, 0, 0,
.5, -.5, 0, 1, 0,
.5, .5, 0, 0, 1,
-.5, .5, 1, 1, 1
], np.float32)
self.__indices = np.array([0, 1, 2, 2, 3, 0], np.uint16)
self.__ubo = UniformBufferObject()
self.__startTime = time.time()
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.render)
self.initVulkan()
self.timer.start()
def __del__(self):
vkDeviceWaitIdle(self.__device)
if self.__textureSampler:
vkDestroySampler(self.__device, self.__textureSampler, None)
if self.__textureImageView:
vkDestroyImageView(self.__device, self.__textureImageView, None)
if self.__textureImage:
vkDestroyImage(self.__device, self.__textureImage, None)
if self.__textureImageMemory:
vkFreeMemory(self.__device, self.__textureImageMemory, None)
if self.__descriptorPool:
vkDestroyDescriptorPool(self.__device, self.__descriptorPool, None)
if self.__uniformBuffer:
vkDestroyBuffer(self.__device, self.__uniformBuffer, None)
if self.__uniformBufferMemory:
vkFreeMemory(self.__device, self.__uniformBufferMemory, None)
if self.__vertexBuffer:
vkDestroyBuffer(self.__device, self.__vertexBuffer, None)
if self.__vertexBufferMemory:
vkFreeMemory(self.__device, self.__vertexBufferMemory, None)
if self.__indexBuffer:
vkDestroyBuffer(self.__device, self.__indexBuffer, None)
if self.__indexBufferMemory:
vkFreeMemory(self.__device, self.__indexBufferMemory, None)
if self.__imageAvailableSemaphore:
vkDestroySemaphore(self.__device, self.__imageAvailableSemaphore, None)
if self.__renderFinishedSemaphore:
vkDestroySemaphore(self.__device, self.__renderFinishedSemaphore, None)
if self.__descriptorSetLayout:
vkDestroyDescriptorSetLayout(self.__device, self.__descriptorSetLayout, None)
self.__cleanupSwapChain()
if self.__commandPool:
vkDestroyCommandPool(self.__device, self.__commandPool, None)
if self.__device:
vkDestroyDevice(self.__device, None)
if self.__callbcak:
vkDestroyDebugReportCallbackEXT(self.__instance, self.__callbcak, None)
if self.__surface:
vkDestroySurfaceKHR(self.__instance, self.__surface, None)
if self.__instance:
vkDestroyInstance(self.__instance, None)
print('instance destroyed')
self.destroy()
def __cleanupSwapChain(self):
[vkDestroyFramebuffer(self.__device, i, None) for i in self.__swapChainFramebuffers]
self.__swapChainFramebuffers = []
vkFreeCommandBuffers(self.__device, self.__commandPool, len(self.__commandBuffers), self.__commandBuffers)
self.__swapChainFramebuffers = []
vkDestroyPipeline(self.__device, self.__pipeline, None)
vkDestroyPipelineLayout(self.__device, self.__pipelineLayout, None)
vkDestroyRenderPass(self.__device, self.__renderpass, None)
[vkDestroyImageView(self.__device, i, None) for i in self.__swapChainImageViews]
self.__swapChainImageViews = []
vkDestroySwapchainKHR(self.__device, self.__swapChain, None)
def __recreateSwapChain(self):
vkDeviceWaitIdle(self.__device)
self.__cleanupSwapChain()
self.__createSwapChain()
self.__createImageViews()
self.__createRenderPass()
self.__createGraphicsPipeline()
self.__createFrambuffers()
self.__createCommandBuffers()
def initVulkan(self):
self.__cretaeInstance()
self.__setupDebugCallback()
self.__createSurface()
self.__pickPhysicalDevice()
self.__createLogicalDevice()
self.__createSwapChain()
self.__createImageViews()
self.__createRenderPass()
self.__createDescriptorSetLayout()
self.__createGraphicsPipeline()
self.__createFrambuffers()
self.__createCommandPool()
self.__createTextureImage()
self.__createTextureImageView()
self.__createTextureSampler()
self.__createVertexBuffer()
self.__createIndexBuffer()
self.__createUniformBuffer()
self.__createDescriptorPool()
self.__createDescriptorSet()
self.__createCommandBuffers()
self.__createSemaphores()
def __cretaeInstance(self):
if enableValidationLayers and not self.__checkValidationLayerSupport():
raise Exception("validation layers requested, but not available!")
appInfo = VkApplicationInfo(
# sType=VK_STRUCTURE_TYPE_APPLICATION_INFO,
pApplicationName='Python VK',
applicationVersion=VK_MAKE_VERSION(1, 0, 0),
pEngineName='pyvulkan',
engineVersion=VK_MAKE_VERSION(1, 0, 0),
apiVersion=VK_API_VERSION
)
extenstions = self.__getRequiredExtensions()
if enableValidationLayers:
instanceInfo = VkInstanceCreateInfo(
pApplicationInfo=appInfo,
# enabledLayerCount=len(validationLayers),
ppEnabledLayerNames=validationLayers,
# enabledExtensionCount=len(extenstions),
ppEnabledExtensionNames=extenstions
)
else:
instanceInfo = VkInstanceCreateInfo(
pApplicationInfo=appInfo,
enabledLayerCount=0,
# enabledExtensionCount=len(extenstions),
ppEnabledExtensionNames=extenstions
)
self.__instance = vkCreateInstance(instanceInfo, None)
InstanceProcAddr.T = self.__instance
def __setupDebugCallback(self):
if not enableValidationLayers:
return
createInfo = VkDebugReportCallbackCreateInfoEXT(
flags=VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT,
pfnCallback=debugCallback
)
self.__callbcak = vkCreateDebugReportCallbackEXT(self.__instance, createInfo, None)
def __createSurface(self):
if sys.platform == 'win32':
hwnd = self.winId()
hinstance = Win32misc.getInstance(hwnd)
createInfo = VkWin32SurfaceCreateInfoKHR(
hinstance=hinstance,
hwnd=hwnd
)
self.__surface = vkCreateWin32SurfaceKHR(self.__instance, createInfo, None)
# elif sys.platform == 'linux':
# pass
def __pickPhysicalDevice(self):
    """Select the first physical device that passes __isDeviceSuitable.

    Stores the chosen handle in ``self.__physicalDevice``.

    Raises:
        RuntimeError: when no suitable GPU exists.  The original relied on
            ``assert`` (stripped under ``python -O``) and, worse, never
            assigned the attribute at all on failure, which surfaced as an
            unrelated AttributeError.
    """
    self.__physicalDevice = None
    for device in vkEnumeratePhysicalDevices(self.__instance):
        if self.__isDeviceSuitable(device):
            self.__physicalDevice = device
            break
    if self.__physicalDevice is None:
        raise RuntimeError('failed to find a suitable GPU!')
def __createLogicalDevice(self):
    """Create the logical device and fetch the graphics/present queues.

    Bug fixed: the non-validation branch previously passed only the *last*
    ``VkDeviceQueueCreateInfo`` (the leaked loop variable) together with
    ``queueCreateInfoCount=1``, silently dropping the present-queue create
    info whenever the graphics and present families differ.  Both branches
    now submit the full ``queueCreateInfos`` list.
    """
    indices = self.__findQueueFamilies(self.__physicalDevice)

    # dict.fromkeys() de-duplicates while preserving order: the graphics
    # and present families may be the same index.
    uniqueQueueFamilies = {}.fromkeys([indices.graphicsFamily, indices.presentFamily])
    queueCreateInfos = [
        VkDeviceQueueCreateInfo(
            queueFamilyIndex=i,
            queueCount=1,
            pQueuePriorities=[1.0]
        )
        for i in uniqueQueueFamilies
    ]

    deviceFeatures = VkPhysicalDeviceFeatures()
    deviceFeatures.samplerAnisotropy = True
    if enableValidationLayers:
        createInfo = VkDeviceCreateInfo(
            pQueueCreateInfos=queueCreateInfos,
            ppEnabledExtensionNames=deviceExtensions,
            ppEnabledLayerNames=validationLayers,
            pEnabledFeatures=deviceFeatures
        )
    else:
        createInfo = VkDeviceCreateInfo(
            pQueueCreateInfos=queueCreateInfos,
            ppEnabledExtensionNames=deviceExtensions,
            enabledLayerCount=0,
            pEnabledFeatures=deviceFeatures
        )

    self.__device = vkCreateDevice(self.__physicalDevice, createInfo, None)
    # Point the device-level function loader at the new device.
    DeviceProcAddr.T = self.__device

    self.__graphicQueue = vkGetDeviceQueue(self.__device, indices.graphicsFamily, 0)
    self.__presentQueue = vkGetDeviceQueue(self.__device, indices.presentFamily, 0)
def __createSwapChain(self):
    """Create the swap chain and cache its images, pixel format, and extent."""
    swapChainSupport = self.__querySwapChainSupport(self.__physicalDevice)

    surfaceFormat = self.__chooseSwapSurfaceFormat(swapChainSupport.formats)
    presentMode = self.__chooseSwapPresentMode(swapChainSupport.presentModes)
    extent = self.__chooseSwapExtent(swapChainSupport.capabilities)

    # Request one image above the minimum for extra buffering headroom;
    # maxImageCount == 0 means "no upper limit".
    imageCount = swapChainSupport.capabilities.minImageCount + 1
    if swapChainSupport.capabilities.maxImageCount > 0 and imageCount > swapChainSupport.capabilities.maxImageCount:
        imageCount = swapChainSupport.capabilities.maxImageCount

    indices = self.__findQueueFamilies(self.__physicalDevice)
    # fromkeys() de-duplicates the family indices.
    queueFamily = {}.fromkeys([indices.graphicsFamily, indices.presentFamily])
    queueFamilies = list(queueFamily.keys())
    if len(queueFamilies) > 1:
        # Distinct graphics/present families: images shared CONCURRENTly.
        createInfo = VkSwapchainCreateInfoKHR(
            surface=self.__surface,
            minImageCount=imageCount,
            imageFormat=surfaceFormat.format,
            imageColorSpace=surfaceFormat.colorSpace,
            imageExtent=extent,
            imageArrayLayers=1,
            imageUsage=VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            # queueFamilyIndexCount=len(queueFamilies),
            pQueueFamilyIndices=queueFamilies,
            imageSharingMode=VK_SHARING_MODE_CONCURRENT,
            preTransform=swapChainSupport.capabilities.currentTransform,
            compositeAlpha=VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
            presentMode=presentMode,
            clipped=True
        )
    else:
        # Single family: EXCLUSIVE ownership avoids sharing overhead.
        createInfo = VkSwapchainCreateInfoKHR(
            surface=self.__surface,
            minImageCount=imageCount,
            imageFormat=surfaceFormat.format,
            imageColorSpace=surfaceFormat.colorSpace,
            imageExtent=extent,
            imageArrayLayers=1,
            imageUsage=VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            # queueFamilyIndexCount=len(queueFamilies),
            pQueueFamilyIndices=queueFamilies,
            imageSharingMode=VK_SHARING_MODE_EXCLUSIVE,
            preTransform=swapChainSupport.capabilities.currentTransform,
            compositeAlpha=VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
            presentMode=presentMode,
            clipped=True
        )
    self.__swapChain = vkCreateSwapchainKHR(self.__device, createInfo, None)
    assert self.__swapChain != None

    self.__swapChainImages = vkGetSwapchainImagesKHR(self.__device, self.__swapChain)

    # Cached for pipeline/viewport/framebuffer creation later on.
    self.__swapChainImageFormat = surfaceFormat.format
    self.__swapChainExtent = extent
def __createImageViews(self):
    """Create one color image view per swap-chain image."""
    self.__swapChainImageViews = [
        self.__createImageView(image, self.__swapChainImageFormat)
        for image in self.__swapChainImages
    ]
def __createRenderPass(self):
    """Build a single-subpass render pass with one cleared color attachment."""
    # Clear on load, keep the result, and end ready for presentation.
    colorAttachment = VkAttachmentDescription(
        format=self.__swapChainImageFormat,
        samples=VK_SAMPLE_COUNT_1_BIT,
        loadOp=VK_ATTACHMENT_LOAD_OP_CLEAR,
        storeOp=VK_ATTACHMENT_STORE_OP_STORE,
        stencilLoadOp=VK_ATTACHMENT_LOAD_OP_DONT_CARE,
        stencilStoreOp=VK_ATTACHMENT_STORE_OP_DONT_CARE,
        initialLayout=VK_IMAGE_LAYOUT_UNDEFINED,
        finalLayout=VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
    )

    # Positional args: attachment index 0, optimal color layout.
    colorAttachmentRef = VkAttachmentReference(
        0,
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
    )

    subpass = VkSubpassDescription(
        pipelineBindPoint=VK_PIPELINE_BIND_POINT_GRAPHICS,
        pColorAttachments=[colorAttachmentRef]
    )

    renderPassInfo = VkRenderPassCreateInfo(
        pAttachments=[colorAttachment],
        pSubpasses=[subpass]
    )

    self.__renderpass = vkCreateRenderPass(self.__device, renderPassInfo, None)
def __createDescriptorSetLayout(self):
    """Declare a single uniform-buffer binding (binding 0, vertex stage)."""
    uboLayoutBinding = VkDescriptorSetLayoutBinding(
        binding=0,
        descriptorType=VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
        descriptorCount=1,
        stageFlags=VK_SHADER_STAGE_VERTEX_BIT
    )

    layoutInfo = VkDescriptorSetLayoutCreateInfo(
        pBindings=[uboLayoutBinding]
    )

    self.__descriptorSetLayout = vkCreateDescriptorSetLayout(self.__device, layoutInfo, None)
def __createGraphicsPipeline(self):
    """Build the graphics pipeline used by every draw call.

    Loads SPIR-V vertex/fragment shaders, wires the vertex layout from the
    Vertex helper class, and fixes viewport/raster/blend state to the
    current swap-chain extent.  Shader modules are destroyed once baked
    into the pipeline.
    """
    vertexShaderMode = self.__createShaderModule('shader/vert.spv')
    fragmentShaderMode = self.__createShaderModule('shader/frag.spv')

    vertexShaderStageInfo = VkPipelineShaderStageCreateInfo(
        stage=VK_SHADER_STAGE_VERTEX_BIT,
        module=vertexShaderMode,
        pName='main'
    )

    fragmentShaderStageInfo = VkPipelineShaderStageCreateInfo(
        stage=VK_SHADER_STAGE_FRAGMENT_BIT,
        module=fragmentShaderMode,
        pName='main'
    )

    shaderStageInfos = [vertexShaderStageInfo, fragmentShaderStageInfo]

    # Vertex layout is defined on the Vertex helper class.
    bindingDescription = Vertex.getBindingDescription()
    attributeDescription = Vertex.getAttributeDescriptions()
    vertexInputInfo = VkPipelineVertexInputStateCreateInfo(
        # vertexBindingDescriptionCount=0,
        pVertexBindingDescriptions=[bindingDescription],
        # vertexAttributeDescriptionCount=0,
        pVertexAttributeDescriptions=attributeDescription,
    )

    inputAssembly = VkPipelineInputAssemblyStateCreateInfo(
        topology=VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
        primitiveRestartEnable=False
    )

    # Viewport and scissor cover the full swap-chain image.
    viewport = VkViewport(0.0, 0.0,
                          float(self.__swapChainExtent.width),
                          float(self.__swapChainExtent.height),
                          0.0, 1.0)
    scissor = VkRect2D([0, 0], self.__swapChainExtent)
    viewportStage = VkPipelineViewportStateCreateInfo(
        viewportCount=1,
        pViewports=viewport,
        scissorCount=1,
        pScissors=scissor
    )

    rasterizer = VkPipelineRasterizationStateCreateInfo(
        depthClampEnable=False,
        rasterizerDiscardEnable=False,
        polygonMode=VK_POLYGON_MODE_FILL,
        lineWidth=1.0,
        cullMode=VK_CULL_MODE_BACK_BIT,
        frontFace=VK_FRONT_FACE_CLOCKWISE,
        depthBiasEnable=False
    )

    # Multisampling disabled: one sample per pixel.
    multisampling = VkPipelineMultisampleStateCreateInfo(
        sampleShadingEnable=False,
        rasterizationSamples=VK_SAMPLE_COUNT_1_BIT
    )

    # Blending disabled; writes go to all four RGBA channels.
    colorBlendAttachment = VkPipelineColorBlendAttachmentState(
        colorWriteMask=VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
        blendEnable=False
    )

    colorBending = VkPipelineColorBlendStateCreateInfo(
        logicOpEnable=False,
        logicOp=VK_LOGIC_OP_COPY,
        attachmentCount=1,
        pAttachments=colorBlendAttachment,
        blendConstants=[0.0, 0.0, 0.0, 0.0]
    )

    # Layout exposes the single UBO descriptor-set layout; no push constants.
    pipelineLayoutInfo = VkPipelineLayoutCreateInfo(
        # setLayoutCount=0,
        pushConstantRangeCount=0,
        pSetLayouts=[self.__descriptorSetLayout]
    )

    self.__pipelineLayout = vkCreatePipelineLayout(self.__device, pipelineLayoutInfo, None)

    pipelineInfo = VkGraphicsPipelineCreateInfo(
        # stageCount=len(shaderStageInfos),
        pStages=shaderStageInfos,
        pVertexInputState=vertexInputInfo,
        pInputAssemblyState=inputAssembly,
        pViewportState=viewportStage,
        pRasterizationState=rasterizer,
        pMultisampleState=multisampling,
        pColorBlendState=colorBending,
        layout=self.__pipelineLayout,
        renderPass=self.__renderpass,
        subpass=0,
        basePipelineHandle=VK_NULL_HANDLE
    )

    self.__pipeline = vkCreateGraphicsPipelines(self.__device, VK_NULL_HANDLE, 1, pipelineInfo, None)#[0]

    # Safe to destroy once the pipeline holds its own compiled copy.
    vkDestroyShaderModule(self.__device, vertexShaderMode, None)
    vkDestroyShaderModule(self.__device, fragmentShaderMode, None)
def __createFrambuffers(self):
    """Create one framebuffer per swap-chain image view.

    (Method name typo 'Frambuffers' kept - the caller references it as-is.)
    """
    self.__swapChainFramebuffers = [
        vkCreateFramebuffer(
            self.__device,
            VkFramebufferCreateInfo(
                renderPass=self.__renderpass,
                pAttachments=[view],
                width=self.__swapChainExtent.width,
                height=self.__swapChainExtent.height,
                layers=1
            ),
            None
        )
        for view in self.__swapChainImageViews
    ]
def __createCommandPool(self):
    """Create a command pool tied to the graphics queue family."""
    queueFamilyIndices = self.__findQueueFamilies(self.__physicalDevice)

    createInfo = VkCommandPoolCreateInfo(
        queueFamilyIndex=queueFamilyIndices.graphicsFamily
    )
    self.__commandPool = vkCreateCommandPool(self.__device, createInfo, None)
def __createTextureImage(self):
    """Load textures/texture.jpg and upload it into a device-local VkImage.

    Path: PIL image -> host-visible staging buffer -> layout transition ->
    buffer-to-image copy -> transition to shader-read-only layout.
    """
    _image = Image.open('textures/texture.jpg')
    # NOTE(review): putalpha(1) sets the alpha channel to 1/255 (nearly
    # transparent); presumably only intended to force an RGBA pixel layout
    # for the 4-bytes-per-pixel copy below - confirm.
    _image.putalpha(1)
    width = _image.width
    height = _image.height
    imageSize = width * height * 4  # RGBA, one byte per channel

    stagingBuffer, stagingMem = self.__createBuffer(imageSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                                                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
    data = vkMapMemory(self.__device, stagingMem, 0, imageSize, 0)
    ffi.memmove(data, _image.tobytes(), imageSize)
    vkUnmapMemory(self.__device, stagingMem)

    del _image  # pixel data already copied into the staging buffer

    self.__textureImage, self.__textureImageMemory = self.__createImage(width, height,
                                                                        VK_FORMAT_R8G8B8A8_UNORM,
                                                                        VK_IMAGE_TILING_OPTIMAL,
                                                                        VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
                                                                        VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)

    self.__transitionImageLayout(self.__textureImage, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
    self.__copyBufferToImage(stagingBuffer, self.__textureImage, width, height)
    self.__transitionImageLayout(self.__textureImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)

    # Staging resources are no longer needed after the GPU copy.
    vkDestroyBuffer(self.__device, stagingBuffer, None)
    vkFreeMemory(self.__device, stagingMem, None)
def __createTextureImageView(self):
    """Create the shader-visible view over the uploaded texture image."""
    self.__textureImageView = self.__createImageView(self.__textureImage, VK_FORMAT_R8G8B8A8_UNORM)
def __createTextureSampler(self):
    """Create a linear-filtering, repeating sampler with 16x anisotropy.

    Requires the samplerAnisotropy device feature enabled in
    __createLogicalDevice.
    """
    samplerInfo = VkSamplerCreateInfo(
        magFilter=VK_FILTER_LINEAR,
        minFilter=VK_FILTER_LINEAR,
        addressModeU=VK_SAMPLER_ADDRESS_MODE_REPEAT,
        addressModeV=VK_SAMPLER_ADDRESS_MODE_REPEAT,
        addressModeW=VK_SAMPLER_ADDRESS_MODE_REPEAT,
        anisotropyEnable=True,
        maxAnisotropy=16,
        compareEnable=False,
        compareOp=VK_COMPARE_OP_ALWAYS,
        borderColor=VK_BORDER_COLOR_INT_OPAQUE_BLACK,
        unnormalizedCoordinates=False
    )

    self.__textureSampler = vkCreateSampler(self.__device, samplerInfo, None)
def __createImageView(self, image, imFormat):
    """Return a 2D color view of `image` with the given format.

    Covers mip level 0 and array layer 0 only (single level/layer range).
    """
    ssr = VkImageSubresourceRange(
        VK_IMAGE_ASPECT_COLOR_BIT,
        0, 1, 0, 1
    )

    viewInfo = VkImageViewCreateInfo(
        image=image,
        viewType=VK_IMAGE_VIEW_TYPE_2D,
        format=imFormat,
        subresourceRange=ssr
    )

    return vkCreateImageView(self.__device, viewInfo, None)
def __createImage(self, widht, height, imFormat, tiling, usage, properties):
    """Create a 2D VkImage, allocate matching memory, bind, and return both.

    ('widht' parameter typo preserved: a rename could break keyword callers
    outside this view.)  Returns a (VkImage, VkDeviceMemory) tuple.
    """
    imageInfo = VkImageCreateInfo(
        imageType=VK_IMAGE_TYPE_2D,
        extent=[widht, height, 1],
        mipLevels=1,
        arrayLayers=1,
        format=imFormat,
        samples=VK_SAMPLE_COUNT_1_BIT,
        tiling=tiling,
        usage=usage,
        sharingMode=VK_SHARING_MODE_EXCLUSIVE,
        initialLayout=VK_IMAGE_LAYOUT_UNDEFINED
    )
    image = vkCreateImage(self.__device, imageInfo, None)

    # Back the image with memory of a compatible, property-matching type.
    memRequirements = vkGetImageMemoryRequirements(self.__device, image)
    allocInfo = VkMemoryAllocateInfo(
        allocationSize=memRequirements.size,
        memoryTypeIndex=self.__findMemoryType(memRequirements.memoryTypeBits, properties)
    )
    imageMemory = vkAllocateMemory(self.__device, allocInfo, None)
    vkBindImageMemory(self.__device, image, imageMemory, 0)

    return (image, imageMemory)
def __transitionImageLayout(self, image, oldLayout, newLayout):
    """Transition `image` between layouts with a pipeline barrier.

    Supports exactly two transitions: UNDEFINED -> TRANSFER_DST (before an
    upload) and TRANSFER_DST -> SHADER_READ_ONLY (after it).  Any other
    pair raises.
    """
    cmdBuffer = self.__beginSingleTimeCommands()

    subresourceRange = VkImageSubresourceRange(
        aspectMask=VK_IMAGE_ASPECT_COLOR_BIT,
        baseMipLevel=0,
        levelCount=1,
        baseArrayLayer=0,
        layerCount=1
    )

    barrier = VkImageMemoryBarrier(
        oldLayout=oldLayout,
        newLayout=newLayout,
        srcQueueFamilyIndex=VK_QUEUE_FAMILY_IGNORED,
        dstQueueFamilyIndex=VK_QUEUE_FAMILY_IGNORED,
        image=image,
        subresourceRange=subresourceRange
    )

    sourceStage = 0
    destinationStage = 0

    if oldLayout == VK_IMAGE_LAYOUT_UNDEFINED and newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
        # No prior access to wait on; make the image writable for transfer.
        barrier.srcAccessMask = 0
        barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT

        sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
        destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT
    elif oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL and newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
        # Transfer writes must complete before fragment-shader reads.
        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT
        barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT

        sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT
        destinationStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
    else:
        raise Exception('unsupported layout transition!')

    vkCmdPipelineBarrier(cmdBuffer,
                         sourceStage,
                         destinationStage,
                         0,
                         0, None,
                         0, None,
                         1, barrier)

    self.__endSingleTimeCommands(cmdBuffer)
def __copyBufferToImage(self, buffer, image, width, height):
    """Copy tightly-packed pixels from `buffer` into `image` (one-shot command).

    The image must already be in TRANSFER_DST_OPTIMAL layout.
    """
    cmdbuffer = self.__beginSingleTimeCommands()

    subresource = VkImageSubresourceLayers(
        aspectMask=VK_IMAGE_ASPECT_COLOR_BIT,
        mipLevel=0,
        baseArrayLayer=0,
        layerCount=1
    )
    # bufferRowLength/ImageHeight of 0 => rows are tightly packed.
    region = VkBufferImageCopy(
        bufferOffset=0,
        bufferRowLength=0,
        bufferImageHeight=0,
        imageSubresource=subresource,
        imageOffset=[0, 0],
        imageExtent=[width, height, 1]
    )

    vkCmdCopyBufferToImage(cmdbuffer, buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, region)
    self.__endSingleTimeCommands(cmdbuffer)
def __createVertexBuffer(self):
    """Upload self.__vertices into a device-local vertex buffer via staging."""
    bufferSize = self.__vertices.nbytes

    # Host-visible staging buffer filled from the numpy vertex array.
    stagingBuffer, stagingMemory = self.__createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                                                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
    data = vkMapMemory(self.__device, stagingMemory, 0, bufferSize, 0)
    vertePtr = ffi.cast('float *', self.__vertices.ctypes.data)
    ffi.memmove(data, vertePtr, bufferSize)
    vkUnmapMemory(self.__device, stagingMemory)

    # Device-local destination, then a GPU-side copy.
    self.__vertexBuffer, self.__vertexBufferMemory = self.__createBuffer(bufferSize,
                                                                         VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
                                                                         VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
    self.__copyBuffer(stagingBuffer, self.__vertexBuffer, bufferSize)

    vkDestroyBuffer(self.__device, stagingBuffer, None)
    vkFreeMemory(self.__device, stagingMemory, None)
def __createIndexBuffer(self):
    """Upload self.__indices (uint16) into a device-local index buffer via staging."""
    bufferSize = self.__indices.nbytes

    stagingBuffer, stagingMemory = self.__createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                                                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
    data = vkMapMemory(self.__device, stagingMemory, 0, bufferSize, 0)
    # uint16 cast matches VK_INDEX_TYPE_UINT16 used at bind time.
    indicesPtr = ffi.cast('uint16_t*', self.__indices.ctypes.data)
    ffi.memmove(data, indicesPtr, bufferSize)
    vkUnmapMemory(self.__device, stagingMemory)

    self.__indexBuffer, self.__indexBufferMemory = self.__createBuffer(bufferSize,
                                                                      VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
                                                                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
    self.__copyBuffer(stagingBuffer, self.__indexBuffer, bufferSize)

    vkDestroyBuffer(self.__device, stagingBuffer, None)
    vkFreeMemory(self.__device, stagingMemory, None)
def __createUniformBuffer(self):
    """Create a host-visible uniform buffer sized for the UBO struct.

    Host-visible + coherent so __updateUniformBuffer can map/write each frame.
    """
    self.__uniformBuffer, self.__uniformBufferMemory = self.__createBuffer(self.__ubo.nbytes,
                                                                           VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
                                                                           VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
def __createDescriptorPool(self):
    """Create a pool able to allocate one uniform-buffer descriptor set."""
    poolSize = VkDescriptorPoolSize(
        type=VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
        descriptorCount=1
    )

    poolInfo = VkDescriptorPoolCreateInfo(
        pPoolSizes=[poolSize],
        maxSets=1
    )

    self.__descriptorPool = vkCreateDescriptorPool(self.__device, poolInfo, None)
def __createDescriptorSet(self):
    """Allocate the descriptor set and point binding 0 at the uniform buffer."""
    layouts = [self.__descriptorSetLayout]
    allocInfo = VkDescriptorSetAllocateInfo(
        descriptorPool=self.__descriptorPool,
        pSetLayouts=layouts
    )
    # vkAllocateDescriptorSets returns a list; element 0 is our set.
    self.__descriptorSet = vkAllocateDescriptorSets(self.__device, allocInfo)

    bufferInfo = VkDescriptorBufferInfo(
        buffer=self.__uniformBuffer,
        offset=0,
        range=self.__ubo.nbytes
    )

    descriptWrite = VkWriteDescriptorSet(
        dstSet=self.__descriptorSet[0],
        dstBinding=0,
        dstArrayElement=0,
        descriptorType=VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
        # descriptorCount=1,
        pBufferInfo=[bufferInfo]
    )

    vkUpdateDescriptorSets(self.__device, 1, descriptWrite, 0, None)
def __createBuffer(self, size, usage, properties):
    """Create a VkBuffer of `size` bytes with backing memory bound to it.

    Returns a (VkBuffer, VkDeviceMemory) tuple; `properties` selects the
    memory-type flags the allocation must satisfy.
    """
    bufferInfo = VkBufferCreateInfo(
        size=size,
        usage=usage,
        sharingMode=VK_SHARING_MODE_EXCLUSIVE
    )
    newBuffer = vkCreateBuffer(self.__device, bufferInfo, None)

    requirements = vkGetBufferMemoryRequirements(self.__device, newBuffer)
    allocInfo = VkMemoryAllocateInfo(
        allocationSize=requirements.size,
        memoryTypeIndex=self.__findMemoryType(requirements.memoryTypeBits, properties)
    )
    newMemory = vkAllocateMemory(self.__device, allocInfo, None)
    vkBindBufferMemory(self.__device, newBuffer, newMemory, 0)

    return (newBuffer, newMemory)
def __beginSingleTimeCommands(self):
    """Allocate and begin a one-shot primary command buffer; return it.

    Pair with __endSingleTimeCommands, which submits and frees it.
    """
    allocInfo = VkCommandBufferAllocateInfo(
        level=VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        commandPool=self.__commandPool,
        commandBufferCount=1
    )

    commandBuffer = vkAllocateCommandBuffers(self.__device, allocInfo)[0]
    beginInfo = VkCommandBufferBeginInfo(flags=VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT)
    vkBeginCommandBuffer(commandBuffer, beginInfo)

    return commandBuffer
def __endSingleTimeCommands(self, commandBuffer):
    """End, submit, and free a one-shot command buffer, blocking until done."""
    vkEndCommandBuffer(commandBuffer)

    submitInfo = VkSubmitInfo(pCommandBuffers=[commandBuffer])

    vkQueueSubmit(self.__graphicQueue, 1, [submitInfo], VK_NULL_HANDLE)
    # Synchronous: wait for the GPU so the buffer can be freed immediately.
    vkQueueWaitIdle(self.__graphicQueue)

    vkFreeCommandBuffers(self.__device, self.__commandPool, 1, [commandBuffer])
def __copyBuffer(self, src, dst, bufferSize):
    """GPU-copy `bufferSize` bytes from buffer `src` to buffer `dst`."""
    cmdBuf = self.__beginSingleTimeCommands()
    # Positional VkBufferCopy args: srcOffset, dstOffset, size.
    region = VkBufferCopy(0, 0, bufferSize)
    vkCmdCopyBuffer(cmdBuf, src, dst, 1, [region])
    self.__endSingleTimeCommands(cmdBuf)
def __findMemoryType(self, typeFilter, properties):
    """Return the index of a memory type allowed by `typeFilter` that has
    all `properties` flags set.

    Raises:
        RuntimeError: when no memory type matches.  The previous ``return -1``
            was never checked by callers (__createImage, __createBuffer) and
            would have been handed to VkMemoryAllocateInfo as an invalid
            memoryTypeIndex.
    """
    memProperties = vkGetPhysicalDeviceMemoryProperties(self.__physicalDevice)
    for i, memType in enumerate(memProperties.memoryTypes):
        # Bit i of typeFilter marks memory type i as acceptable.
        if (typeFilter & (1 << i)) and ((memType.propertyFlags & properties) == properties):
            return i
    raise RuntimeError('failed to find suitable memory type!')
def __createCommandBuffers(self):
    """Record one draw command buffer per swap-chain framebuffer."""
    self.__commandBuffers = []

    allocInfo = VkCommandBufferAllocateInfo(
        commandPool=self.__commandPool,
        level=VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        commandBufferCount=len(self.__swapChainFramebuffers)
    )

    self.__commandBuffers = vkAllocateCommandBuffers(self.__device, allocInfo)

    for i, buffer in enumerate(self.__commandBuffers):
        # SIMULTANEOUS_USE: the same buffer may be pending across frames.
        beginInfo = VkCommandBufferBeginInfo(flags=VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)
        vkBeginCommandBuffer(buffer, beginInfo)

        renderArea = VkRect2D([0, 0], self.__swapChainExtent)
        clearColor = VkClearValue(color=[[0.0, 0.0, 0.0, 1.0]])  # opaque black
        renderPassInfo = VkRenderPassBeginInfo(
            renderPass=self.__renderpass,
            framebuffer=self.__swapChainFramebuffers[i],
            renderArea=renderArea,
            pClearValues=[clearColor]
        )

        vkCmdBeginRenderPass(buffer, renderPassInfo, VK_SUBPASS_CONTENTS_INLINE)
        vkCmdBindPipeline(buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, self.__pipeline)
        vkCmdBindVertexBuffers(buffer, 0, 1, [self.__vertexBuffer], [0])
        vkCmdBindIndexBuffer(buffer, self.__indexBuffer, 0, VK_INDEX_TYPE_UINT16)
        vkCmdBindDescriptorSets(buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, self.__pipelineLayout, 0, 1, self.__descriptorSet, 0, None)
        vkCmdDrawIndexed(buffer, len(self.__indices), 1, 0, 0, 0)
        vkCmdEndRenderPass(buffer)

        vkEndCommandBuffer(buffer)
def __createSemaphores(self):
    """Create the acquire/render-complete semaphores used by drawFrame."""
    semaphoreInfo = VkSemaphoreCreateInfo()
    self.__imageAvailableSemaphore = vkCreateSemaphore(self.__device, semaphoreInfo, None)
    self.__renderFinishedSemaphore = vkCreateSemaphore(self.__device, semaphoreInfo, None)
def __updateUniformBuffer(self):
    """Animate the MVP matrices and write them into the mapped uniform buffer."""
    currentTime = time.time()
    t = currentTime - self.__startTime

    # Model spins 90 deg/sec around +Z; camera at (2,2,2) looking at origin.
    self.__ubo.model = glm.rotate(np.identity(4, np.float32), 90.0 * t, 0.0, 0.0, 1.0)
    self.__ubo.view = glm.lookAt(np.array([2, 2, 2], np.float32), np.array([0, 0, 0], np.float32), np.array([0, 0, 1], np.float32))
    # NOTE(review): negative FOV presumably stands in for the usual
    # proj[1][1] *= -1 Y-flip (see the commented line below) - confirm.
    self.__ubo.proj = glm.perspective(-45.0, float(self.__swapChainExtent.width) / self.__swapChainExtent.height, 0.1, 10.0)
    # self.__ubo.proj[1][1] *= -1

    # Buffer is host-coherent, so a plain map/memmove/unmap is sufficient.
    data = vkMapMemory(self.__device, self.__uniformBufferMemory, 0, self.__ubo.nbytes, 0)
    ma = self.__ubo.toArray()
    dptr = ffi.cast('float *', ma.ctypes.data)
    ffi.memmove(data, dptr, self.__ubo.nbytes)
    vkUnmapMemory(self.__device, self.__uniformBufferMemory)
def drawFrame(self):
    """Acquire a swap-chain image, submit its command buffer, and present.

    Recreates the swap chain when the surface is lost (on acquire) or out
    of date (on present).  No-op while the window is not exposed.
    """
    if not self.isExposed():
        return

    try:
        # 18446744073709551615 == UINT64_MAX: block until an image is free.
        imageIndex = vkAcquireNextImageKHR(self.__device, self.__swapChain, 18446744073709551615,
                                           self.__imageAvailableSemaphore, VK_NULL_HANDLE)
    except VkErrorSurfaceLostKhr:
        self.__recreateSwapChain()
        return
    # else:
    #     raise Exception('faild to acquire next image.')

    # Wait for the acquired image before color output; signal render-done.
    waitSemaphores = [self.__imageAvailableSemaphore]
    signalSemaphores = [self.__renderFinishedSemaphore]
    waitStages = [VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT]
    submit = VkSubmitInfo(
        pWaitSemaphores=waitSemaphores,
        pWaitDstStageMask=waitStages,
        pCommandBuffers=[self.__commandBuffers[imageIndex]],
        pSignalSemaphores=signalSemaphores
    )

    vkQueueSubmit(self.__graphicQueue, 1, submit, VK_NULL_HANDLE)

    presenInfo = VkPresentInfoKHR(
        pWaitSemaphores=signalSemaphores,
        pSwapchains=[self.__swapChain],
        pImageIndices=[imageIndex]
    )

    try:
        vkQueuePresentKHR(self.__presentQueue, presenInfo)
    except VkErrorOutOfDateKhr:
        self.__recreateSwapChain()

    if enableValidationLayers:
        # Throttle so validation-layer work cannot queue up without bound.
        vkQueueWaitIdle(self.__presentQueue)
def __createShaderModule(self, shaderFile):
    """Load a SPIR-V binary from `shaderFile` and wrap it in a VkShaderModule."""
    with open(shaderFile, 'rb') as sf:
        code = sf.read()

        createInfo = VkShaderModuleCreateInfo(
            codeSize=len(code),
            pCode=code
        )

        return vkCreateShaderModule(self.__device, createInfo, None)
def __chooseSwapSurfaceFormat(self, formats):
    """Pick a surface format, preferring B8G8R8 UNORM + sRGB nonlinear.

    NOTE(review): the single-UNDEFINED-format branch returns a plain
    [format, colorSpace] list, while every other path returns the driver's
    surface-format object; callers reading .format/.colorSpace would break
    on the list case - confirm whether that branch can occur.  Also verify
    that B8G8R8 (no alpha) rather than B8G8R8A8 is intended.
    """
    if len(formats) == 1 and formats[0].format == VK_FORMAT_UNDEFINED:
        return [VK_FORMAT_B8G8R8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR]
    for i in formats:
        if i.format == VK_FORMAT_B8G8R8_UNORM and i.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR:
            return i

    return formats[0]
def __chooseSwapPresentMode(self, presentModes):
    """Pick the best available present mode.

    Preference: MAILBOX (low-latency triple buffering), then IMMEDIATE,
    falling back to FIFO, which the spec guarantees to be supported.

    Bug fixed: the original returned the *first* of FIFO/MAILBOX/IMMEDIATE
    it encountered, so a list that happened to report FIFO first could
    never select the preferred MAILBOX mode.
    """
    bestMode = VK_PRESENT_MODE_FIFO_KHR
    for mode in presentModes:
        if mode == VK_PRESENT_MODE_MAILBOX_KHR:
            return mode
        elif mode == VK_PRESENT_MODE_IMMEDIATE_KHR:
            # Better than FIFO for latency, but keep scanning for MAILBOX.
            bestMode = mode
    return bestMode
def __chooseSwapExtent(self, capabilities):
    """Clamp the current window size to the surface's supported extent range."""
    width = max(capabilities.minImageExtent.width, min(capabilities.maxImageExtent.width, self.width()))
    height = max(capabilities.minImageExtent.height, min(capabilities.maxImageExtent.height, self.height()))
    return VkExtent2D(width, height)
def __querySwapChainSupport(self, device):
    """Gather surface capabilities, formats, and present modes for `device`."""
    detail = SwapChainSupportDetails()

    detail.capabilities = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, self.__surface)
    detail.formats = vkGetPhysicalDeviceSurfaceFormatsKHR(device, self.__surface)
    detail.presentModes = vkGetPhysicalDeviceSurfacePresentModesKHR(device, self.__surface)

    return detail
def __isDeviceSuitable(self, device):
    """Accept devices with graphics+present queues, the required extensions,
    a usable swap chain, and the samplerAnisotropy feature."""
    indices = self.__findQueueFamilies(device)
    extensionsSupported = self.__checkDeviceExtensionSupport(device)
    swapChainAdequate = False
    if extensionsSupported:
        swapChainSupport = self.__querySwapChainSupport(device)
        # At least one surface format and one present mode must exist.
        swapChainAdequate = (swapChainSupport.formats is not None) and (swapChainSupport.presentModes is not None)
    supportedFeatures = vkGetPhysicalDeviceFeatures(device)
    return indices.isComplete and extensionsSupported and swapChainAdequate and supportedFeatures.samplerAnisotropy
def __checkDeviceExtensionSupport(self, device):
    """Return True when `device` exposes every name in deviceExtensions."""
    available = {prop.extensionName
                 for prop in vkEnumerateDeviceExtensionProperties(device, None)}
    return all(name in available for name in deviceExtensions)
def __findQueueFamilies(self, device):
    """Locate queue family indices with graphics and surface-present support.

    Returns a QueueFamilyIndices; either index may stay unset when the
    device offers no matching family (isComplete reports that).
    """
    indices = QueueFamilyIndices()

    familyProperties = vkGetPhysicalDeviceQueueFamilyProperties(device)
    for i, prop in enumerate(familyProperties):
        if prop.queueCount > 0 and prop.queueFlags & VK_QUEUE_GRAPHICS_BIT:
            indices.graphicsFamily = i

        presentSupport = vkGetPhysicalDeviceSurfaceSupportKHR(device, i, self.__surface)
        if prop.queueCount > 0 and presentSupport:
            indices.presentFamily = i

        # Stop as soon as both roles are covered.
        if indices.isComplete:
            break
    return indices
def __getRequiredExtensions(self):
    """List all instance extensions, adding debug-report when validating."""
    names = [prop.extensionName
             for prop in vkEnumerateInstanceExtensionProperties(None)]
    if enableValidationLayers:
        names.append(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)
    return names
def __checkValidationLayerSupport(self):
    """Return True iff every layer in validationLayers is available.

    Bug fixed: the original ``return layerfound`` sat inside the outer
    loop, so only the *first* requested layer was ever checked, and the
    fall-through ``return False`` inverted the result for an empty layer
    list.  Now all requested layers must be present.
    """
    availableNames = {prop.layerName
                      for prop in vkEnumerateInstanceLayerProperties()}
    return all(layer in availableNames for layer in validationLayers)
def render(self):
    """Per-frame entry point: refresh the UBO, then draw."""
    self.__updateUniformBuffer()
    self.drawFrame()
def resizeEvent(self, event):
    """Qt resize hook: rebuild the swap chain when the size actually changed."""
    if event.size() != event.oldSize():
        self.__recreateSwapChain()
    super(HelloTriangleApplication, self).resizeEvent(event)
if __name__ == '__main__':
    import sys

    app = QtGui.QGuiApplication(sys.argv)

    win = HelloTriangleApplication()
    win.show()

    def cleanup():
        """Stop the render timer and drop the window before Qt tears down."""
        global win
        win.timer.stop()
        del win

    # Fixed the 'clenaup' typo; the connect() call below is updated to match.
    app.aboutToQuit.connect(cleanup)

    sys.exit(app.exec_())
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for linear_qubit_operator.py."""
import multiprocessing
import unittest
import numpy
import scipy.sparse.linalg
from openfermion.ops.operators import QubitOperator
from openfermion.linalg.linear_qubit_operator import (
LinearQubitOperator,
LinearQubitOperatorOptions,
ParallelLinearQubitOperator,
apply_operator,
generate_linear_qubit_operator,
)
from openfermion.linalg.sparse_tools import qubit_operator_sparse
class LinearQubitOperatorOptionsTest(unittest.TestCase):
    """Unit tests covering LinearQubitOperatorOptions."""

    def setUp(self):
        """Build an options object sized to this machine's CPU count."""
        self.processes = multiprocessing.cpu_count()
        self.options = LinearQubitOperatorOptions(self.processes)

    def test_init(self):
        """The constructor stores the process count and leaves the pool unset."""
        self.assertEqual(self.processes, self.options.processes)
        self.assertIsNone(self.options.pool)

    def test_get_processes_small(self):
        """get_processes() honours a request below the configured maximum."""
        requested = 1
        self.assertEqual(self.options.get_processes(requested), requested)

    def test_get_processes_large(self):
        """get_processes() caps a request above the configured maximum."""
        self.assertEqual(self.options.get_processes(2 * self.processes),
                         self.processes)

    def test_invalid_processes(self):
        """A non-positive process count is rejected with ValueError."""
        with self.assertRaises(ValueError):
            LinearQubitOperatorOptions(0)

    def test_get_pool(self):
        """get_pool() lazily builds a pool when called without a num."""
        self.assertIsNone(self.options.pool)
        self.assertIsNotNone(self.options.get_pool())

    def test_get_pool_with_num(self):
        """get_pool() also accepts an explicit worker count."""
        self.assertIsNone(self.options.pool)
        self.assertIsNotNone(self.options.get_pool(2))
class LinearQubitOperatorTest(unittest.TestCase):
    """Tests for LinearQubitOperator class.

    Each matvec test applies a small QubitOperator to a dense vector and
    checks the product against a hand-computed result.
    """

    def test_init(self):
        """Tests __init__(): n_qubits is inferred from the highest qubit index."""
        qubit_operator = QubitOperator('Z2')
        n_qubits = 3
        linear_operator = LinearQubitOperator(qubit_operator)

        self.assertEqual(linear_operator.qubit_operator, qubit_operator)
        self.assertEqual(linear_operator.n_qubits, n_qubits)

        # Checks type: must satisfy scipy's LinearOperator protocol.
        self.assertTrue(
            isinstance(linear_operator, scipy.sparse.linalg.LinearOperator))

    def test_matvec_wrong_n(self):
        """Testing with wrong n_qubits (smaller than the operator needs)."""
        with self.assertRaises(ValueError):
            LinearQubitOperator(QubitOperator('X3'), 1)

    def test_matvec_wrong_vec_length(self):
        """Testing with wrong vector length (4 != 2**4)."""
        with self.assertRaises(ValueError):
            _ = LinearQubitOperator(QubitOperator('X3')) * numpy.zeros(4)

    def test_matvec_0(self):
        """Testing with zero term: product is the zero vector."""
        qubit_operator = QubitOperator.zero()
        vec = numpy.array([1, 2, 3, 4, 5, 6, 7, 8])
        matvec_expected = numpy.zeros(vec.shape)

        self.assertTrue(
            numpy.allclose(
                LinearQubitOperator(qubit_operator, 3) * vec, matvec_expected))

    def test_matvec_x(self):
        """Testing product with X: swaps adjacent amplitude pairs."""
        vec = numpy.array([1, 2, 3, 4])
        matvec_expected = numpy.array([2, 1, 4, 3])

        self.assertTrue(
            numpy.allclose(
                LinearQubitOperator(QubitOperator('X1')) * vec,
                matvec_expected))

    def test_matvec_y(self):
        """Testing product with Y: swap plus +/-1j phases."""
        vec = numpy.array([1, 2, 3, 4], dtype=complex)
        matvec_expected = 1.0j * numpy.array([-2, 1, -4, 3], dtype=complex)

        self.assertTrue(
            numpy.allclose(
                LinearQubitOperator(QubitOperator('Y1')) * vec,
                matvec_expected))

    def test_matvec_z(self):
        """Testing product with Z: negates the |1> half of qubit 0."""
        vec = numpy.array([1, 2, 3, 4])
        matvec_expected = numpy.array([1, 2, -3, -4])

        self.assertTrue(
            numpy.allclose(
                LinearQubitOperator(QubitOperator('Z0'), 2) * vec,
                matvec_expected))

    def test_matvec_z3(self):
        """Testing product with Z^n: sign flips on the lowest-order qubit."""
        vec = numpy.array(
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
        matvec_expected = numpy.array(
            [1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16])

        self.assertTrue(
            numpy.allclose(
                LinearQubitOperator(QubitOperator('Z3')) * vec,
                matvec_expected))

    def test_matvec_zx(self):
        """Testing with multiple factors in one term (Z0 X1)."""
        vec = numpy.array([1, 2, 3, 4])
        matvec_expected = numpy.array([2, 1, -4, -3])

        self.assertTrue(
            numpy.allclose(
                LinearQubitOperator(QubitOperator('Z0 X1')) * vec,
                matvec_expected))

    def test_matvec_multiple_terms(self):
        """Testing with multiple terms: results add linearly."""
        qubit_operator = (QubitOperator.identity() + 2 * QubitOperator('Y2') +
                          QubitOperator(((0, 'Z'), (1, 'X')), 10.0))
        vec = numpy.array([1, 2, 3, 4, 5, 6, 7, 8])
        matvec_expected = (10 * numpy.array([3, 4, 1, 2, -7, -8, -5, -6]) +
                           2j * numpy.array([-2, 1, -4, 3, -6, 5, -8, 7]) + vec)

        self.assertTrue(
            numpy.allclose(
                LinearQubitOperator(qubit_operator) * vec, matvec_expected))

    def test_matvec_compare(self):
        """Compare LinearQubitOperator with qubit_operator_sparse.

        Reconstructs the dense matrix column by column and checks it equals
        the sparse reference implementation.
        """
        qubit_operator = QubitOperator('X0 Y1 Z3')
        mat_expected = qubit_operator_sparse(qubit_operator)

        self.assertTrue(
            numpy.allclose(
                numpy.transpose(
                    numpy.array([
                        LinearQubitOperator(qubit_operator) * v
                        for v in numpy.identity(16)
                    ])), mat_expected.A))
class ParallelLinearQubitOperatorTest(unittest.TestCase):
    """Tests for ParallelLinearQubitOperator class."""

    def setUp(self):
        """ParallelLinearQubitOperator test set up.

        Builds a 3-term operator (Z3 + Y0 + X1) on 4 qubits and precomputes
        the expected matrix-vector product term by term.
        """
        self.qubit_operator = (QubitOperator('Z3') + QubitOperator('Y0') +
                               QubitOperator('X1'))
        self.n_qubits = 4
        self.linear_operator = ParallelLinearQubitOperator(self.qubit_operator)

        # Vectors for calculations.
        self.vec = numpy.array(range(2**self.n_qubits))
        # Z3 contribution: sign flip on every odd-indexed amplitude.
        expected_matvec = numpy.array([
            0,
            -1,
            2,
            -3,
            4,
            -5,
            6,
            -7,
            8,
            -9,
            10,
            -11,
            12,
            -13,
            14,
            -15,
        ])
        # Y0 contribution: swaps the two halves with -1j/+1j phases.
        expected_matvec = expected_matvec + numpy.array([
            -8j,
            -9j,
            -10j,
            -11j,
            -12j,
            -13j,
            -14j,
            -15j,
            0j,
            1j,
            2j,
            3j,
            4j,
            5j,
            6j,
            7j,
        ])
        # X1 contribution: swaps blocks of four.
        expected_matvec += numpy.array([
            4,
            5,
            6,
            7,
            0,
            1,
            2,
            3,
            12,
            13,
            14,
            15,
            8,
            9,
            10,
            11,
        ])
        self.expected_matvec = expected_matvec

    def test_init(self):
        """Tests __init__(): defaults, term grouping, and operator types."""
        self.assertEqual(self.linear_operator.qubit_operator,
                         self.qubit_operator)
        self.assertEqual(self.linear_operator.n_qubits, self.n_qubits)
        self.assertIsNone(self.linear_operator.options.pool)
        # Default worker count is the CPU count capped at 10.
        cpu_count = multiprocessing.cpu_count()
        default_processes = min(cpu_count, 10)
        self.assertEqual(self.linear_operator.options.processes,
                         default_processes)

        # Generated variables: the 3 terms split into at most cpu_count
        # groups, and the groups must sum back to the original operator.
        self.assertEqual(len(self.linear_operator.qubit_operator_groups),
                         min(multiprocessing.cpu_count(), 3))
        self.assertEqual(
            QubitOperator.accumulate(
                self.linear_operator.qubit_operator_groups),
            self.qubit_operator)
        for linear_operator in self.linear_operator.linear_operators:
            self.assertEqual(linear_operator.n_qubits, self.n_qubits)
            self.assertTrue(isinstance(linear_operator, LinearQubitOperator))

        # Checks type.
        self.assertTrue(
            isinstance(self.linear_operator,
                       scipy.sparse.linalg.LinearOperator))

    def test_matvec(self):
        """Tests _matvec() for matrix multiplication with a vector."""
        self.assertIsNone(self.linear_operator.options.pool)
        self.assertTrue(
            numpy.allclose(self.linear_operator * self.vec,
                           self.expected_matvec))

    def test_matvec_0(self):
        """Testing with zero term: product is the zero vector."""
        qubit_operator = QubitOperator.zero()
        vec = numpy.array([1, 2, 3, 4, 5, 6, 7, 8])
        matvec_expected = numpy.zeros(vec.shape)

        self.assertTrue(
            numpy.allclose(
                ParallelLinearQubitOperator(qubit_operator, 3) * vec,
                matvec_expected))
        self.assertIsNone(self.linear_operator.options.pool)

    def test_closed_workers_not_reused(self):
        # A second dot() must not reuse the pool closed after the first;
        # the pool attribute stays unset between calls.
        qubit_operator = QubitOperator('X0')
        parallel_qubit_op = ParallelLinearQubitOperator(
            qubit_operator, 1, options=LinearQubitOperatorOptions(processes=2))
        state = [1.0, 0.0]
        parallel_qubit_op.dot(state)
        parallel_qubit_op.dot(state)
        self.assertIsNone(parallel_qubit_op.options.pool)
class UtilityFunctionTest(unittest.TestCase):
    """Tests for utility functions."""

    def test_apply_operator(self):
        """Tests apply_operator() since it's executed on other processors."""
        operator = LinearQubitOperator(QubitOperator('X1'))
        vec = numpy.array([1, 2, 3, 4])
        self.assertTrue(
            numpy.allclose(apply_operator((operator, vec)),
                           numpy.array([2, 1, 4, 3])))

    def test_generate_linear_operator(self):
        """Tests generate_linear_qubit_operator()."""
        qubit_operator = (QubitOperator('Z3') + QubitOperator('X1') +
                          QubitOperator('Y0'))
        n_qubits = 6

        # Checks types: serial without options, parallel with options.
        serial = generate_linear_qubit_operator(qubit_operator, n_qubits)
        self.assertTrue(isinstance(serial, LinearQubitOperator))
        self.assertFalse(isinstance(serial, ParallelLinearQubitOperator))

        parallel = generate_linear_qubit_operator(
            qubit_operator, n_qubits, options=LinearQubitOperatorOptions(2))
        self.assertTrue(isinstance(parallel, ParallelLinearQubitOperator))
        self.assertFalse(isinstance(parallel, LinearQubitOperator))

        # Checks operators are equivalent on a reproducible random vector.
        numpy.random.seed(n_qubits)
        vec = numpy.random.rand(2 ** n_qubits, 1)
        self.assertTrue(numpy.allclose(serial * vec, parallel * vec))
| |
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import user
import key
from boto import handler
import xml.sax
class CompleteMultiPartUpload(object):
    """
    Represents a completed MultiPart Upload. Contains the
    following useful attributes:

     * location - The URI of the completed upload
     * bucket_name - The name of the bucket in which the upload
       is contained
     * key_name - The name of the new, completed key
     * etag - The MD5 hash of the completed, combined upload
    """

    # Maps S3 XML element names onto instance attribute names.
    _XML_ATTR_MAP = {'Location': 'location',
                     'Bucket': 'bucket_name',
                     'Key': 'key_name',
                     'ETag': 'etag'}

    def __init__(self, bucket=None):
        self.bucket = bucket
        self.location = None
        self.bucket_name = None
        self.key_name = None
        self.etag = None

    def __repr__(self):
        return '<CompleteMultiPartUpload: %s.%s>' % (self.bucket_name,
                                                     self.key_name)

    def startElement(self, name, attrs, connection):
        # No nested XML elements to handle.
        return None

    def endElement(self, name, value, connection):
        # Known elements go to their mapped attribute; anything else is
        # mirrored onto the instance under the element's own name.
        setattr(self, self._XML_ATTR_MAP.get(name, name), value)
class Part(object):
    """
    Represents a single part in a MultiPart upload.
    Attributes include:

     * part_number - The integer part number
     * last_modified - The last modified date of this part
     * etag - The MD5 hash of this part
     * size - The size, in bytes, of this part
    """

    def __init__(self, bucket=None):
        self.bucket = bucket
        self.part_number = None
        self.last_modified = None
        self.etag = None
        self.size = None

    def __repr__(self):
        # Show the number only when it is a real integer.
        if isinstance(self.part_number, int):
            return '<Part %d>' % self.part_number
        return '<Part %s>' % None

    def startElement(self, name, attrs, connection):
        # No nested XML elements to handle.
        return None

    def endElement(self, name, value, connection):
        # Integer-valued elements first, plain strings next, then a
        # generic fallback mirroring the element name onto the instance.
        if name == 'PartNumber':
            self.part_number = int(value)
        elif name == 'Size':
            self.size = int(value)
        elif name == 'LastModified':
            self.last_modified = value
        elif name == 'ETag':
            self.etag = value
        else:
            setattr(self, name, value)
def part_lister(mpupload, part_number_marker=None):
    """
    A generator function for listing parts of a multipart upload.
    """
    while True:
        # Fetch one page of parts starting at the current marker.
        for part in mpupload.get_all_parts(None, part_number_marker):
            yield part
        if not mpupload.is_truncated:
            break
        part_number_marker = mpupload.next_part_number_marker
class MultiPartUpload(object):
    """
    Represents a MultiPart Upload operation.

    Instances are populated from S3's XML responses via the SAX hooks
    startElement/endElement. Iterating an instance yields its uploaded
    :class:`Part` objects, transparently paging through S3 listings.
    """

    def __init__(self, bucket=None):
        self.bucket = bucket
        self.bucket_name = None
        self.key_name = None
        # Bug fix: this originally read ``self.id = id``, which stored a
        # reference to the *builtin* ``id`` function instead of a null
        # upload id. It is overwritten by endElement('UploadId', ...) on
        # parse, but any code inspecting a fresh instance saw a function.
        self.id = None
        self.initiator = None
        self.owner = None
        self.storage_class = None
        self.initiated = None
        self.part_number_marker = None
        self.next_part_number_marker = None
        self.max_parts = None
        self.is_truncated = False
        self._parts = None

    def __repr__(self):
        return '<MultiPartUpload %s>' % self.key_name

    def __iter__(self):
        # part_lister pages through get_all_parts automatically.
        return part_lister(self)

    def to_xml(self):
        """Return the CompleteMultipartUpload XML body listing all parts."""
        s = '<CompleteMultipartUpload>\n'
        for part in self:
            s += '  <Part>\n'
            s += '    <PartNumber>%d</PartNumber>\n' % part.part_number
            s += '    <ETag>%s</ETag>\n' % part.etag
            s += '  </Part>\n'
        s += '</CompleteMultipartUpload>'
        return s

    def startElement(self, name, attrs, connection):
        """SAX hook: create nested objects for Initiator/Owner/Part."""
        if name == 'Initiator':
            self.initiator = user.User(self)
            return self.initiator
        elif name == 'Owner':
            self.owner = user.User(self)
            return self.owner
        elif name == 'Part':
            part = Part(self.bucket)
            self._parts.append(part)
            return part
        return None

    def endElement(self, name, value, connection):
        """SAX hook: map leaf elements onto instance attributes."""
        if name == 'Bucket':
            self.bucket_name = value
        elif name == 'Key':
            self.key_name = value
        elif name == 'UploadId':
            self.id = value
        elif name == 'StorageClass':
            self.storage_class = value
        elif name == 'PartNumberMarker':
            self.part_number_marker = value
        elif name == 'NextPartNumberMarker':
            self.next_part_number_marker = value
        elif name == 'MaxParts':
            self.max_parts = int(value)
        elif name == 'IsTruncated':
            # S3 sends the literal strings 'true'/'false'.
            self.is_truncated = (value == 'true')
        else:
            setattr(self, name, value)

    def get_all_parts(self, max_parts=None, part_number_marker=None):
        """
        Return the uploaded parts of this MultiPart Upload.  This is
        a lower-level method that requires you to manually page through
        results.  To simplify this process, you can just use the
        object itself as an iterator and it will automatically handle
        all of the paging with S3.

        :type max_parts: int
        :param max_parts: Maximum number of parts to return per request.

        :type part_number_marker: string
        :param part_number_marker: List parts after this part number.
        """
        self._parts = []
        query_args = 'uploadId=%s' % self.id
        if max_parts:
            query_args += '&max-parts=%d' % max_parts
        if part_number_marker:
            query_args += '&part-number-marker=%s' % part_number_marker
        response = self.bucket.connection.make_request('GET', self.bucket.name,
                                                       self.key_name,
                                                       query_args=query_args)
        body = response.read()
        if response.status == 200:
            h = handler.XmlHandler(self, self)
            xml.sax.parseString(body, h)
        return self._parts

    def upload_part_from_file(self, fp, part_num, headers=None, replace=True,
                              cb=None, num_cb=10, policy=None, md5=None,
                              size=None):
        """
        Upload another part of this MultiPart Upload.

        :type fp: file
        :param fp: The file object you want to upload.

        :type part_num: int
        :param part_num: The number of this part.

        The other parameters are exactly as defined for the
        :class:`boto.s3.key.Key` set_contents_from_file method.

        :raises ValueError: If ``part_num`` is less than 1 (S3 part
            numbers are 1-based).
        """
        if part_num < 1:
            raise ValueError('Part numbers must be greater than zero')
        query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
        key = self.bucket.new_key(self.key_name)
        key.set_contents_from_file(fp, headers, replace, cb, num_cb, policy,
                                   md5, reduced_redundancy=False,
                                   query_args=query_args, size=size)

    def copy_part_from_key(self, src_bucket_name, src_key_name, part_num,
                           start=None, end=None):
        """
        Copy another part of this MultiPart Upload.

        :type src_bucket_name: string
        :param src_bucket_name: Name of the bucket containing the source key

        :type src_key_name: string
        :param src_key_name: Name of the source key

        :type part_num: int
        :param part_num: The number of this part.

        :type start: int
        :param start: Zero-based byte offset to start copying from

        :type end: int
        :param end: Zero-based byte offset to copy to

        :raises ValueError: If ``part_num`` is less than 1.
        """
        if part_num < 1:
            raise ValueError('Part numbers must be greater than zero')
        query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
        # A byte range is only sent when both endpoints are given.
        if start is not None and end is not None:
            rng = 'bytes=%s-%s' % (start, end)
            provider = self.bucket.connection.provider
            headers = {provider.copy_source_range_header: rng}
        else:
            headers = None
        return self.bucket.copy_key(self.key_name, src_bucket_name,
                                    src_key_name, storage_class=None,
                                    headers=headers,
                                    query_args=query_args)

    def complete_upload(self):
        """
        Complete the MultiPart Upload operation.  This method should
        be called when all parts of the file have been successfully
        uploaded to S3.

        :rtype: :class:`boto.s3.multipart.CompleteMultiPartUpload`
        :returns: An object representing the completed upload.
        """
        # Renamed from ``xml`` so the local no longer shadows the
        # module-level ``import xml.sax``.
        xml_body = self.to_xml()
        return self.bucket.complete_multipart_upload(self.key_name,
                                                     self.id, xml_body)

    def cancel_upload(self):
        """
        Cancels a MultiPart Upload operation.  The storage consumed by
        any previously uploaded parts will be freed. However, if any
        part uploads are currently in progress, those part uploads
        might or might not succeed. As a result, it might be necessary
        to abort a given multipart upload multiple times in order to
        completely free all storage consumed by all parts.
        """
        self.bucket.cancel_multipart_upload(self.key_name, self.id)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
  """Reverses a numpy array along one axis.

  Args:
    x: numpy array.
    axis: int, axis to reverse; may be negative (counted from the end).

  Returns:
    A view of `x` with the given axis reversed.
  """
  length = len(x.shape)
  if axis < 0:
    axis = length + axis
  # Index with a tuple of slices: indexing with a *list* of slices was
  # deprecated in NumPy 1.15 and is an error in modern NumPy.
  ix = tuple(
      slice(None, None, -1) if i == axis else slice(None) for i in range(length)
  )
  return x[ix]
def handle_options(func, x, axis, exclusive, reverse):
  """Adds tf options to numpy scan ops.

  Args:
    func: np.cumsum or np.cumprod.
    x: numpy array to scan.
    axis: int, axis of the scan; may be negative.
    exclusive: bool, if True the scan excludes the current element.
    reverse: bool, if True the scan runs from the end of the axis.

  Returns:
    The scanned numpy array.

  Raises:
    ValueError: if `func` is neither np.cumsum nor np.cumprod.
  """
  length = len(x.shape)
  if axis < 0:
    axis = length + axis

  if reverse:
    x = numpy_reverse(x, axis)

  if exclusive:
    # Tuples of slices, not lists: list indexing was deprecated in
    # NumPy 1.15 and is an error in modern NumPy.
    ix_head = tuple(
        slice(0, 1) if i == axis else slice(None) for i in range(length))
    ix_init = tuple(
        slice(0, -1) if i == axis else slice(None) for i in range(length))
    if func == np.cumsum:
      init = np.zeros_like(x[ix_head])
    elif func == np.cumprod:
      init = np.ones_like(x[ix_head])
    else:
      raise ValueError("Unknown scan function.")
    # Prepend the identity element and scan everything but the last slice.
    x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
  else:
    x = func(x, axis=axis)

  if reverse:
    x = numpy_reverse(x, axis)
  return x
class CumsumTest(xla_test.XLATestCase):
  """Tests math_ops.cumsum against the NumPy reference implementation."""

  valid_dtypes = [np.float32, np.int32]

  def axis_dtypes(self):
    """Integer dtypes usable for the axis argument on this backend."""
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    # NumPy reference result with the same option semantics.
    expected = handle_options(np.cumsum, x, axis, exclusive, reverse)
    with self.session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      actual = math_ops.cumsum(p, axis, exclusive, reverse).eval(
          feed_dict={p: x})
      self.assertAllClose(expected, actual)

  def _compareAll(self, x, axis):
    # Every combination of the exclusive/reverse flags.
    for exclusive, reverse in ((True, True), (True, False),
                               (False, True), (False, False)):
      self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    for dt in self.valid_dtypes:
      data = np.zeros([0]).astype(dt)
      for axis in (-1, 0):
        self._compareAll(data, axis)

  def testAxisType(self):
    for dt in self.valid_dtypes:
      data = np.arange(1, 6).reshape([5]).astype(dt)
      for axis_dtype in self.axis_dtypes():
        with self.session(), self.test_scope():
          p = array_ops.placeholder(data.dtype)
          axis = constant_op.constant(0, axis_dtype)
          math_ops.cumsum(p, axis).eval(feed_dict={p: data})

  def test1D(self):
    for dt in self.valid_dtypes:
      data = np.arange(1, 6).reshape([5]).astype(dt)
      for axis in (-1, 0):
        self._compareAll(data, axis)

  def test2D(self):
    for dt in self.valid_dtypes:
      data = np.arange(0, 10).reshape([2, 5]).astype(dt)
      for axis in (-2, -1, 0, 1):
        self._compareAll(data, axis)

  def test3D(self):
    for dt in self.valid_dtypes:
      data = np.arange(0, 20).reshape([2, 2, 5]).astype(dt)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(data, axis)

  def test6D(self):
    for dt in self.valid_dtypes:
      data = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dt)
      for axis in range(-6, 6, 3):
        self._compareAll(data, axis)

  def testInvalidAxis(self):
    data = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(data)
      # Out-of-range axes in either direction.
      for bad_axis in (-3, 2):
        with self.assertRaisesWithPredicateMatch(
            errors_impl.InvalidArgumentError,
            lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
          math_ops.cumsum(input_tensor, bad_axis).eval()
      # Non-scalar axis.
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumsum(input_tensor, [0]).eval()
class CumprodTest(xla_test.XLATestCase):
  """Functional tests for math_ops.cumprod, mirroring CumsumTest."""

  valid_dtypes = [np.float32, np.int32]

  def axis_dtypes(self):
    """Integer dtypes usable for the axis argument on this backend."""
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    """Checks cumprod(x) against the NumPy reference for one option set."""
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      prod = math_ops.cumprod(p, axis, exclusive, reverse)
      tf_out = prod.eval(feed_dict={p: x})
      self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          # Bug fix: the op must consume the placeholder `p` (which the
          # feed_dict supplies), not the numpy array `x` directly —
          # consistent with CumsumTest.testAxisType.
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  def testInvalidAxis(self):
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumprod(input_tensor, [0]).eval()
# Standard test entry point: run the suite when executed directly.
if __name__ == "__main__":
  test.main()
| |
from django.shortcuts import render
from appCerebro.models import Proyecto, Ejercicio, Variable, Usuario, Realizacion, Datos
from django.views.decorators.csrf import csrf_exempt
import json
from django.utils import simplejson
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.contrib import auth
import copy
#COSAS A PEDIR:
#anadirProyecto:
#Problema con el select: como hago para coger el valor seleccionado????
#cambiando para meter mas de un desarrollador
#vistaProyecto:
#scrol en las tablas (si el scroll se puede disimular cuando no haga falta mejor)
#lo del despliegue de las Tablas
#modificarProyecto:
#anadir nuevos atributos
#anadir propiedades en atributos existentes
#cambiar nombre de atributos
#cambiar nombre de propiedades
#eliminar atributos
#eliminar propiedades
#eliminarProyecto:
#procesamiento de Datos:
import os
#import pymongo
import pymongolab
#from pymongo import MongoClient
from pymongolab import MongoClient
from django.conf import settings
#client=pymongo.MongoClient("mongodb://root:evida0@ds037581.mongolab.com:37581/cerebromongo")
# SECURITY NOTE(review): this MongoLab API key is hard-coded in source and
# committed to the repository; it should be moved into Django settings or an
# environment variable, and the key rotated.
client=MongoClient("tQTYL-n7i4oG6QsWO3bua4GRt_CYtX53")
db=client['cerebromongo']
# Create your views here.
# Mongo collections used by the views below.
projectCol = db["proyectos1"]
projectCollection=db["proyectos"]
userCol= db["usuarios"]
@csrf_exempt
def home(request):
    """Log a user in and redirect according to group membership.

    On a failed or anonymous login the index page is rendered again.
    """
    username = request.POST.get("username", "")
    password = request.POST.get("pass", "")
    user = auth.authenticate(username=username, password=password)
    if user is not None and user.is_active:
        auth.login(request, user)
        # Route by group: developers, doctors, or patients each get
        # their own landing page.
        is_developer = user.groups.filter(name="desarrolladores").exists()
        is_doctor = user.groups.filter(name="doctores").exists()
        is_patient = user.groups.filter(name="pacientes").exists()
        if is_developer:
            return HttpResponseRedirect('/tecnico/')
        elif is_doctor:
            return HttpResponseRedirect('/doctor/')
        elif is_patient:
            return HttpResponseRedirect('/paciente/')
        # NOTE(review): a user in none of the groups falls through to the
        # index page; login-error handling is still unimplemented.
    return render_to_response('index2.html')
#------------------------------------------------------------------------------------
#Limitar el acceso de los grupos. Creo que tengo que crear en cada grupo
#permisos del estilo "is_developer" y despues utilizar @permission_required('is_developer')
#(esto de @permission_required esta en la documentacion de la autenticacion de Django por el final)
@csrf_exempt
def vistaTecnico(request):
    """Developer landing page.

    Reads every document from the ``proyectos`` Mongo collection and
    renders ``vistaTecnico.html`` with the project names plus the
    de-duplicated developer names across all projects.

    Cleanup: removed unused locals (listaParticipantes, listaUsuarios,
    listaDoctores, longitud), iterates the cursor result directly, and
    builds the context once instead of re-updating it per iteration.
    """
    proyectos = list(projectCollection.find())
    listaProyectos = []
    listaParticipantesPrueba = []
    listaDesarrolladores = []
    for proy in proyectos:
        listaProyectos.append({"nombre": proy['nombreProyecto']})
        # Accumulates unique developer names across all projects.
        cargadoParticipantes(proy, 'desarrollador',
                             listaParticipantesPrueba, listaDesarrolladores)
    context = {
        "proyectos": listaProyectos,
        "participantes": listaParticipantesPrueba,
    }
    return render(request, 'vistaTecnico.html', context)
def cargadoParticipantes(proy,tipo,listaParticipantesPrueba,lista):
    """Append the participants of ``proy`` of kind ``tipo`` to both lists.

    ``tipo`` selects which document field to read ('usuario',
    'desarrollador' or 'doctor'); names already present in ``lista``
    are skipped.  Both output lists are mutated in place and share the
    same participant dict objects.
    """
    # NOTE(review): the original Spanish comment warned this "fails with
    # duplicated developers" — behaviour is kept exactly as-is here.
    if tipo=='usuario':
        clave, claveDict = 'usuarios', 'user'
    elif tipo=='desarrollador':
        clave, claveDict = 'nombreDesarrolladores', 'dev'
    elif tipo=='doctor':
        clave, claveDict = 'doctores', 'doc'
    for nombre in proy[clave]:
        participante = {claveDict: nombre}
        ya_presente = any(
            existente[claveDict] == participante[claveDict]
            for existente in lista)
        if not ya_presente:
            lista.append(participante)
            listaParticipantesPrueba.append(participante)
@csrf_exempt
def vistaDoctor(request):
    # Doctor landing page; renders a static template with no context.
    return render_to_response('vistaDoctor.html')
@csrf_exempt
def vistaPaciente(request):
    """Patient landing page; renders a static template with no context.

    Bug fix: the original called ``render_to_rasponse`` (a NameError at
    request time) with template ``'vistaPaciente.htmA'``; corrected to
    ``render_to_response('vistaPaciente.html')`` to match the sibling
    views (vistaDoctor et al.).
    """
    return render_to_response('vistaPaciente.html')
@csrf_exempt
def vistaAnadirProyecto11(request):
    """Create a new project document from the add-project form.

    GET renders the empty form; POST reads the project name, the
    comma-separated developer list, the description, and a variable
    number of attributes (each with a variable number of properties)
    keyed ``nombreAtributoX`` / ``nombreAtributoXPropiedadY`` etc.,
    inserts the assembled document into the ``proyectos`` collection
    (only when a project name was given) and redirects to /tecnico/.
    """
    proyecto={}
    nombreProyecto=str(request.POST.get("nombreProyecto",""))
    proyecto.update({'nombreProyecto':nombreProyecto})
    nombreDesarrolladores=str(request.POST.get("nombreDesarrolladores",""))
    #print nombreDesarrolladores
    # Developers arrive as one comma-separated string.
    listaDesarrolladores=nombreDesarrolladores.split(",")#
    #print listaDesarrolladores
    proyecto.update({'nombreDesarrolladores':listaDesarrolladores})#
    #listaAtributos=[] #original position of this variable
    #print proyecto
    descripcion=str(request.POST.get("descripcionProyecto",""))#do not type enye characters into this field
    proyecto.update({'descripcion':descripcion})
    listaAtributos=[]
    numAtributos=request.POST.get("numeroAtributos","0")
    if numAtributos=="":
        numAtributos=0
    print "numeroAtributos: "+str(numAtributos)
    numAtributos=int(numAtributos)
    # "numeroPropiedades" is a comma-separated count per attribute; pad
    # with "0" entries so it always has one entry per attribute.
    numPropiedades=request.POST.get("numeroPropiedades","0,")
    numPropiedades=numPropiedades.split(",")
    if len(numPropiedades)!=numAtributos:
        dif=numAtributos-len(numPropiedades)
        for z in range(0,dif):
            numPropiedades.insert(numAtributos-dif+z,"0")
    print "tras las modificaciones"
    print numPropiedades
    print len(numPropiedades)
    if len(numPropiedades)==1:
        if numPropiedades[0]=="":
            print "dentro!!!"
            numPropiedades[0]="0"
    print numPropiedades
    # Assemble each attribute with its list of typed properties.
    for i in range(0,numAtributos):
        x=i+1
        atributo={}
        listaPropiedades=[]
        nombreAtributo=str(request.POST.get("nombreAtributo"+str(x),""))
        atributo.update({'nombreAtributo':nombreAtributo})
        propiedades=int(numPropiedades[i])
        for a in range(0,propiedades):
            y=a+1
            propiedad={}
            nombrePropiedad=str(request.POST.get("nombreAtributo"+str(x)+"Propiedad"+str(y),""))
            propiedad.update({'nombrePropiedad':nombrePropiedad})
            tipoPropiedad=str(request.POST.get("tipoAtributo"+str(x)+"Propiedad"+str(y),""))
            propiedad.update({'tipoPropiedad':tipoPropiedad})
            valorPropiedad=str(request.POST.get("valorAtributo"+str(x)+"Propiedad"+str(y),""))
            propiedad.update({'valorPropiedad':valorPropiedad})
            listaPropiedades.append(propiedad)
        atributo.update({'propiedades':listaPropiedades})
        listaAtributos.append(atributo)
    #this update should happen after all the attributes are in the list
    proyecto.update({'atributos':listaAtributos})
    print proyecto
    # Insert only when a project name was supplied.
    if nombreProyecto!="":
        projectCollection.insert(proyecto)#verify this works
    #else:
        #WARN the user that a name is required
    #UNDER TEST (the next 2 lines)
    if request.method=='POST':
        return HttpResponseRedirect('/tecnico/')
    return render_to_response('anadirProyecto11.html')
#Siguiente paso: vista en detalle de X proyecto
#Siguiente paso: forma de modificar los datos desde la pagina
#cuando termine de REGISTRAR o MODIFICAR algun proyecto, el submit redirige
#a la misma URL (para que se ejecute el mismo metodo) pero en vez de
#return render(request,'modificarProyecto.html',context) deberia de ser un
#Httpredirect(/tecnico/) o algo asi (y ya si pongo un mensaje de "se ha actualizado" la hostia)
#como pongo los datos? creo unas tablas cuyo tamanio dependa de los datos u otra cosa?
#como agrego usuarios a un proyecto? (el de jon tiene pacientes ya metidos)
#les pongo un "usuarios" y segun el nombre busque "documents" que se correspondan?
#en el caso del robot de leire, los datos de los usuarios no los almaceno en el docs de usuarios
#como hago el envio/recepcion de datos?
#hago una URL en el url.py para que llame a X metodo? y luego ahi cojo su JSON y hago
#lo que necesite y por ultimo envio las mods al MONGOLAB
#eliminar seria tan sencillo como eliminar el docs del proyecto? o tendria que tener en cuenta algo mas?
#ej: si en "usuario" hay un campo con los nombres de los proyectos en los que esta metido
#tendria que recorrer ese campo y eliminar el nombre que coincida con el nombre del proyecto para eliminar
#eso de generar informes...graficas????
#@login_required es para el log sea requerido (como dice zopenco)
#en el caso de la API csrf_exempt (en los de mas no)
#RELACIONES: meter en el tipo??? (similar)
#
@csrf_exempt
def vistaProyecto(request,nombreProyecto):
print "nombre del proyecto"
print nombreProyecto
proyectos = list(projectCollection.find())
longitud=len(proyectos)
#cojo los proyectos
# parseo hasta encontrar aquel que coincide con el nombre
#una vez encontrado hago las cosas nazis
for p in range(0,longitud):
proy=proyectos[p]
nombreP=proy['nombreProyecto']
if nombreP==nombreProyecto:
context=proy
print "dddddddddddddddddd"
print context
#busco longitud de desarrolladores
#hago un for y los meto en una lista listaDesarrolladores=[]
#meto la lista en context
#descripcion=proy['descripcion']
#lo meto en el context
#busco longitud de atributos
#hago un for
#busco el nombre de los atributos
return render(request,'vistaProyecto.html',context)
def eliminarProyecto(request):
    # Stub: deletion is not implemented yet; it just redirects back.
    #it should receive 2 parameters: the request and the PROJECT NAME (variable named nombreP)
    #projectCollection.remove({'nombreProyecto':nombreP}) #that removes the project, but what about its users???
    return HttpResponseRedirect('/tecnico/')
    #after deletion it redirects to vistaTecnico, which must NOT still show the deleted project
@csrf_exempt
def modificarProyecto(request, nombreProyecto):
    """Edit an existing project document in the ``proyectos`` collection.

    GET renders the edit form pre-filled from the matching document.
    POST deep-copies that document into ``cambios``, applies the common
    fields, appends newly added attributes/properties (``nombreAtributoX``
    form keys), renames/extends modified attributes (``mNombreAtributoX``
    form keys), issues a Mongo ``$set`` update keyed on the original
    project name, and redirects to /tecnico/.  The mutation order below
    is load-bearing — added attributes must exist in ``cambios`` before
    the modification pass looks them up by name.
    """
    #modificacion={"$set":cambios}
    #print "modificacion"
    #print modificacion
    #http://blog.pythonisito.com/2012/01/moving-along-with-pymongo.html
    context={}
    clave={"nombreProyecto":str(nombreProyecto)}
    proyectos = list(projectCollection.find())
    longitud=len(proyectos)
    #------------------------------------------------------------------------------------------------
    #fill the form fields from the matching document
    for p in range(0,longitud):
        proy=proyectos[p]
        nombreP=proy['nombreProyecto']
        if nombreP==nombreProyecto:
            context=proy
    #------------------------------------------------------------------------------------------------
    #read the common form fields
    #cambios=context
    cambios={}
    # Deep copy so edits to ``cambios`` never mutate ``context`` (which
    # is also used to re-render the form).
    cambios=copy.deepcopy(context)
    print "El original"
    print cambios
    listaAtributos=[]
    nProy=str(request.POST.get("nombreProyecto",""))
    cambios.update({"nombreProyecto":nProy})
    nombreDesarrolladores=str(request.POST.get("nombreDesarrolladores",""))
    listaDesarrolladores=nombreDesarrolladores.split(",")#
    #cambios.update({"nombreDesarrolladores":nombreDesarrolladores})
    descripcion=str(request.POST.get("descripcionProyecto",""))#NOTE! empty for now!!!
    cambios.update({"descripcion":descripcion})
    #--------------------------------------------------------
    #Unmodified (newly added) section counters
    numAtributos=request.POST.get("numeroAtributos","0")
    if numAtributos=="":
        numAtributos=0
    numAtributos=int(numAtributos)
    # One property count per attribute; pad the list with "0" entries.
    numPropiedades=request.POST.get("numeroPropiedades","0,")
    numPropiedades=numPropiedades.split(",")
    if len(numPropiedades)!=numAtributos:
        dif=numAtributos-len(numPropiedades)
        for z in range(0,dif):
            numPropiedades.insert(numAtributos-dif+z,"0")
    if len(numPropiedades)==1:
        if numPropiedades[0]=="":
            numPropiedades[0]="0"
    #--------------------------------------------------------
    #Modified section counters
    mNumAtributos=request.POST.get("mNumeroAtributos","0")
    if mNumAtributos=="":
        mNumAtributos=0
    mNumAtributos=int(mNumAtributos)
    #read the form fields of the ADDED attributes
    #------------------------------------------------------------------------------------------------
    for i in range(0,numAtributos):
        x=i+1
        atributo={}
        listaPropiedades=[]
        nombreAtributo=str(request.POST.get("nombreAtributo"+str(x),""))
        cambios["atributos"].append({'nombreAtributo':nombreAtributo})
        propiedades=int(numPropiedades[i])
        for a in range(0,propiedades):
            print "dentro del for"
            y=a+1
            propiedad={}
            nombrePropiedad=str(request.POST.get("nombreAtributo"+str(x)+"Propiedad"+str(y),""))
            propiedad.update({'nombrePropiedad':nombrePropiedad})
            tipoPropiedad=str(request.POST.get("tipoAtributo"+str(x)+"Propiedad"+str(y),""))
            propiedad.update({'tipoPropiedad':tipoPropiedad})
            #-----------------------------------
            valorPropiedad=str(request.POST.get("valorAtributo"+str(x)+"Propiedad"+str(y),""))
            propiedad.update({'valorPropiedad':valorPropiedad})
            listaPropiedades.append(propiedad)
        #-----------------------------------
        # Attach the collected properties to the attribute just appended
        # (matched by name).
        for l in cambios["atributos"]:
            if l["nombreAtributo"]==nombreAtributo:
                #print "OOOOOOOOOOO"
                #print l
                l.update({'propiedades':listaPropiedades})
        #cambios["atributos"][nombreAtributo].update({'propiedades':listaPropiedades})
        #atributo.update({'propiedades':listaPropiedades})
        #listaAtributos.append(atributo)
    #this update should happen after all the attributes are in the list
    #cambios.update({'atributos':listaAtributos})
    #------------------------------------------------------------------------------------------------
    #read the form fields of the MODIFIED attributes
    #------------------------------------------------------------------------------------------------
    for i in range(0,mNumAtributos):
        x=i+1
        atributo={}
        listaPropiedades=[]
        nombreAtributo=str(request.POST.get("mNombreAtributo"+str(x),""))
        # The hidden label holds the attribute's original (pre-edit) name.
        filtrado=str(request.POST.get("mLabelNombreAtributo"+str(x),""))
        '''
        print "SDLKFJASDLKFJSADLKFJSDLKFJSD"
        print "mLabelNombreAtributo"+str(x)
        print filtrado
        '''
        print "TTTTTTTR"
        print nombreAtributo
        for q in cambios["atributos"]:
            if q["nombreAtributo"]==filtrado:
                q["nombreAtributo"]=nombreAtributo
                mNumPropiedades=request.POST.get("mNumeroPropiedades","0")
                mNumPropiedades=mNumPropiedades.split(",")
                mPropiedades=int(mNumPropiedades[i])
                print "NUMERO PROP"
                print mPropiedades
                print
                y=1
                #for a in q["propiedades"]:
                for a in range(0,mPropiedades):
                    propiedad={}
                    nombrePropiedad=str(request.POST.get("mNombreAtributo"+str(x)+"Propiedad"+str(y),""))
                    tipoPropiedad=str(request.POST.get("mTipoAtributo"+str(x)+"Propiedad"+str(y),""))
                    #a["nombrePropiedad"]=nombrePropiedad
                    #a["tipoPropiedad"]=tipoPropiedad
                    print "EEEEEEE"
                    propiedad.update({"nombrePropiedad":nombrePropiedad})
                    propiedad.update({"tipoPropiedad":tipoPropiedad})
                    propiedad.update({"valorPropiedad":""})
                    q["propiedades"].append(propiedad)
                    print "RETWEQRWE"
                    print propiedad
                    y+=1
    print
    print
    print "cambios AL FINAL"
    print cambios
    print
    print
    #------------------------------------------------------------------------------------------------
    modificacion={"$set":cambios}
    #print modificacion
    # Only persist when a project name was submitted.
    if nProy!="":
        print "el update"
        projectCollection.update(clave,modificacion)
    if request.method=='POST':
        return HttpResponseRedirect('/tecnico/')
    return render(request,'modificarProyecto.html',context)
#------------------------------------------------------------------------------------
def api(request):
    """JSON upload endpoint for game telemetry (Python 2 style).

    GET / DELETE / PUT simply echo the HTTP method back as a JSON-ish
    string.  POST expects a JSON body with keys 'proyecto', 'ejercicio',
    'nivel' and 'usuario', plus one value per exercise variable; missing
    project/exercise/user records are created on the fly and one Datos
    row is stored per variable of the exercise.

    NOTE(review): relies on names imported elsewhere in this file
    (simplejson, Proyecto, Ejercicio, Variable, Usuario, Realizacion,
    Datos, datetime) -- confirm against the file header.
    """
    if request.method == 'GET':
        return HttpResponse("{ 'method': 'GET' }")
    elif request.method == 'POST':
        print request.body
        data = json.loads(request.body)
        response = {}
        response['method'] = "POST"
        # Validate the 'proyecto' (project) field is present.
        try:
            data['proyecto']
        except KeyError:
            response['error'] = "Falta el nombre del proyecto"
            return HttpResponse(simplejson.dumps(response))
        # Fetch the project, creating it when it does not exist yet.
        try:
            pr = Proyecto.objects.get(nombre = data['proyecto'])
        except Proyecto.DoesNotExist:
            #response['error'] = "No existe el proyecto"
            #return HttpResponse(simplejson.dumps(response))
            pr = Proyecto.objects.create(nombre = data['proyecto'])
        # Validate the 'ejercicio' (exercise) field is present.
        try:
            data['ejercicio']
        except KeyError:
            response['error'] = "Falta el nombre del ejercicio"
            return HttpResponse(simplejson.dumps(response))
        # Fetch the exercise for this project, creating it when missing.
        try:
            ej = Ejercicio.objects.get(nombre = data['ejercicio'], proyecto = pr)
        except Ejercicio.DoesNotExist:
            #response['error'] = "No existe el ejercicio para el proyecto " + pr.nombre
            #return HttpResponse(simplejson.dumps(response))
            ej = Ejercicio.objects.create(nombre=data['ejercicio'], proyecto = pr)
        # By this point the exercise variables must already exist.
        # Check the exercise variables (disabled draft below).
        #for v in ej.variables.all():
        #    try:
        #        data[v.nombre]
        print "algo"
        #    except KeyError:
        #response['error'] = "Falta la variable " + v.nombre
        #return HttpResponse(simplejson.dumps(response))
        print "voy a crear el ejercicio"
        # NOTE(review): hard-coded variables 'cosa' and 'asoc' are created on
        # every POST, and var2 is never attached to the exercise -- confirm
        # this is intentional and not leftover debug scaffolding.
        var = Variable.objects.create(nombre='cosa', tipo=1)
        var2 = Variable.objects.create(nombre='asoc', tipo=1)
        ej.variables.add(var)
        ej.save()
        # Validate the 'nivel' (level) field: present, non-empty, integer.
        try:
            data['nivel']
            if data['nivel'] == '':
                raise KeyError
            try:
                int(data['nivel'])
            except ValueError:
                raise KeyError
        except KeyError:
            response['error'] = "Falta indicar el nivel del ejercicio"
            return HttpResponse(simplejson.dumps(response))
        # Validate the 'usuario' (player) field: present and non-empty.
        try:
            data['usuario']
            if data['usuario'] == '':
                raise KeyError
        except KeyError:
            response['error'] = "Falta el nombre del jugador"
            return HttpResponse(simplejson.dumps(response))
        # Fetch the player, creating it when missing; (re)link it to the
        # project either way (M2M add is idempotent).
        try:
            u = Usuario.objects.get(nombre = data['usuario'])
        except Usuario.DoesNotExist:
            #response['error'] = "No existe el jugador con nombre " + ju.nombre
            u = Usuario.objects.create(nombre = data['usuario'])
        u.proyectos.add(pr)
        u.save()
        # Create a play record stamped with the current time, then one
        # Datos row per exercise variable.
        realizacion = Realizacion.objects.create(usuario = u, ejercicio = ej, fecha = datetime.now())
        print "fuera del for"
        for v in ej.variables.all():
            print "dentro del for"
            print v.nombre
            # KeyError here if the request body lacks a value for v.nombre.
            datos = Datos.objects.create(realizacion = realizacion, variable = v, nivel = int(data["nivel"]), valor = data[v.nombre])
            print "dentro del for2"
        response['upload'] = 'OK'
        return HttpResponse(simplejson.dumps(response))
    elif request.method == 'DELETE':
        return HttpResponse("{ 'method': 'DELETE' }")
    elif request.method == 'PUT':
        return HttpResponse("{ 'method': 'PUT' }")
def proyectos(request):
    """Render the list of all projects."""
    todos = Proyecto.objects.all()
    return render(request, 'proyectos.html', {"pr": todos})
def ejercicios(request):
    """Render the list of all exercises."""
    lista = Ejercicio.objects.all()
    return render(request, 'ejercicios.html', {"ej": lista})
def datos(request, ejercicio):
    """Render every recorded data row for one exercise (by primary key)."""
    # NOTE(review): when `ejercicio` is falsy, `ej` and `arrayDatos` are
    # undefined below and this raises NameError -- same as the original;
    # confirm the URLconf always supplies a pk.
    if ejercicio:
        ej = Ejercicio.objects.get(pk=ejercicio)
        arrayDatos = []
        for partida in Partida.objects.filter(ejercicio=ej):
            arrayDatos.extend(Datos.objects.filter(partida=partida).select_related())
    context = {"arrayDatos": arrayDatos, "ejercicio": ej, "variables": ej.variables.all()}
    return render(request, 'datos.html', context)
| |
# -*- coding: utf-8 -*-
"""
Tools to manipulate Neo objects.
.. autosummary::
:toctree: _toctree/neo_tools
extract_neo_attributes
get_all_spiketrains
get_all_events
get_all_epochs
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import warnings
from itertools import chain
from neo.core.container import unique_objs
from elephant.utils import deprecated_alias
__all__ = [
"extract_neo_attributes",
"get_all_spiketrains",
"get_all_events",
"get_all_epochs"
]
@deprecated_alias(obj='neo_object')
def extract_neo_attributes(neo_object, parents=True, child_first=True,
                           skip_array=False, skip_none=False):
    """
    Build a dictionary of all attributes and annotations of a Neo object.

    Parameters
    ----------
    neo_object : neo.BaseNeo
        Object whose attributes and annotations are collected.
    parents : bool, optional
        When True, attributes/annotations of parent Neo objects (if any)
        are merged in as well.
        Default: True
    child_first : bool, optional
        When True, a child value wins over a parent value on a name clash;
        when False, the parent value wins. Ignored if `parents` is False.
        Default: True
    skip_array : bool, optional
        When True, attributes holding non-scalar array values are omitted.
        Default: False
    skip_none : bool, optional
        When True, entries whose value is None are omitted.
        Default: False

    Returns
    -------
    dict
        Mapping of annotation/attribute name to its value.
    """
    attrs = neo_object.annotations.copy()
    if not skip_array and hasattr(neo_object, "array_annotations"):
        # Keep only the array annotations that are not shadowed by real
        # attributes (labels, durations, ...).
        array_keys = set(neo_object.array_annotations).difference(
            dir(neo_object))
        for key in array_keys:
            attrs.setdefault("array_annotations", {})[key] = \
                neo_object.array_annotations[key].copy()
    for spec in neo_object._necessary_attrs + neo_object._recommended_attrs:
        # spec[2], when present and truthy, marks an array-valued attribute.
        if skip_array and len(spec) >= 3 and spec[2]:
            continue
        attr_name = spec[0]
        if attr_name == getattr(neo_object, '_quantity_attr', None):
            continue
        attrs[attr_name] = getattr(neo_object, attr_name, None)
    if skip_none:
        attrs = {key: value for key, value in attrs.items()
                 if value is not None}
    if not parents:
        return attrs
    for parent in getattr(neo_object, 'parents', []):
        if parent is None:
            continue
        parent_attrs = extract_neo_attributes(parent, parents=True,
                                              child_first=child_first,
                                              skip_array=skip_array,
                                              skip_none=skip_none)
        if child_first:
            # Child entries overwrite the parent's on a name clash.
            parent_attrs.update(attrs)
            attrs = parent_attrs
        else:
            attrs.update(parent_attrs)
    return attrs
def extract_neo_attrs(*args, **kwargs):
    """Deprecated alias; forwards to :func:`extract_neo_attributes`."""
    warnings.warn("'extract_neo_attrs' function is deprecated; "
                  "use 'extract_neo_attributes'", DeprecationWarning)
    return extract_neo_attributes(*args, **kwargs)
def _get_all_objs(container, class_name):
    """
    Recursively gather all Neo objects of one class from a container.

    The container may be a list, dict, other iterable/mapping of Neo
    objects, or a Neo object able to hold them; nesting (e.g. a list of
    blocks) is searched recursively.

    Parameters
    ----------
    container : list, tuple, iterable, dict, neo.Container
        Where to look for the objects.
    class_name : str
        Class name with proper capitalization (e.g. 'SpikeTrain').

    Returns
    -------
    list
        Unique Neo objects of the requested class.

    Raises
    ------
    ValueError
        If the container type cannot be handled.
    """
    if container.__class__.__name__ == class_name:
        return [container]
    # Probe strategies in order: a plural attribute, a Neo container's
    # child lookup, a mapping's values, then plain iteration. The ndim
    # checks keep numpy-like arrays from being treated as containers.
    plural = class_name.lower() + 's'
    if hasattr(container, plural):
        children = getattr(container, plural)
    elif hasattr(container, 'list_children_by_class'):
        children = container.list_children_by_class(class_name)
    elif hasattr(container, 'values') and not hasattr(container, 'ndim'):
        children = container.values()
    elif hasattr(container, '__iter__') and not hasattr(container, 'ndim'):
        children = container
    else:
        raise ValueError('Cannot handle object of type %s' % type(container))
    collected = []
    for child in children:
        collected.extend(_get_all_objs(child, class_name))
    return unique_objs(collected)
def get_all_spiketrains(container):
    """
    Collect every `neo.SpikeTrain` reachable from `container`.

    `container` may be any iterable or mapping holding spiketrains, or a
    Neo object that can contain them (`neo.Block`, `neo.ChannelIndex`,
    `neo.Unit`, `neo.Segment`). Nesting is searched recursively, so e.g.
    a list of blocks works.

    Parameters
    ----------
    container : list, tuple, iterable, dict, neo.Block, neo.Segment,
                neo.Unit, neo.ChannelIndex
        The object(s) to search.

    Returns
    -------
    list
        The unique `neo.SpikeTrain` objects found in `container`.
    """
    return _get_all_objs(container, 'SpikeTrain')
def get_all_events(container):
    """
    Collect every `neo.Event` reachable from `container`.

    `container` may be any iterable or mapping holding events, or a Neo
    object that can contain them (`neo.Block`, `neo.Segment`). Nesting is
    searched recursively, so e.g. a list of blocks works.

    Parameters
    ----------
    container : list, tuple, iterable, dict, neo.Block, neo.Segment
        The object(s) to search.

    Returns
    -------
    list
        The unique `neo.Event` objects found in `container`.
    """
    return _get_all_objs(container, 'Event')
def get_all_epochs(container):
    """
    Collect every `neo.Epoch` reachable from `container`.

    `container` may be any iterable or mapping holding epochs, or a Neo
    object that can contain them (`neo.Block`, `neo.Segment`). Nesting is
    searched recursively, so e.g. a list of blocks works.

    Parameters
    ----------
    container : list, tuple, iterable, dict, neo.Block, neo.Segment
        The object(s) to search.

    Returns
    -------
    list
        The unique `neo.Epoch` objects found in `container`.
    """
    return _get_all_objs(container, 'Epoch')
| |
"""
Author: Yotam Gingold <yotam (strudel) yotamgingold.com>
License: Public Domain [CC0](http://creativecommons.org/publicdomain/zero/1.0/)
Let the energy E = | G*x - U |^2_M
where:
x is a row-by-col image flattened into a vector with row-by-col entries.
G is the gradient operator,
U are the target gradients (possibly U = G*y for some known function y),
and M is the diagonal mass matrix which is the norm of each derivative in
the summation.
Then E = (G*x - U).T * M * (G*x - U) = x.T*G.T*M*G*x + U.T*M*U -
2*x.T*G.T*M*U
Let L = G.T*M*G.
Note that G has a row for each row and column derivative in the image.
Let S be a matrix that zeros gradients resulting from the application of G
(e.g. G*y).
Formally, let S be an identity matrix whose dimensions are the same as G has
rows, modified to have entries corresponding to the derivatives involving some
grid positions to be zero. Note that S = S*S and S.T = S and S*M = M*S.
Let U = S*G*y for some known other image y, and S has zeros for values in y we
don't trust.
Then
E = x.T*L*x + y.T*G.T*S.T*M*S*G*y - 2*x.T*G.T*M*S*G*y
= x.T*L*x + y.T*G.T*S*M*S*G*y - 2*x.T*G.T*S*M*S*G*y
= x.T*L*x + y.T*L'*y - 2*x.T*L'*y
where L' = G.T*S*M*S*G = G.T*M*S*G = G.T*S*M*G.
The derivative of 1/2 E with respect to x is:
0.5 * dE/dx = L*x - L'*y
so the minimizing x could be found by solving
L*x = L'*y
The G, M, and S in the above description are exactly what is returned by
grad_and_mass() in this module.
mask should be True for every grid location we care about (want a solution to).
skip should be True for every grid location we have a known good value for.
"""
from __future__ import print_function, division
import collections
import numpy
from numpy import *
from scipy import sparse
from util import QuadEnergy, print_progress, rowcol_to_index
def grad_and_mass(rows, cols, mask = None, skip = None):
    """
    Returns a gradient operator matrix G for a grid of dimensions
    'rows' by 'cols',
    a corresponding mass matrix M such that L = G.T*M*G is a
    Laplacian operator matrix that is symmetric and normalized such that the
    diagonal values of interior vertices equal 1,
    and a skip matrix S such that the gradient entries of S*G*x are zero for
    (i,j) such that skip[i,j] is False. If skip is None, all entries are
    assumed to be True and S will be the identity matrix.
    Optional parameter `mask` will result in a gradient operator that entirely
    ignores (i,j) such that mask[i,j] is False.
    In other words, `mask` should be True for every grid location
    you care about (want a solution to via L = G.T*M*G).
    `skip` should be True for every grid location you have a known good value
    for.
    Matrices returned are scipy.sparse matrices.
    """
    print_progress(0)
    assert rows > 0 and cols > 0
    if mask is not None:
        mask = asarray(mask, dtype = bool)
        assert mask.shape == (rows, cols)
    if skip is not None:
        skip = asarray(skip, dtype = bool)
        assert skip.shape == (rows, cols)
    # The number of derivatives in the +row direction is cols * (rows - 1),
    # because the bottom row doesn't have them.
    num_Grow = cols * (rows - 1)
    # The number of derivatives in the +col direction is rows * (cols - 1),
    # because the right-most column doesn't have them.
    num_Gcol = rows * (cols - 1)
    # Gradient matrix
    # Each G row encodes one forward difference: -1 at the source vertex,
    # +1 at its +row or +col neighbor.
    gOnes = numpy.ones(num_Grow + num_Gcol)
    vals = numpy.append(-gOnes, gOnes)
    del gOnes
    # gColRange: flat indices of every vertex except the last column,
    # i.e. the source vertices of the +col derivatives.
    gColRange = numpy.arange(rows * cols)
    gColRange = gColRange[~(gColRange % cols == (cols - 1))]
    # Column indices: first the -1 endpoints (+row sources, +col sources),
    # then the +1 endpoints (+row targets are `cols` further in flat order,
    # +col targets are 1 further).
    colJ = numpy.concatenate([
        numpy.arange(num_Grow),
        gColRange,
        numpy.arange(cols, num_Grow + cols),
        gColRange + 1])
    del gColRange
    # Skip matrix
    # A derivative is kept (1) only when both of its endpoints are trusted.
    if(skip is not None):
        S_diag = numpy.append(
            skip[:-1] & skip[1:], skip[:, :-1] & skip[:, 1:]).astype(int)
    else:
        S_diag = numpy.ones(num_Grow + num_Gcol)
    # Mass diagonal matrix
    # Each derivative's weight is 0.125 per adjacent 2x2 cell of valid
    # vertices, so interior derivatives get 0.25 and boundary ones 0.125.
    if(mask is not None):
        m = numpy.zeros((rows - 1, cols))
        m[:, 1:][mask[:-1, :-1] & mask[1:, :-1]] += 0.125
        m[:, :-1][mask[:-1, 1:] & mask[1:, 1:]] += 0.125
        mass = m.flatten()
        m = numpy.zeros((rows, cols - 1))
        m[1:][mask[:-1, :-1] & mask[:-1, 1:]] += 0.125
        m[:-1][mask[1:, :-1] & mask[1:, 1:]] += 0.125
        mass = numpy.append(mass, m)
    else:
        # No mask: weights depend only on boundary position.
        m = numpy.hstack([numpy.full((rows - 1, 1), 0.125),
                          numpy.full((rows - 1, cols - 2), 0.25),
                          numpy.full((rows - 1, 1), 0.125)])
        mass = m.flatten()
        m = numpy.vstack([numpy.full((1, cols - 1), 0.125),
                          numpy.full((rows - 2, cols - 1), 0.25),
                          numpy.full((1, cols - 1), 0.125)])
        mass = numpy.append(mass, m.flatten())
    del m
    output_row = num_Grow + num_Gcol
    if(mask is not None):
        # Drop every derivative with at least one masked-out endpoint.
        keep_rows = numpy.append(numpy.logical_and(mask[:-1], mask[1:]),
                                 numpy.logical_and(mask[:, :-1], mask[:, 1:]))
        tiled_keep_rows = numpy.tile(keep_rows, 2)
        vals = vals[tiled_keep_rows]
        colJ = colJ[tiled_keep_rows]
        S_diag = S_diag[keep_rows]
        mass = mass[keep_rows]
        output_row = numpy.count_nonzero(keep_rows)
    # rowI is dependent on the number of output rows.
    rowI = numpy.tile(numpy.arange(output_row), 2)
    G = sparse.coo_matrix((vals, (rowI, colJ)),
                          shape=(output_row, rows * cols))
    assert G.shape == (output_row, rows * cols)
    M = coo_diag(mass)
    assert M.shape == (output_row, output_row)
    S = coo_diag(S_diag)
    assert S.shape == (output_row, output_row)
    print_progress(1.0)
    print()
    return G, M, S
def gen_symmetric_grid_laplacian(rows, cols, mask = None):
    """
    Build the symmetric Laplacian operator L = G.T*M*G for a 'rows' by
    'cols' grid, normalized so interior diagonal values equal 1.

    Returns a scipy.sparse matrix.
    """
    assert rows > 0 and cols > 0
    G, M, _unused_skip = grad_and_mass(rows, cols, mask)
    return G.T * M * G
def dirichlet_energy(rows, cols, y, mask = None, skip = None):
    """
    Builds the quadratic energy for the Dirichlet.
    E = x.T*L*x + y.T*G.T*S.T*M*S*G*y - 2*x.T*G.T*M*S*G*y
      = x.T*L*x + y.T*G.T*S*M*S*G*y - 2*x.T*G.T*S*M*S*G*y
      = x.T*L*x + y.T*L'*y - 2*x.T*L'*y
    with L = G.T*M*G and L' = G.T*S*M*G.

    Inputs:
        (rows, cols) - dimensions of the texture/2D grid
        y - Vector of original values (y above).
        mask - Ignore grid locations corresponding to False
        skip - True for good pre-existing values
    Output:
        A QuadEnergy object holding the quadratic, linear, and constant
        terms.
    """
    assert rows > 0 and cols > 0
    G, M, S = grad_and_mass(rows, cols, mask, skip)
    G = G.tocsc()
    M = M.tocsc()
    S = S.tocsc()
    MG = M.dot(G)
    L = G.T.dot(MG)
    Lp = G.T.dot(S.dot(MG))
    Lp_y = Lp.dot(y)
    return QuadEnergy(L, -sparse.csc_matrix(Lp_y),
                      sparse.csc_matrix(y.T.dot(Lp_y)))
def coo_diag(vals):
    """Return a square scipy.sparse COO matrix with `vals` on the diagonal.

    `vals` may be a 1-D numpy array or any plain sequence; the result has
    shape (len(vals), len(vals)).
    """
    # len() works for both numpy arrays (== shape[0]) and plain sequences,
    # so the broad try/except around vals.shape[0] is unnecessary and no
    # longer masks unrelated errors.
    indices = arange(len(vals))
    return sparse.coo_matrix((vals, (indices, indices)))
def test_against_dirichlet():
    """Compare gen_symmetric_grid_laplacian against the old implementation
    and print the total absolute difference for several grid sizes."""
    print('=== test_against_dirichlet() ===')
    import dirichlet_old
    cases = [(2, 2), (2, 3), (3, 2), (30, 2), (2, 30), (3, 3), (30, 30)]
    for rows, cols in cases:
        old_L = dirichlet_old.gen_symmetric_grid_laplacian2(rows, cols)
        new_L = gen_symmetric_grid_laplacian(rows, cols)
        print(rows, 'rows by', cols, 'cols:', abs(old_L - new_L).sum())
def test_mask():
    """Spot-check grad_and_mass with one masked-out interior vertex by
    printing each vertex weight times 4 (whole numbers: the valence)."""
    print('=== test_mask() ===')
    shape = (5, 5)
    mask = ones(shape, dtype = bool)
    mask[2, 2] = False
    G, M, _unused_skip = grad_and_mass(shape[0], shape[1], mask = mask)
    laplacian = G.T * M * G
    print(4 * laplacian.diagonal().reshape(shape))
if __name__ == '__main__':
    # test_against_dirichlet()
    # test_mask()
    # Benchmark dirichlet_energy over increasingly large square textures.
    sizes = [(4, 4, 1), (10, 10, 1), (100, 100, 1), (1000, 1000, 1)]
    for size in sizes:
        print("Texture Size: %s" % (size,))
        width, height, depth = size
        N = width * height
        import numpy
        # Target texture: a 0..1 ramp along the columns, replicated down
        # the rows and across the depth channels, flattened to (N, depth).
        # NOTE(review): the tile shape uses `width` for the row count; all
        # sizes here are square (width == height) so this works -- confirm
        # before benchmarking non-square textures.
        diriTex = numpy.linspace(0, 1, width)
        diriTex = numpy.tile(numpy.repeat(diriTex, depth).reshape(
            (1, width, depth)), (width, 1, 1))
        diriTex = diriTex.reshape(N, -1)
        # Input texture: all zeros.
        inTex = numpy.zeros((N, depth))
        # import pdb; pdb.set_trace()
        coeff = dirichlet_energy(height, width, inTex)
        from seam_erasure import display_quadratic_energy
        display_quadratic_energy(coeff, inTex, diriTex, "Dirichlet")
| |
from .._abstract.abstract import BaseSecurityHandler, BaseAGSServer
from ..security.security import AGSTokenSecurityHandler
import json, types
########################################################################
class MobileServiceLayer(BaseAGSServer):
"""
Represents a single mobile service layer
"""
_url = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_json = None
_json_dict = None
_display = None
_drawingInfo = None
_extent = None
_canModifyLayer = None
_advancedQueryCapabilities = None
_hasLabels = None
_supportsAdvancedQueries = None
_id = None
_currentVersion = None
_geometryType = None
_ownershipBasedAccessControlForFeatures = None
_type = None
_useStandardizedQueries = None
_supportedQueryFormats = None
_maxRecordCount = None
_description = None
_defaultVisibility = None
_typeIdField = None
_displayField = None
_name = None
_supportsStatistics = None
_hasAttachments = None
_fields = None
_maxScale = None
_copyrightText = None
_canScaleSymbols = None
_minScale = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
if self._securityHandler is not None:
self._referer_url = self._securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" inializes the properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(self._json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, v)
else:
print k, " - attribute not implemented for Mobile Service Layer."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""
returns key/value pair
"""
attributes = json.loads(str(self))
for att in attributes.keys():
yield (att, getattr(self, att))
#----------------------------------------------------------------------
@property
def drawingInfo(self):
"""gets the services drawing information"""
if self._drawingInfo is None:
self.__init()
return self._drawingInfo
#----------------------------------------------------------------------
@property
def extent(self):
"""returns the service layer extent"""
if self._extent is None:
self.__init()
return self._extent
#----------------------------------------------------------------------
@property
def canModifyLayer(self):
"""returns value for can modify layer"""
if self._canModifyLayer is None:
self.__init()
return self._canModifyLayer
#----------------------------------------------------------------------
@property
def advancedQueryCapabilities(self):
"""gets the advancedQueryCapabilities value"""
if self._advancedQueryCapabilities is None:
self.__init()
return self._advancedQueryCapabilities
#----------------------------------------------------------------------
@property
def hasLabels(self):
"""returns the has labels value"""
if self._hasLabels is None:
self.__init()
return self._hasLabels
#----------------------------------------------------------------------
@property
def supportsAdvancedQueries(self):
"""returns the supportsAdvancedQueries value"""
if self._supportsAdvancedQueries is None:
self.__init()
return self._supportsAdvancedQueries
#----------------------------------------------------------------------
@property
def id(self):
"""returns the layers' id"""
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def currentVersion(self):
"""gets the layers current version"""
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def geometryType(self):
"""retusn the layers geometry type"""
if self._geometryType is None:
self.__init()
return self._geometryType
#----------------------------------------------------------------------
@property
def ownershipBasedAccessControlForFeatures(self):
"""returns the ownershipBasedAccessControlForFeatures value"""
if self._ownershipBasedAccessControlForFeatures is None:
self.__init()
return self._ownershipBasedAccessControlForFeatures
#----------------------------------------------------------------------
@property
def type(self):
"""gets the layer type"""
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def useStandardizedQueries(self):
"""gets the useStandardizedQueries value"""
if self._useStandardizedQueries is None:
self.__init()
return self._useStandardizedQueries
#----------------------------------------------------------------------
@property
def hasAttachments(self):
"""returns if the layer has attachments enabled"""
if self._hasAttachments is None:
self.__init()
return self._hasAttachments
#----------------------------------------------------------------------
@property
def supportedQueryFormats(self):
"""returns the supportedQueryFormats value"""
if self._supportedQueryFormats is None:
self.__init()
return self._supportedQueryFormats
#----------------------------------------------------------------------
@property
def maxRecordCount(self):
"""returns the max record count"""
if self._maxRecordCount is None:
self.__init()
return self._maxRecordCount
#----------------------------------------------------------------------
@property
def description(self):
"""returns the service layer description"""
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def defaultVisibility(self):
"""returns the defaultVisibility value"""
if self._defaultVisibility is None:
self.__init()
return self._defaultVisibility
#----------------------------------------------------------------------
@property
def typeIdField(self):
"""returns the type id field"""
if self._typeIdField is None:
self.__init()
return self._typeIdField
#----------------------------------------------------------------------
@property
def displayField(self):
"""returns the display field"""
if self._displayField is None:
self.__init()
return self._display
#----------------------------------------------------------------------
@property
def name(self):
"""returns the layers name"""
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def supportsStatistics(self):
"""returns the supports statistics value"""
if self._supportsStatistics is None:
self.__init()
return self._supportsStatistics
#----------------------------------------------------------------------
@property
def fields(self):
"""gets the fields for the layer"""
if self._fields is None:
self.__init()
return self._fields
#----------------------------------------------------------------------
@property
def copyrightText(self):
"""gets the copy right text"""
if self._copyrightText is None:
self.__init()
return self._copyrightText
#----------------------------------------------------------------------
@property
def canScaleSymbols(self):
"""returns the can scale symbols value"""
if self._canScaleSymbols is None:
self.__init()
return self._canScaleSymbols
#----------------------------------------------------------------------
@property
def minScale(self):
"""returns the minScale value"""
if self._minScale is None:
self.__init()
return self._minScale
#----------------------------------------------------------------------
@property
def maxScale(self):
"""gets the max scale for the layer"""
if self._maxScale is None:
self.__init()
return self._maxScale
########################################################################
class MobileService(BaseAGSServer):
"""
Represents a single globe layer
"""
_url = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_json = None
_json_dict = None
_layers = None
_description = None
_initialExtent = None
_spatialReference = None
_mapName = None
_currentVersion = None
_units = None
_fullExtent = None
_serviceDescription = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
if self._securityHandler is not None:
self._referer_url = self._securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" inializes the properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(self._json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, v)
else:
print k, " - attribute not implemented for Mobile Service."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""
returns key/value pair
"""
attributes = json.loads(str(self))
for att in attributes.keys():
yield (att, getattr(self, att))
#----------------------------------------------------------------------
@property
def layers(self):
"""gets the service layers"""
if self._layers is None:
self.__init()
lyrs = []
for lyr in self._layers:
url = self._url + "/%s" % lyr['id']
lyr['object'] = MobileServiceLayer(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False)
return self._layers
#----------------------------------------------------------------------
@property
def description(self):
"""gets the service description"""
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def initialExtent(self):
"""gets the service initial extent"""
if self._initialExtent is None:
self.__init()
return self._initialExtent
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""gets the spatial reference"""
if self._spatialReference is None:
self.__init()
return self._spatialReference
#----------------------------------------------------------------------
@property
def mapName(self):
"""gets the map name"""
if self._mapName is None:
self._mapName
return self._mapName
#----------------------------------------------------------------------
@property
def currentVersion(self):
"""gets the current version"""
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def units(self):
"""gets the units for the service"""
if self._units is None:
self.__init()
return self._units
#----------------------------------------------------------------------
@property
def fullExtent(self):
"""returns the service full extent"""
if self._fullExtent is None:
self.__init()
return self._fullExtent
#----------------------------------------------------------------------
@property
def serviceDescription(self):
"""returns the service description"""
if self._serviceDescription is None:
self.__init()
return self._serviceDescription
| |
import json
import re
from django.contrib.gis.geos import Point
from rest_framework import serializers
from rest_framework import viewsets
from rest_framework.settings import api_settings
from rest_framework_jsonp.renderers import JSONPRenderer
from django_filters import rest_framework as filters
from froide.helper.api_utils import OpenRefineReconciliationMixin
from .models import GeoRegion
# Anchored so that only an exact five-digit string counts as a German postal
# code ("12345" but not "123456" or "12345x"); used with .match() below, which
# already anchors at the start, so \Z closes the end.
GERMAN_PLZ_RE = re.compile(r"\d{5}\Z")
class GeoRegionSerializer(serializers.HyperlinkedModelSerializer):
    """List serializer for GeoRegion: region metadata plus a centroid
    point, without the full geometry (see GeoRegionDetailSerializer)."""

    resource_uri = serializers.HyperlinkedIdentityField(
        view_name="api:georegion-detail", lookup_field="pk"
    )
    part_of = serializers.HyperlinkedRelatedField(
        view_name="api:georegion-detail", lookup_field="pk", read_only=True, many=False
    )
    # Computed by get_centroid() below.
    centroid = serializers.SerializerMethodField()

    class Meta:
        model = GeoRegion
        depth = 0
        fields = (
            "resource_uri",
            "id",
            "name",
            "slug",
            "kind",
            "kind_detail",
            "level",
            "region_identifier",
            "global_identifier",
            "area",
            "population",
            "valid_on",
            "part_of",
            "centroid",
        )

    def get_centroid(self, obj):
        """Return the region's centroid as a GeoJSON dict, or None when
        the region has no geometry."""
        if obj.geom is not None:
            return json.loads(obj.geom.centroid.json)
        return None
class GeoRegionDetailSerializer(GeoRegionSerializer):
    """Detail serializer: the list fields plus the full geometry and the
    government seat point."""

    geom = serializers.SerializerMethodField()
    gov_seat = serializers.SerializerMethodField()

    class Meta(GeoRegionSerializer.Meta):
        fields = GeoRegionSerializer.Meta.fields + (
            "geom",
            "gov_seat",
            "centroid",
        )

    def get_geom(self, obj):
        """Full region geometry as a GeoJSON dict, or None."""
        geometry = obj.geom
        if geometry is None:
            return None
        return json.loads(geometry.json)

    def get_gov_seat(self, obj):
        """Government seat point as a GeoJSON dict, or None."""
        seat = obj.gov_seat
        if seat is None:
            return None
        return json.loads(seat.json)
class GeoRegionFilter(filters.FilterSet):
    """Filter set backing the GeoRegion list endpoint."""

    id = filters.CharFilter(method="id_filter")
    q = filters.CharFilter(method="search_filter")
    kind = filters.CharFilter(method="kind_filter")
    level = filters.NumberFilter(method="level_filter")
    ancestor = filters.ModelChoiceFilter(
        method="ancestor_filter", queryset=GeoRegion.objects.all()
    )
    latlng = filters.CharFilter(method="latlng_filter")
    name = filters.CharFilter(method="name_filter")

    class Meta:
        model = GeoRegion
        fields = ("name", "level", "kind", "slug")

    def name_filter(self, queryset, name, value):
        """Exact name match, falling back to the capitalized spelling."""
        exact = queryset.filter(name=value)
        if exact:
            return exact
        return queryset.filter(name=value.capitalize())

    def search_filter(self, queryset, name, value):
        """Case-insensitive substring search on the name."""
        return queryset.filter(name__icontains=value)

    def kind_filter(self, queryset, name, value):
        """Filter by region kind."""
        return queryset.filter(kind=value)

    def level_filter(self, queryset, name, value):
        """Filter by hierarchy level."""
        return queryset.filter(level=value)

    def id_filter(self, queryset, name, value):
        """Filter by a comma-separated list of primary keys."""
        return queryset.filter(pk__in=value.split(","))

    def ancestor_filter(self, queryset, name, value):
        """Restrict to descendants of the given region."""
        return queryset.filter(id__in=value.get_descendants())

    def latlng_filter(self, queryset, name, value):
        """Restrict to regions covering a "lat,lng" point; malformed
        input leaves the queryset untouched."""
        try:
            lat_str, lng_str = value.split(",", 1)
            location = Point(float(lng_str), float(lat_str))
            return queryset.filter(geom__covers=location)
        except (ValueError, IndexError):
            pass
        return queryset
class GeoRegionViewSet(OpenRefineReconciliationMixin, viewsets.ReadOnlyModelViewSet):
    """Read-only GeoRegion API with OpenRefine reconciliation support."""
    # Per-action serializers: lightweight list vs. full-geometry detail.
    serializer_action_classes = {
        "list": GeoRegionSerializer,
        "retrieve": GeoRegionDetailSerializer,
    }
    queryset = GeoRegion.objects.all()
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = GeoRegionFilter
    # OpenRefine needs JSONP responses
    # This is OK because authentication is not considered
    renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (JSONPRenderer,)
    class RECONCILIATION_META:
        # Static metadata consumed by OpenRefineReconciliationMixin.
        name = "GeoRegion"
        id = "georegion"
        model = GeoRegion
        api_list = "api:georegion-list"
        obj_short_link = None
        filters = ["kind", "level"]
        properties = [
            {
                "id": "population",
                "name": "population",
            },
            {
                "id": "area",
                "name": "area",
            },
            {
                "id": "geom",
                "name": "geom",
            },
            {"id": "name", "name": "Name"},
            {"id": "id", "name": "ID"},
            {"id": "slug", "name": "Slug"},
            {"id": "kind", "name": "Kind"},
            {"id": "region_identifier", "name": "Region identifier"},
            {"id": "global_identifier", "name": "Global identifier"},
        ]
        # Lookup table keyed by property id for quick access.
        properties_dict = {p["id"]: p for p in properties}
    def get_serializer_class(self):
        # Superusers always get the detail serializer (full geometry);
        # otherwise choose per action, defaulting to the list serializer.
        if self.request.user.is_superuser:
            return GeoRegionDetailSerializer
        try:
            return self.serializer_action_classes[self.action]
        except (KeyError, AttributeError):
            return GeoRegionSerializer
    def _search_reconciliation_results(self, query, filters, limit):
        # Yield OpenRefine reconciliation candidates for `query`,
        # narrowed by `filters` (field -> value) and capped at `limit`.
        qs = GeoRegion.objects.all()
        for key, val in filters.items():
            qs = qs.filter(**{key: val})
        # FIXME: Special German case
        match = GERMAN_PLZ_RE.match(query)
        zip_region = None
        if match:
            try:
                # For German postal codes, match regions that cover the
                # zipcode's centroid instead of searching by name.
                zip_region = GeoRegion.objects.get(name=query, kind="zipcode")
                qs = qs.filter(geom__covers=zip_region.geom.centroid)
            except GeoRegion.DoesNotExist:
                pass
        if not match or not zip_region:
            qs = qs.filter(name__contains=query)[:limit]
        for r in qs:
            yield {
                "id": str(r.pk),
                "name": r.name,
                "type": ["georegion"],
                "score": 4,
                "match": True,  # FIXME: this is quite arbitrary
            }
| |
from __future__ import absolute_import
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.core.urlresolvers import reverse
from zerver.decorator import authenticated_json_post_view, require_post
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, \
HttpResponseNotFound
from django.middleware.csrf import get_token
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.utils.translation import ugettext as _
from django.core import signing
from six.moves import urllib
from typing import Any, Dict, Optional, Tuple, Text
from confirmation.models import Confirmation
from zerver.forms import HomepageForm, OurAuthenticationForm, \
WRONG_SUBDOMAIN_ERROR
from zerver.lib.request import REQ, has_request_variables, JsonableError
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import get_subdomain, is_subdomain_root_or_alias
from zerver.models import PreregistrationUser, UserProfile, remote_user_to_email, Realm
from zerver.views.registration import create_preregistration_user, get_realm_from_request, \
redirect_and_log_into_subdomain
from zproject.backends import password_auth_enabled, dev_auth_enabled, google_auth_enabled
from zproject.jinja2 import render_to_response
from version import ZULIP_VERSION
import hashlib
import hmac
import jwt
import logging
import requests
import time
import ujson
def maybe_send_to_registration(request, email, full_name=''):
    # type: (HttpRequest, Text, Text) -> HttpResponse
    """Send a user with a verified email into the registration flow.
    If the HomepageForm rejects the email (e.g. wrong realm for this
    subdomain), re-render the signup page with the form errors instead.
    """
    form = HomepageForm({'email': email}, realm=get_realm_from_request(request))
    request.verified_email = None
    if form.is_valid():
        # Construct a PreregistrationUser object and send the user over to
        # the confirmation view.
        prereg_user = None
        if settings.ONLY_SSO:
            try:
                # Reuse an existing invitation for this email when present.
                prereg_user = PreregistrationUser.objects.filter(email__iexact=email).latest("invited_at")
            except PreregistrationUser.DoesNotExist:
                prereg_user = create_preregistration_user(email, request)
        else:
            prereg_user = create_preregistration_user(email, request)
        return redirect("".join((
            settings.EXTERNAL_URI_SCHEME,
            request.get_host(),
            "/",
            # Split this so we only get the part after the /
            Confirmation.objects.get_link_for_object(prereg_user).split("/", 3)[3],
            '?full_name=',
            # urllib does not handle Unicode, so coerce to encoded byte string
            # Explanation: http://stackoverflow.com/a/5605354/90777
            urllib.parse.quote_plus(full_name.encode('utf8')))))
    else:
        url = reverse('register')
        return render_to_response('zerver/accounts_home.html',
                                  {'form': form, 'current_url': lambda: url},
                                  request=request)
def redirect_to_subdomain_login_url():
    # type: () -> HttpResponseRedirect
    """Redirect to the login page, flagging that a subdomain is involved."""
    login_url = reverse('django.contrib.auth.views.login')
    return HttpResponseRedirect(login_url + '?subdomain=1')
def login_or_register_remote_user(request, remote_username, user_profile, full_name='',
                                  invalid_subdomain=False):
    # type: (HttpRequest, Text, UserProfile, Text, Optional[bool]) -> HttpResponse
    """Complete a remote-auth login: log the user in, or route a user
    without an account into registration, or bounce bad subdomains back
    to the login page.
    """
    if invalid_subdomain:
        # Show login page with an error message
        return redirect_to_subdomain_login_url()
    elif user_profile is None or user_profile.is_mirror_dummy:
        # Since execution has reached here, the client specified a remote user
        # but no associated user account exists. Send them over to the
        # PreregistrationUser flow.
        return maybe_send_to_registration(request, remote_user_to_email(remote_username), full_name)
    else:
        login(request, user_profile)
        # Prefer the realm's own URI when subdomains are in use.
        if settings.REALMS_HAVE_SUBDOMAINS and user_profile.realm.subdomain is not None:
            return HttpResponseRedirect(user_profile.realm.uri)
        return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
                                              request.get_host()))
def remote_user_sso(request):
    # type: (HttpRequest) -> HttpResponse
    """Log in via the REMOTE_USER variable set by an SSO frontend."""
    remote_user = request.META.get("REMOTE_USER")
    if remote_user is None:
        raise JsonableError(_("No REMOTE_USER set."))
    user_profile = authenticate(remote_user=remote_user, realm_subdomain=get_subdomain(request))
    return login_or_register_remote_user(request, remote_user, user_profile)
@csrf_exempt
def remote_user_jwt(request):
    # type: (HttpRequest) -> HttpResponse
    """Log in (or register) a user based on a signed JSON web token.
    The JWT must be signed with the key configured for this subdomain in
    settings.JWT_AUTH_KEYS and carry "user" and "realm" claims, which are
    combined into the account email.
    """
    subdomain = get_subdomain(request)
    try:
        auth_key = settings.JWT_AUTH_KEYS[subdomain]
    except KeyError:
        raise JsonableError(_("Auth key for this subdomain not found."))
    try:
        json_web_token = request.POST["json_web_token"]
        options = {'verify_signature': True}
        payload = jwt.decode(json_web_token, auth_key, options=options)
    except KeyError:
        raise JsonableError(_("No JSON web token passed in request"))
    except jwt.InvalidTokenError:
        raise JsonableError(_("Bad JSON web token"))
    remote_user = payload.get("user", None)
    if remote_user is None:
        raise JsonableError(_("No user specified in JSON web token claims"))
    realm = payload.get('realm', None)
    if realm is None:
        raise JsonableError(_("No realm specified in JSON web token claims"))
    email = "%s@%s" % (remote_user, realm)
    try:
        # We do all the authentication we need here (otherwise we'd have to
        # duplicate work), but we need to call authenticate with some backend so
        # that the request.backend attribute gets set.
        return_data = {}  # type: Dict[str, bool]
        user_profile = authenticate(username=email,
                                    realm_subdomain=subdomain,
                                    return_data=return_data,
                                    use_dummy_backend=True)
        if return_data.get('invalid_subdomain'):
            logging.warning("User attempted to JWT login to wrong subdomain %s: %s" % (subdomain, email,))
            raise JsonableError(_("Wrong subdomain"))
    except UserProfile.DoesNotExist:
        user_profile = None
    return login_or_register_remote_user(request, email, user_profile, remote_user)
def google_oauth2_csrf(request, value):
    # type: (HttpRequest, str) -> HttpResponse
    """Return an HMAC-SHA256 over `value` keyed by the session CSRF token.
    Used to sign/verify the `state` parameter of the Google OAuth2 flow.
    """
    # In Django 1.10, get_token returns a salted token which changes
    # everytime get_token is called; unsalt it so the HMAC is stable
    # across the round trip to Google.
    try:
        from django.middleware.csrf import _unsalt_cipher_token
        token = _unsalt_cipher_token(get_token(request))
    except ImportError:
        # Older Django: the token is already unsalted.
        token = get_token(request)
    return hmac.new(token.encode('utf-8'), value.encode("utf-8"), hashlib.sha256).hexdigest()
def start_google_oauth2(request):
    # type: (HttpRequest) -> HttpResponse
    """Kick off the Google OAuth2 flow by bouncing through the main site."""
    return redirect_to_main_site(
        request, reverse('zerver.views.auth.send_oauth_request_to_google'))
def redirect_to_main_site(request, url):
    # type: (HttpRequest, Text) -> HttpResponse
    """Redirect to `url` on the root domain, carrying the current subdomain."""
    main_site_uri = settings.EXTERNAL_URI_SCHEME + settings.EXTERNAL_HOST + url
    query = urllib.parse.urlencode({'subdomain': get_subdomain(request)})
    return redirect(main_site_uri + '?' + query)
def start_social_login(request, backend):
    # type: (HttpRequest, Text) -> HttpResponse
    """Begin a python-social-auth login for `backend` via the main site."""
    return redirect_to_main_site(request, reverse('social:begin', args=[backend]))
def send_oauth_request_to_google(request):
    # type: (HttpRequest) -> HttpResponse
    """Build the Google OAuth2 authorization URL and redirect the user there.

    The `state` parameter carries `time:hmac:subdomain` so that
    finish_google_oauth2 can verify the callback is genuine.
    """
    subdomain = request.GET.get('subdomain', '')
    if settings.REALMS_HAVE_SUBDOMAINS:
        # Refuse to start the flow for a missing or unknown subdomain.
        if not subdomain or not Realm.objects.filter(string_id=subdomain).exists():
            return redirect_to_subdomain_login_url()
    google_uri = 'https://accounts.google.com/o/oauth2/auth?'
    cur_time = str(int(time.time()))
    csrf_state = '{}:{}:{}'.format(
        cur_time,
        google_oauth2_csrf(request, cur_time + subdomain),
        subdomain
    )
    # Renamed from the misspelled local "prams".
    params = {
        'response_type': 'code',
        'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
        'redirect_uri': ''.join((
            settings.EXTERNAL_URI_SCHEME,
            settings.EXTERNAL_HOST,
            reverse('zerver.views.auth.finish_google_oauth2'),
        )),
        'scope': 'profile email',
        'state': csrf_state,
    }
    return redirect(google_uri + urllib.parse.urlencode(params))
def finish_google_oauth2(request):
    # type: (HttpRequest) -> HttpResponse
    """Handle the Google OAuth2 callback.
    Verifies the CSRF `state`, exchanges the authorization code for an
    access token, fetches the user's name/email from the Google+ API, and
    then logs in (root domain) or hands off to the subdomain login flow.
    """
    error = request.GET.get('error')
    if error == 'access_denied':
        # User cancelled the consent screen; just go home.
        return redirect('/')
    elif error is not None:
        logging.warning('Error from google oauth2 login: %s' % (request.GET.get("error"),))
        return HttpResponse(status=400)
    # state is "time:hmac:subdomain" as produced by send_oauth_request_to_google.
    csrf_state = request.GET.get('state')
    if csrf_state is None or len(csrf_state.split(':')) != 3:
        logging.warning('Missing Google oauth2 CSRF state')
        return HttpResponse(status=400)
    value, hmac_value, subdomain = csrf_state.split(':')
    if hmac_value != google_oauth2_csrf(request, value + subdomain):
        logging.warning('Google oauth2 CSRF error')
        return HttpResponse(status=400)
    # Exchange the authorization code for an access token.
    resp = requests.post(
        'https://www.googleapis.com/oauth2/v3/token',
        data={
            'code': request.GET.get('code'),
            'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
            'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
            'redirect_uri': ''.join((
                settings.EXTERNAL_URI_SCHEME,
                settings.EXTERNAL_HOST,
                reverse('zerver.views.auth.finish_google_oauth2'),
            )),
            'grant_type': 'authorization_code',
        },
    )
    if resp.status_code == 400:
        logging.warning('User error converting Google oauth2 login to token: %s' % (resp.text,))
        return HttpResponse(status=400)
    elif resp.status_code != 200:
        logging.error('Could not convert google oauth2 code to access_token: %s' % (resp.text,))
        return HttpResponse(status=400)
    access_token = resp.json()['access_token']
    # Fetch the user's profile (name + emails) from the Google+ API.
    resp = requests.get(
        'https://www.googleapis.com/plus/v1/people/me',
        params={'access_token': access_token}
    )
    if resp.status_code == 400:
        logging.warning('Google login failed making info API call: %s' % (resp.text,))
        return HttpResponse(status=400)
    elif resp.status_code != 200:
        logging.error('Google login failed making API call: %s' % (resp.text,))
        return HttpResponse(status=400)
    body = resp.json()
    try:
        full_name = body['name']['formatted']
    except KeyError:
        # Only google+ users have a formatted name. I am ignoring i18n here.
        full_name = u'{} {}'.format(
            body['name']['givenName'], body['name']['familyName']
        )
    # Pick the email marked as the account address (for-else: error if none).
    for email in body['emails']:
        if email['type'] == 'account':
            break
    else:
        logging.error('Google oauth2 account email not found: %s' % (body,))
        return HttpResponse(status=400)
    email_address = email['value']
    if not subdomain:
        # When request was not initiated from subdomain.
        user_profile, return_data = authenticate_remote_user(request, email_address)
        invalid_subdomain = bool(return_data.get('invalid_subdomain'))
        return login_or_register_remote_user(request, email_address, user_profile,
                                             full_name, invalid_subdomain)
    try:
        realm = Realm.objects.get(string_id=subdomain)
    except Realm.DoesNotExist:
        return redirect_to_subdomain_login_url()
    return redirect_and_log_into_subdomain(realm, full_name, email_address)
def authenticate_remote_user(request, email_address):
    # type: (HttpRequest, str) -> Tuple[UserProfile, Dict[str, Any]]
    """Authenticate via the dummy backend; return (profile, backend data)."""
    return_data = {}  # type: Dict[str, bool]
    profile = authenticate(
        username=email_address,
        realm_subdomain=get_subdomain(request),
        use_dummy_backend=True,
        return_data=return_data,
    )
    return profile, return_data
def log_into_subdomain(request):
    # type: (HttpRequest) -> HttpResponse
    """Complete a login on a realm subdomain using the signed state cookie
    set by redirect_and_log_into_subdomain on the root domain.
    """
    try:
        # Discard state if older than 15 seconds
        state = request.get_signed_cookie('subdomain.signature',
                                          salt='zerver.views.auth',
                                          max_age=15)
    except KeyError:
        logging.warning('Missing subdomain signature cookie.')
        return HttpResponse(status=400)
    except signing.BadSignature:
        logging.warning('Subdomain cookie has bad signature.')
        return HttpResponse(status=400)
    data = ujson.loads(state)
    if data['subdomain'] != get_subdomain(request):
        # Fixed typo in log message ("attemp" -> "attempt").
        logging.warning('Login attempt on invalid subdomain')
        return HttpResponse(status=400)
    email_address = data['email']
    full_name = data['name']
    user_profile, return_data = authenticate_remote_user(request, email_address)
    invalid_subdomain = bool(return_data.get('invalid_subdomain'))
    return login_or_register_remote_user(request, email_address, user_profile,
                                         full_name, invalid_subdomain)
def get_dev_users(extra_users_count=10):
    # type: (int) -> List[UserProfile]
    """Return users offered by the dev login page.
    Development environments usually have only a few users, but it still
    makes sense to limit how many extra users we render to support
    performance testing with DevAuthBackend.
    """
    active_humans = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
    shakespearian_users = active_humans.exclude(email__startswith='extrauser').order_by('email')
    # Limit the number of extra users we offer by default.
    extra_users = active_humans.filter(email__startswith='extrauser').order_by('email')[:extra_users_count]
    return list(shakespearian_users) + list(extra_users)
def login_page(request, **kwargs):
    # type: (HttpRequest, **Any) -> HttpResponse
    """Render the login page, wrapping Django's login view.
    Adds dev-login user lists when DevAuthBackend is enabled, and passes
    through email/subdomain query parameters into the template context.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect("/")
    if is_subdomain_root_or_alias(request) and settings.REALMS_HAVE_SUBDOMAINS:
        # On the root domain there is no realm to log in to; send the
        # user to the find-my-team flow instead.
        redirect_url = reverse('zerver.views.registration.find_my_team')
        return HttpResponseRedirect(redirect_url)
    extra_context = kwargs.pop('extra_context', {})
    if dev_auth_enabled():
        # Offer clickable dev logins grouped by role and realm.
        users = get_dev_users()
        extra_context['direct_admins'] = [u.email for u in users if u.is_realm_admin]
        extra_context['direct_users'] = [
            u.email for u in users
            if not u.is_realm_admin and u.realm.string_id == 'zulip']
        extra_context['community_users'] = [
            u.email for u in users
            if u.realm.string_id != 'zulip']
    template_response = django_login_page(
        request, authentication_form=OurAuthenticationForm,
        extra_context=extra_context, **kwargs)
    try:
        template_response.context_data['email'] = request.GET['email']
    except KeyError:
        pass
    try:
        template_response.context_data['subdomain'] = request.GET['subdomain']
        template_response.context_data['wrong_subdomain_error'] = WRONG_SUBDOMAIN_ERROR
    except KeyError:
        pass
    return template_response
def dev_direct_login(request, **kwargs):
    # type: (HttpRequest, **Any) -> HttpResponse
    """Log in as the POSTed `direct_email` without a password (dev only)."""
    # This function allows logging in without a password and should only be called in development environments.
    # It may be called if the DevAuthBackend is included in settings.AUTHENTICATION_BACKENDS
    if (not dev_auth_enabled()) or settings.PRODUCTION:
        # This check is probably not required, since authenticate would fail without an enabled DevAuthBackend.
        raise Exception('Direct login not supported.')
    email = request.POST['direct_email']
    user_profile = authenticate(username=email, realm_subdomain=get_subdomain(request))
    if user_profile is None:
        raise Exception("User cannot login")
    login(request, user_profile)
    # Prefer the realm's own URI when subdomains are in use.
    if settings.REALMS_HAVE_SUBDOMAINS and user_profile.realm.subdomain is not None:
        return HttpResponseRedirect(user_profile.realm.uri)
    return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
                                          request.get_host()))
@csrf_exempt
@require_post
@has_request_variables
def api_dev_fetch_api_key(request, username=REQ()):
    # type: (HttpRequest, str) -> HttpResponse
    """This function allows logging in without a password on the Zulip
    mobile apps when connecting to a Zulip development environment. It
    requires DevAuthBackend to be included in settings.AUTHENTICATION_BACKENDS.
    """
    if not dev_auth_enabled() or settings.PRODUCTION:
        return json_error(_("Dev environment not enabled."))
    return_data = {}  # type: Dict[str, bool]
    user_profile = authenticate(username=username,
                                realm_subdomain=get_subdomain(request),
                                return_data=return_data)
    if return_data.get("inactive_realm"):
        return json_error(_("Your realm has been deactivated."),
                          data={"reason": "realm deactivated"}, status=403)
    if return_data.get("inactive_user"):
        return json_error(_("Your account has been disabled."),
                          data={"reason": "user disable"}, status=403)
    if user_profile is None:
        # Fix: authenticate() returns None for unknown usernames; the
        # original called login(request, None), which crashes.
        return json_error(_("This user is not registered."),
                          data={"reason": "unregistered"}, status=403)
    login(request, user_profile)
    return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@csrf_exempt
def api_dev_get_emails(request):
    # type: (HttpRequest) -> HttpResponse
    """List dev-login emails, split into realm admins and regular users."""
    if not dev_auth_enabled() or settings.PRODUCTION:
        return json_error(_("Dev environment not enabled."))
    users = get_dev_users()
    admins = [u.email for u in users if u.is_realm_admin]
    non_admins = [u.email for u in users if not u.is_realm_admin]
    return json_success(dict(direct_admins=admins, direct_users=non_admins))
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request, username=REQ(), password=REQ()):
    # type: (HttpRequest, str, str) -> HttpResponse
    """Authenticate with username/password (or a Google OAuth2 token) and
    return the user's API key and email for API clients.
    """
    return_data = {}  # type: Dict[str, bool]
    if username == "google-oauth2-token":
        # Sentinel username: `password` actually carries an OAuth2 token.
        user_profile = authenticate(google_oauth2_token=password,
                                    realm_subdomain=get_subdomain(request),
                                    return_data=return_data)
    else:
        user_profile = authenticate(username=username,
                                    password=password,
                                    realm_subdomain=get_subdomain(request),
                                    return_data=return_data)
    if return_data.get("inactive_user"):
        return json_error(_("Your account has been disabled."),
                          data={"reason": "user disable"}, status=403)
    if return_data.get("inactive_realm"):
        return json_error(_("Your realm has been deactivated."),
                          data={"reason": "realm deactivated"}, status=403)
    if return_data.get("password_auth_disabled"):
        return json_error(_("Password auth is disabled in your team."),
                          data={"reason": "password auth disabled"}, status=403)
    if user_profile is None:
        if return_data.get("valid_attestation"):
            # We can leak that the user is unregistered iff they present a valid authentication string for the user.
            return json_error(_("This user is not registered; do so from a browser."),
                              data={"reason": "unregistered"}, status=403)
        return json_error(_("Your username or password is incorrect."),
                          data={"reason": "incorrect_creds"}, status=403)
    return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@csrf_exempt
def api_get_auth_backends(request):
    # type: (HttpRequest) -> HttpResponse
    """Report which authentication backends are enabled, plus the version.
    May return a false positive for password auth if it's been disabled
    for a specific realm. Currently only happens for zulip.com on prod.
    """
    backends = {
        "password": password_auth_enabled(None),
        "dev": dev_auth_enabled(),
        "google": google_auth_enabled(),
        "zulip_version": ZULIP_VERSION,
    }
    return json_success(backends)
@authenticated_json_post_view
@has_request_variables
def json_fetch_api_key(request, user_profile, password=REQ(default='')):
    # type: (HttpRequest, UserProfile, str) -> HttpResponse
    """Return the user's API key, re-checking the password where enabled."""
    if password_auth_enabled(user_profile.realm):
        auth_result = authenticate(username=user_profile.email, password=password,
                                   realm_subdomain=get_subdomain(request))
        if not auth_result:
            return json_error(_("Your username or password is incorrect."))
    return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request):
    # type: (HttpRequest) -> HttpResponse
    """Expose the Google OAuth client id for mobile/API clients."""
    client_id = settings.GOOGLE_CLIENT_ID
    if not client_id:
        return json_error(_("GOOGLE_CLIENT_ID is not configured"), status=400)
    return json_success({"google_client_id": client_id})
@require_post
def logout_then_login(request, **kwargs):
    # type: (HttpRequest, **Any) -> HttpResponse
    """Log the user out and redirect to the login page.
    Fix: the kwargs dict was passed positionally, so it landed in
    django_logout_then_login's `login_url` parameter; it must be
    expanded as keyword arguments instead.
    """
    return django_logout_then_login(request, **kwargs)
| |
from dilap.geometry.vec3 import vec3
from dilap.geometry.pointset import pointset
#import dilap.topology.meshes.trimesh as tmsh
import dilap.topology.trimesh as tmsh
#import dilap.mesh.tools as dtl
import dilap.core.plotting as dtl
import matplotlib.pyplot as plt
import unittest,numpy,math
import pdb
#python3 -m unittest discover -v ./ "*tests.py"
class test_trimesh(unittest.TestCase):
    """Unit tests for dilap.topology.trimesh: construction, masks, and
    face/edge removal. Fixtures build a quad (two triangles) or a cube."""
    def plotmesh(self):
        # Debug helper (not a test): render the current mesh with matplotlib.
        mesh = self.mesh
        ax = dtl.plot_axes()
        for f in mesh.faces:
            pxs = (mesh.verts[vx][0] for vx in f)
            ps = self.pset.gps(pxs)
            ax = dtl.plot_polygon(ps,ax)
        plt.show()
    def avert(self,p):
        # Add a mesh vertex at point p with a default normal (+z) and uv (0,0).
        px = self.pset.ap(p)
        nx = self.nset.ap(vec3(0,0,1))
        ux = self.uset.ap(vec3(0,0,0))
        return self.mesh.avert(px,nx,ux)
    def assert_counts(self,vcnt,ecnt,fcnt):
        # Assert the mesh's vertex, edge, and face counts in one call.
        self.assertEqual(self.mesh.vcnt(),vcnt)
        self.assertEqual(self.mesh.ecnt(),ecnt)
        self.assertEqual(self.mesh.fcnt(),fcnt)
    def quad(self):
        # Fixture: unit quad built from two triangles sharing the v1-v3 diagonal.
        self.v1 = self.avert(vec3(-1,-1,-1))
        self.v2 = self.avert(vec3( 1,-1,-1))
        self.v3 = self.avert(vec3( 1, 1,-1))
        self.v4 = self.avert(vec3(-1, 1,-1))
        self.f1 = self.mesh.aface(self.v1,self.v2,self.v3)
        self.f2 = self.mesh.aface(self.v1,self.v3,self.v4)
    def test_quad(self):
        self.quad()
        self.assert_counts(4,6,2)
    def cube_symm(self):
        # Fixture: cube with an extra vertex at each face center (24 triangles).
        self.v1 = self.avert(vec3(-1,-1,-1))
        self.v2 = self.avert(vec3( 1,-1,-1))
        self.v3 = self.avert(vec3( 1, 1,-1))
        self.v4 = self.avert(vec3(-1, 1,-1))
        self.v5 = self.avert(vec3(-1,-1, 1))
        self.v6 = self.avert(vec3( 1,-1, 1))
        self.v7 = self.avert(vec3( 1, 1, 1))
        self.v8 = self.avert(vec3(-1, 1, 1))
        self.v9 = self.avert(vec3( 0, 0,-1))
        self.v10 = self.avert(vec3( 0, 0, 1))
        self.v11 = self.avert(vec3(-1, 0, 0))
        self.v12 = self.avert(vec3( 1, 0, 0))
        self.v13 = self.avert(vec3( 0,-1, 0))
        self.v14 = self.avert(vec3( 0, 1, 0))
        self.f1 = self.mesh.aface(self.v1,self.v2 ,self.v9)
        self.f2 = self.mesh.aface(self.v2,self.v3 ,self.v9)
        self.f3 = self.mesh.aface(self.v3,self.v4 ,self.v9)
        self.f4 = self.mesh.aface(self.v4,self.v1 ,self.v9)
        self.f5 = self.mesh.aface(self.v5,self.v6,self.v10)
        self.f6 = self.mesh.aface(self.v6,self.v7,self.v10)
        self.f7 = self.mesh.aface(self.v7,self.v8,self.v10)
        self.f8 = self.mesh.aface(self.v8,self.v5,self.v10)
        self.f9 = self.mesh.aface(self.v1,self.v2,self.v13)
        self.f10 = self.mesh.aface(self.v2,self.v6,self.v13)
        self.f11 = self.mesh.aface(self.v6,self.v5,self.v13)
        self.f12 = self.mesh.aface(self.v5,self.v1,self.v13)
        self.f13 = self.mesh.aface(self.v3,self.v4,self.v14)
        self.f14 = self.mesh.aface(self.v4,self.v8,self.v14)
        self.f15 = self.mesh.aface(self.v8,self.v7,self.v14)
        self.f16 = self.mesh.aface(self.v7,self.v3,self.v14)
        self.f17 = self.mesh.aface(self.v2,self.v3,self.v12)
        self.f18 = self.mesh.aface(self.v3,self.v7,self.v12)
        self.f19 = self.mesh.aface(self.v7,self.v6,self.v12)
        self.f20 = self.mesh.aface(self.v6,self.v2,self.v12)
        self.f21 = self.mesh.aface(self.v4,self.v1,self.v11)
        self.f22 = self.mesh.aface(self.v1,self.v5,self.v11)
        self.f23 = self.mesh.aface(self.v5,self.v8,self.v11)
        self.f24 = self.mesh.aface(self.v8,self.v4,self.v11)
        #self.plotmesh()
    def cube(self):
        # Fixture: standard cube triangulation (8 vertices, 12 triangles).
        self.v1 = self.avert(vec3(-1,-1,-1))
        self.v2 = self.avert(vec3( 1,-1,-1))
        self.v3 = self.avert(vec3( 1, 1,-1))
        self.v4 = self.avert(vec3(-1, 1,-1))
        self.v5 = self.avert(vec3(-1,-1, 1))
        self.v6 = self.avert(vec3( 1,-1, 1))
        self.v7 = self.avert(vec3( 1, 1, 1))
        self.v8 = self.avert(vec3(-1, 1, 1))
        self.f1 = self.mesh.aface(self.v1,self.v3,self.v2)
        self.f2 = self.mesh.aface(self.v1,self.v4,self.v3)
        self.f3 = self.mesh.aface(self.v5,self.v6,self.v7)
        self.f4 = self.mesh.aface(self.v5,self.v7,self.v8)
        self.f5 = self.mesh.aface(self.v1,self.v2,self.v5)
        self.f6 = self.mesh.aface(self.v1,self.v5,self.v4)
        self.f7 = self.mesh.aface(self.v2,self.v3,self.v6)
        self.f8 = self.mesh.aface(self.v2,self.v6,self.v5)
        self.f9 = self.mesh.aface(self.v3,self.v4,self.v7)
        self.f10 = self.mesh.aface(self.v3,self.v7,self.v6)
        self.f11 = self.mesh.aface(self.v4,self.v1,self.v8)
        self.f12 = self.mesh.aface(self.v4,self.v8,self.v7)
    def test_cube(self):
        self.cube()
        self.assert_counts(8,36,12)
    def test_cube_symm(self):
        self.cube_symm()
        self.assert_counts(14,72,24)
    def setUp(self):
        # Fresh mesh and point/normal/uv sets for every test.
        self.mesh = tmsh.trimesh()
        self.pset = pointset()
        self.nset = pointset()
        self.uset = pointset()
    def test_init(self):
        self.assert_counts(0,0,0)
    # NEED A TRULY COMPREHENSIVE SET OF MASK TESTS...
    #def test_mask(self):
    # NEED A TRULY COMPREHENSIVE SET OF MASK TESTS...
    def test_mask_v1(self):
        # mask(0/1/2, vertex) returns adjacent vertices / edges / faces.
        self.quad()
        v10m = self.mesh.mask(0,self.mesh.verts[self.v1],None,None)
        self.assertTrue(self.mesh.verts[self.v2] in v10m)
        self.assertTrue(self.mesh.verts[self.v3] in v10m)
        self.assertTrue(self.mesh.verts[self.v4] in v10m)
        v11m = self.mesh.mask(1,self.mesh.verts[self.v1],None,None)
        self.assertTrue((0,1) in v11m)
        self.assertTrue((3,0) in v11m)
        self.assertTrue((0,2) in v11m)
        self.assertTrue((2,0) in v11m)
        v12m = self.mesh.mask(2,self.mesh.verts[self.v1],None,None)
        self.assertTrue((0,1,2) in v12m)
        self.assertTrue((0,2,3) in v12m)
        self.assertEqual(len(v10m),3)
        self.assertEqual(len(v11m),4)
        self.assertEqual(len(v12m),2)
    def test_mask_v4(self):
        # v4 is on only one triangle, so its mask is smaller than v1's.
        self.quad()
        v40m = self.mesh.mask(0,self.mesh.verts[self.v4],None,None)
        self.assertTrue(self.mesh.verts[self.v1] in v40m)
        self.assertFalse(self.mesh.verts[self.v2] in v40m)
        self.assertTrue(self.mesh.verts[self.v3] in v40m)
        v41m = self.mesh.mask(1,self.mesh.verts[self.v4],None,None)
        self.assertTrue((3,0) in v41m)
        self.assertTrue((2,3) in v41m)
        v42m = self.mesh.mask(2,self.mesh.verts[self.v4],None,None)
        self.assertTrue((0,2,3) in v42m)
        self.assertEqual(len(v40m),2)
        self.assertEqual(len(v41m),2)
        self.assertEqual(len(v42m),1)
    #def test_adjc(self):
    #def test_vonb(self):
    #def test_eonb(self):
    #def test_alphan(self):
    def test_avert(self):
        # Adding vertices alone creates no edges or faces.
        self.v1 = self.avert(vec3(-1,-1,-1))
        self.v2 = self.avert(vec3( 1,-1,-1))
        self.v3 = self.avert(vec3( 1, 1,-1))
        self.v4 = self.avert(vec3(-1, 1,-1))
        self.assert_counts(4,0,0)
    #def test_rvert(self):
    #def test_aedge(self):
    def test_redge(self):
        # Removing a boundary edge (with its face) leaves the other triangle.
        self.quad()
        self.mesh.redge((0,1),True)
        self.assert_counts(3,3,1)
        self.assertTrue((0,2) in self.mesh.edges)
        self.assertTrue((2,3) in self.mesh.edges)
        self.assertTrue((3,0) in self.mesh.edges)
    #def test_sedge(self):
    def test_fedge(self):
        # Flipping the shared diagonal 0-2 replaces it with 1-3 and
        # retriangulates, keeping overall counts unchanged.
        self.quad()
        self.assert_counts(4,6,2)
        self.mesh.fedge(0,2)
        #call twice to confirm nonexistent edge case
        self.mesh.fedge(0,2)
        self.assertTrue((1,3) in self.mesh.ef_rings)
        self.assertTrue((3,1) in self.mesh.ef_rings)
        self.assertFalse((0,2) in self.mesh.ef_rings)
        self.assertFalse((2,0) in self.mesh.ef_rings)
        self.assertFalse((0,1,2) in self.mesh.fs_mats)
        self.assertFalse((0,2,3) in self.mesh.fs_mats)
        self.assertTrue((3,0,1) in self.mesh.fs_mats)
        self.assertTrue((1,2,3) in self.mesh.fs_mats)
        self.assert_counts(4,6,2)
    def test_aface(self):
        self.v1 = self.avert(vec3(-1,-1,-1))
        self.v2 = self.avert(vec3( 1,-1,-1))
        self.v3 = self.avert(vec3( 1, 1,-1))
        self.v4 = self.avert(vec3(-1, 1,-1))
        fx = self.mesh.aface(self.v1,self.v2,self.v3)
        self.assert_counts(4,3,1)
    def test_rface(self):
        # Default rface removes the face plus its orphaned edges/vertices.
        self.quad()
        self.mesh.rface(self.mesh.faces[self.f1])
        self.assert_counts(3,3,1)
        self.assertFalse((1,1,1) in self.mesh.verts)
        self.assertFalse((0,1) in self.mesh.edges)
        self.assertFalse((1,2) in self.mesh.edges)
        self.assertFalse((2,0) in self.mesh.edges)
        self.assertFalse((0,1,2) in self.mesh.faces)
        self.mesh.rface(self.mesh.faces[self.f2])
        self.assert_counts(0,0,0)
    def test_rface_only(self):
        # rface(f, False, False): remove the face only, keep edges/vertices.
        self.quad()
        self.mesh.rface(self.mesh.faces[self.f1],False,False)
        self.assert_counts(4,6,1)
        self.assertTrue((1,1,1) in self.mesh.verts)
        self.assertTrue((0,1) in self.mesh.edges)
        self.assertTrue((1,2) in self.mesh.edges)
        self.assertTrue((2,0) in self.mesh.edges)
        self.assertFalse((0,1,2) in self.mesh.faces)
    def test_rface_eonly(self):
        # rface(f, False, True): remove the face and its edges, keep vertices.
        self.quad()
        self.mesh.rface(self.mesh.faces[self.f1],False,True)
        self.assert_counts(4,3,1)
        self.assertTrue((1,1,1) in self.mesh.verts)
        self.assertFalse((0,1) in self.mesh.edges)
        self.assertFalse((1,2) in self.mesh.edges)
        self.assertFalse((2,0) in self.mesh.edges)
        self.assertFalse((0,1,2) in self.mesh.faces)
    def test_rface_vonly(self):
        # rface(f, True, False): cascade removal through vertices.
        self.quad()
        self.mesh.rface(self.mesh.faces[self.f1],True,False)
        self.assert_counts(1,0,0)
        self.assertTrue((3,3,3) in self.mesh.verts)
    def test_sface(self):
        # Splitting a face at an interior vertex adds 6 edges and 2 faces.
        self.quad()
        sv1 = self.avert(vec3(0.5,-0.5,-1))
        sv2 = self.avert(vec3(0.5,-0.5,-1))
        self.assert_counts(6,6,2)
        self.mesh.sface(sv1,self.v1,self.v2,self.v3)
        self.assert_counts(6,12,4)
        self.mesh.sface(sv2,self.v1,self.v3,self.v4)
        self.assert_counts(6,18,6)
    #def test_fan(self):
    #def test_connected(self):
    def atest_tripoly(self):
        # Disabled ("atest" prefix): interactive triangulation exploration.
        eb = (vec3(-2,-2,0),vec3(2,-2,0),vec3(2,2,0),vec3(-2,2,0))
        ibs = ()
        hmin,ref,smo = 1,False,False
        self.mesh.tripoly(eb,ibs,hmin,ref,smo)
        pdb.set_trace()
# Allow running this test module directly (see header for discover usage).
if __name__ == '__main__':
    unittest.main()
| |
import keyword
from pycharm_generator_utils.util_methods import *
from pycharm_generator_utils.constants import *
class emptylistdict(dict):
    """defaultdict not available before 2.5; simplest reimplementation using [] as default"""
    def __getitem__(self, item):
        # EAFP lookup: on a missing key, create a fresh empty list,
        # remember it under the key, and hand it back.
        try:
            return dict.__getitem__(self, item)
        except KeyError:
            default = []
            self[item] = default
            return default
class Buf(object):
    """Buffers data in a list, can write to a file. Indentation is provided externally."""
    def __init__(self, indenter):
        # indenter: any object with an indent(level) -> str method.
        self.data = []
        self.indenter = indenter
    def put(self, data):
        # Append a chunk, normalized to unicode; empty chunks are dropped.
        if data:
            self.data.append(ensureUnicode(data))
    def out(self, indent, *what):
        """Output the arguments, indenting as needed, and adding an eol"""
        self.put(self.indenter.indent(indent))
        for item in what:
            self.put(item)
        self.put("\n")
    def flush_bytes(self, outfile):
        # Write to a binary-mode file, encoding each chunk.
        for data in self.data:
            outfile.write(data.encode(OUT_ENCODING, "replace"))
    def flush_str(self, outfile):
        # Write to a text-mode file without encoding.
        for data in self.data:
            outfile.write(data)
    # Python 2 output files expect bytes; Python 3 text files expect str.
    if version[0] < 3:
        flush = flush_bytes
    else:
        flush = flush_str
    def isEmpty(self):
        return len(self.data) == 0
class ClassBuf(Buf):
    # A Buf that also remembers the name of the class it accumulates,
    # used when splitting output into one file per class.
    def __init__(self, name, indenter):
        super(ClassBuf, self).__init__(indenter)
        self.name = name
#noinspection PyUnresolvedReferences,PyBroadException
class ModuleRedeclarator(object):
    def __init__(self, module, outfile, mod_filename, indent_size=4, doing_builtins=False):
        """
        Create new instance.
        @param module module to restore.
        @param outfile output file, must be open and writable.
        @param mod_filename filename of binary module (the .dll or .so)
        @param indent_size amount of space characters per indent
        """
        self.module = module
        self.outfile = outfile # where we finally write
        self.mod_filename = mod_filename
        # we write things into buffers out-of-order
        self.header_buf = Buf(self)
        self.imports_buf = Buf(self)
        self.functions_buf = Buf(self)
        self.classes_buf = Buf(self)
        self.classes_buffs = list()
        self.footer_buf = Buf(self)
        self.indent_size = indent_size
        # Pre-computed one-level indent string (self acts as the Bufs' indenter).
        self._indent_step = " " * self.indent_size
        # When True, each class is written to its own submodule file.
        self.split_modules = False
        #
        self.imported_modules = {"": the_builtins} # explicit module imports: {"name": module}
        self.hidden_imports = {} # {'real_mod_name': 'alias'}; we alias names with "__" since we don't want them exported
        # ^ used for things that we don't re-export but need to import, e.g. certain base classes in gnome.
        self._defined = {} # stores True for every name defined so far, to break circular refs in values
        self.doing_builtins = doing_builtins
        # Memoizes inferred return types per function.
        self.ret_type_cache = {}
        self.used_imports = emptylistdict() # qual_mod_name -> [imported_names,..]: actually used imported names
    def _initializeQApp4(self):
        """Instantiate a PyQt4 QCoreApplication so introspection can proceed."""
        try: # QtGui should be imported _before_ QtCore package.
            # This is done for the QWidget references from QtCore (such as QSignalMapper). Known bug in PyQt 4.7+
            # Causes "TypeError: C++ type 'QWidget*' is not supported as a native Qt signal type"
            import PyQt4.QtGui
        except ImportError:
            pass
        # manually instantiate and keep reference to singleton QCoreApplication (we don't want it to be deleted during the introspection)
        # use QCoreApplication instead of QApplication to avoid blinking app in Dock on Mac OS
        try:
            from PyQt4.QtCore import QCoreApplication
            self.app = QCoreApplication([])
            return
        except ImportError:
            pass
def _initializeQApp5(self):
try:
from PyQt5.QtCore import QCoreApplication
self.app = QCoreApplication([])
return
except ImportError:
pass
def indent(self, level):
    """Return indentation whitespace for given level."""
    return level * self._indent_step
def flush(self):
    """Write all buffers to disk.

    When split_modules is set, output becomes a package: header/imports/
    functions/classes go to __init__, and each ClassBuf gets its own file
    (named after the class) plus a local import line in __init__.
    Otherwise everything is written sequentially into a single file.
    """
    init = None
    try:
        if self.split_modules:
            mod_path = module_to_package_name(self.outfile)
            fname = build_output_name(mod_path, "__init__")
            init = fopen(fname, "w")
            for buf in (self.header_buf, self.imports_buf, self.functions_buf, self.classes_buf):
                buf.flush(init)
            data = ""
            for buf in self.classes_buffs:
                # each class file repeats the header and imports, then its class body
                fname = build_output_name(mod_path, buf.name)
                dummy = fopen(fname, "w")
                self.header_buf.flush(dummy)
                self.imports_buf.flush(dummy)
                buf.flush(dummy)
                data += self.create_local_import(buf.name)
                dummy.close()
            init.write(data)
            self.footer_buf.flush(init)
        else:
            init = fopen(self.outfile, "w")
            for buf in (self.header_buf, self.imports_buf, self.functions_buf, self.classes_buf):
                buf.flush(init)
            for buf in self.classes_buffs:
                buf.flush(init)
            self.footer_buf.flush(init)
    finally:
        # close the main output file even if a buffer flush raised
        if init is not None and not init.closed:
            init.close()
# Some builtin classes effectively change __init__ signature without overriding it.
# This callable serves as a placeholder to be replaced via REDEFINED_BUILTIN_SIGS
def fake_builtin_init(self):
    """Placeholder __init__ substituted for builtins listed in FAKE_BUILTIN_INITS."""
    pass # just a callable, sig doesn't matter
fake_builtin_init.__doc__ = object.__init__.__doc__ # this forces class's doc to be used instead
def create_local_import(self, name):
    """Return a "from <name> import <name>" line for a top-level name.

    Dotted names yield "" (no local import is generated for them). On py3
    the import is made explicitly relative ("from .name import name").
    """
    if "." in name:
        return ""
    relative_dot = "." if version[0] >= 3 else ""
    return "from %s%s import %s\n" % (relative_dot, name, name)
def find_imported_name(self, item):
    """
    Finds out how the item is represented in imported modules.
    @param item what to check
    @return qualified name (like "sys.stdin") or None
    """
    # TODO: return a pair, not a glued string
    if not isinstance(item, SIMPLEST_TYPES):
        for mname in self.imported_modules:
            m = self.imported_modules[mname]
            for inner_name in m.__dict__:
                suspect = getattr(m, inner_name)
                # identity check: we want the very same object, not an equal one
                if suspect is item:
                    if mname:
                        mname += "."
                    elif self.module is the_builtins: # don't short-circuit builtins
                        return None
                    return mname + inner_name
    return None
# (type, literal) pairs consulted by invent_initializer() below.
_initializers = (
    (dict, "{}"),
    (tuple, "()"),
    (list, "[]"),
)
def invent_initializer(self, a_type):
    """
    Returns an innocuous initializer expression for a_type, or "None"
    """
    for candidate_type, literal in self._initializers:
        if candidate_type == a_type:
            return literal
    # NOTE: here we could handle things like defaultdict, sets, etc if we wanted
    return "None"
def fmt_value(self, out, p_value, indent, prefix="", postfix="", as_name=None, seen_values=None):
    """
    Formats and outputs value (it occupies an entire line or several lines).
    @param out function that does output (a Buf.out)
    @param p_value the value.
    @param indent indent level.
    @param prefix text to print before the value
    @param postfix text to print after the value
    @param as_name hints which name are we trying to print; helps with circular refs.
    @param seen_values a list of keys we've seen if we're processing a dict
    """
    SELF_VALUE = "<value is a self-reference, replaced by this string>"
    ERR_VALUE = "<failed to retrieve the value>"
    if isinstance(p_value, SIMPLEST_TYPES):
        out(indent, prefix, reliable_repr(p_value), postfix)
    else:
        if sys.platform == "cli":
            imported_name = None
        else:
            imported_name = self.find_imported_name(p_value)
        if imported_name:
            # the value is available under a name in an imported module; reference it
            out(indent, prefix, imported_name, postfix)
            # TODO: kind of self.used_imports[imported_name].append(p_value) but split imported_name
            # else we could potentially return smth we did not otherwise import. but not likely.
        else:
            if isinstance(p_value, (list, tuple)):
                # seen_values breaks self-referencing containers
                if not seen_values:
                    seen_values = [p_value]
                if len(p_value) == 0:
                    out(indent, prefix, repr(p_value), postfix)
                else:
                    if isinstance(p_value, list):
                        lpar, rpar = "[", "]"
                    else:
                        lpar, rpar = "(", ")"
                    out(indent, prefix, lpar)
                    for value in p_value:
                        if value in seen_values:
                            value = SELF_VALUE
                        elif not isinstance(value, SIMPLEST_TYPES):
                            seen_values.append(value)
                        self.fmt_value(out, value, indent + 1, postfix=",", seen_values=seen_values)
                    out(indent, rpar, postfix)
            elif isinstance(p_value, dict):
                if len(p_value) == 0:
                    out(indent, prefix, repr(p_value), postfix)
                else:
                    if not seen_values:
                        seen_values = [p_value]
                    out(indent, prefix, "{")
                    keys = list(p_value.keys())
                    try:
                        keys.sort()
                    except TypeError:
                        pass # unsortable keys happen, e,g, in py3k _ctypes
                    for k in keys:
                        value = p_value[k]
                        try:
                            is_seen = value in seen_values
                        except:
                            # membership test itself may raise for exotic values
                            is_seen = False
                            value = ERR_VALUE
                        if is_seen:
                            value = SELF_VALUE
                        elif not isinstance(value, SIMPLEST_TYPES):
                            seen_values.append(value)
                        if isinstance(k, SIMPLEST_TYPES):
                            self.fmt_value(out, value, indent + 1, prefix=repr(k) + ": ", postfix=",",
                                           seen_values=seen_values)
                        else:
                            # both key and value need fancy formatting
                            self.fmt_value(out, k, indent + 1, postfix=": ", seen_values=seen_values)
                            self.fmt_value(out, value, indent + 2, seen_values=seen_values)
                            out(indent + 1, ",")
                    out(indent, "}", postfix)
            else: # something else, maybe representable
                # look up this value in the module.
                if sys.platform == "cli":
                    out(indent, prefix, "None", postfix)
                    return
                found_name = ""
                for inner_name in self.module.__dict__:
                    if self.module.__dict__[inner_name] is p_value:
                        found_name = inner_name
                        break
                if self._defined.get(found_name, False):
                    out(indent, prefix, found_name, postfix)
                elif hasattr(self, "app"):
                    # a Qt app is live (see _initializeQApp4/5): skip opaque values
                    return
                else:
                    # a forward / circular declaration happens
                    notice = ""
                    try:
                        representation = repr(p_value)
                    except Exception:
                        import traceback
                        traceback.print_exc(file=sys.stderr)
                        return
                    real_value = cleanup(representation)
                    if found_name:
                        if found_name == as_name:
                            notice = " # (!) real value is %r" % real_value
                            real_value = "None"
                        else:
                            notice = " # (!) forward: %s, real value is %r" % (found_name, real_value)
                    if SANE_REPR_RE.match(real_value):
                        out(indent, prefix, real_value, postfix, notice)
                    else:
                        if not found_name:
                            notice = " # (!) real value is %r" % real_value
                        out(indent, prefix, "None", postfix, notice)
def get_ret_type(self, attr):
    """
    Returns a return type string as given by T_RETURN in tokens, or None
    """
    if attr:
        ret_type = RET_TYPE.get(attr, None)
        if ret_type:
            return ret_type
        thing = getattr(self.module, attr, None)
        if thing:
            if not isinstance(thing, type) and is_callable(thing): # a function
                return None # TODO: maybe divinate a return type; see pygame.mixer.Channel
            return attr
        # not in this module: look through everything we imported
        # adds no noticeable slowdown, I did measure. dch.
        for im_name, im_module in self.imported_modules.items():
            cache_key = (im_name, attr)
            cached = self.ret_type_cache.get(cache_key, None)
            if cached:
                return cached
            ret_type = getattr(im_module, attr, None)
            if ret_type:
                if isinstance(ret_type, type):
                    # detect a constructor
                    constr_args = detect_constructor(ret_type)
                    if constr_args is None:
                        constr_args = "*(), **{}" # a silly catch-all constructor
                    reference = "%s(%s)" % (attr, constr_args)
                elif is_callable(ret_type): # a function, classes are ruled out above
                    return None
                else:
                    reference = attr
                if im_name:
                    result = "%s.%s" % (im_name, reference)
                else: # built-in
                    result = reference
                self.ret_type_cache[cache_key] = result
                return result
        # TODO: handle things like "[a, b,..] and (foo,..)"
    return None
# Notes appended to a restored signature, reflecting how reliable parsing was.
SIG_DOC_NOTE = "restored from __doc__"
SIG_DOC_UNRELIABLY = "NOTE: unreliably restored from __doc__ "
def restore_by_docstring(self, signature_string, class_name, deco=None, ret_hint=None):
    """
    @param signature_string: parameter list extracted from the doc string.
    @param class_name: name of the containing class, or None
    @param deco: decorator to use
    @param ret_hint: return type hint, if available
    @return (reconstructed_spec, return_type, note) or (None, _, _) if failed.
    """
    action("restoring func %r of class %r", signature_string, class_name)
    # parse
    parsing_failed = False
    ret_type = None
    try:
        # strict parsing
        tokens = paramSeqAndRest.parseString(signature_string, True)
        ret_name = None
        if tokens:
            ret_t = tokens[-1]
            if ret_t[0] is T_RETURN:
                ret_name = ret_t[1]
        ret_type = self.get_ret_type(ret_name) or self.get_ret_type(ret_hint)
    except ParseException:
        # it did not parse completely; scavenge what we can
        parsing_failed = True
        tokens = []
        try:
            # most unrestrictive parsing
            tokens = paramSeq.parseString(signature_string, False)
        except ParseException:
            pass
    #
    seq = transform_seq(tokens)
    # add safe defaults for unparsed
    if parsing_failed:
        doc_node = self.SIG_DOC_UNRELIABLY
        starred = None
        double_starred = None
        for one in seq:
            if type(one) is str:
                if one.startswith("**"):
                    double_starred = one
                elif one.startswith("*"):
                    starred = one
        # catch-all varargs make the unreliable signature accept anything
        if not starred:
            seq.append("*args")
        if not double_starred:
            seq.append("**kwargs")
    else:
        doc_node = self.SIG_DOC_NOTE
    # add 'self' if needed YYY
    if class_name and (not seq or seq[0] != 'self'):
        first_param = propose_first_param(deco)
        if first_param:
            seq.insert(0, first_param)
    seq = make_names_unique(seq)
    return (seq, ret_type, doc_node)
def parse_func_doc(self, func_doc, func_id, func_name, class_name, deco=None, sip_generated=False):
    """
    @param func_doc: __doc__ of the function.
    @param func_id: name to look for as identifier of the function in docstring
    @param func_name: name of the function.
    @param class_name: name of the containing class, or None
    @param deco: decorator to use
    @param sip_generated: True for SIP-made modules, whose docstrings list one overload per line
    @return (reconstructed_spec, return_literal, note) or (None, _, _) if failed.
    """
    if sip_generated:
        overloads = []
        for part in func_doc.split('\n'):
            signature = func_id + '('
            i = part.find(signature)
            if i >= 0:
                overloads.append(part[i + len(signature):])
        if len(overloads) > 1:
            # restore each overload and merge parameter lists / return types
            docstring_results = [self.restore_by_docstring(overload, class_name, deco) for overload in overloads]
            ret_types = []
            for result in docstring_results:
                rt = result[1]
                if rt and rt not in ret_types:
                    ret_types.append(rt)
            if ret_types:
                ret_literal = " or ".join(ret_types)
            else:
                ret_literal = None
            param_lists = [result[0] for result in docstring_results]
            spec = build_signature(func_name, restore_parameters_for_overloads(param_lists))
            return (spec, ret_literal, "restored from __doc__ with multiple overloads")
    # find the first thing to look like a definition
    # NOTE: raw strings here; the previous non-raw "\s" relied on Python keeping
    # unknown escapes verbatim, which raises SyntaxWarning on modern interpreters.
    prefix_re = re.compile(r"\s*(?:(\w+)[ \t]+)?" + func_id + r"\s*\(") # "foo(..." or "int foo(..."
    match = prefix_re.search(func_doc) # Note: this and previous line may consume up to 35% of time
    # parse the part that looks right
    if match:
        ret_hint = match.group(1)
        params, ret_literal, doc_note = self.restore_by_docstring(func_doc[match.end():], class_name, deco, ret_hint)
        spec = func_name + flatten(params)
        return (spec, ret_literal, doc_note)
    else:
        return (None, None, None)
def is_predefined_builtin(self, module_name, class_name, func_name):
    """True when we're restoring builtins and (class_name, func_name) has a
    predefined signature in PREDEFINED_BUILTIN_SIGS."""
    if not self.doing_builtins or module_name != BUILTIN_MOD_NAME:
        return False
    return (class_name, func_name) in PREDEFINED_BUILTIN_SIGS
def redo_function(self, out, p_func, p_name, indent, p_class=None, p_modname=None, classname=None, seen=None):
    """
    Restore function argument list as best we can.
    @param out output function of a Buf
    @param p_func function or method object
    @param p_name function name as known to owner
    @param indent indentation level
    @param p_class the class that contains this function as a method
    @param p_modname module name
    @param classname class name to use in output (defaults to p_class.__name__)
    @param seen {id(func): name} map of functions already seen in the same namespace;
    id() because *some* functions are unhashable (eg _elementtree.Comment in py2.7)
    """
    action("redoing func %r of class %r", p_name, p_class)
    # a function object seen under another name becomes a simple alias
    if seen is not None:
        other_func = seen.get(id(p_func), None)
        if other_func and getattr(other_func, "__doc__", None) is getattr(p_func, "__doc__", None):
            # _bisect.bisect == _bisect.bisect_right in py31, but docs differ
            out(indent, p_name, " = ", seen[id(p_func)])
            out(indent, "")
            return
        else:
            seen[id(p_func)] = p_name
    # real work
    if classname is None:
        classname = p_class and p_class.__name__ or None
    if p_class and hasattr(p_class, '__mro__'):
        sip_generated = [base_t for base_t in p_class.__mro__ if 'sip.simplewrapper' in str(base_t)]
    else:
        sip_generated = False
    deco = None
    deco_comment = ""
    mod_class_method_tuple = (p_modname, classname, p_name)
    ret_literal = None
    is_init = False
    # any decorators?
    action("redoing decos of func %r of class %r", p_name, p_class)
    if self.doing_builtins and p_modname == BUILTIN_MOD_NAME:
        deco = KNOWN_DECORATORS.get((classname, p_name), None)
        if deco:
            deco_comment = " # known case"
    elif p_class and p_name in p_class.__dict__:
        # detect native methods declared with METH_CLASS flag
        descriptor = p_class.__dict__[p_name]
        if p_name != "__new__" and type(descriptor).__name__.startswith('classmethod'):
            # 'classmethod_descriptor' in Python 2.x and 3.x, 'classmethod' in Jython
            deco = "classmethod"
        elif type(p_func).__name__.startswith('staticmethod'):
            deco = "staticmethod"
    if p_name == "__new__":
        deco = "staticmethod"
        deco_comment = " # known case of __new__"
    action("redoing innards of func %r of class %r", p_name, p_class)
    if deco and HAS_DECORATORS:
        out(indent, "@", deco, deco_comment)
    # decision ladder: best signature source first, docstring parsing last
    if inspect and inspect.isfunction(p_func):
        out(indent, "def ", p_name, restore_by_inspect(p_func), ": # reliably restored by inspect", )
        out_doc_attr(out, p_func, indent + 1, p_class)
    elif self.is_predefined_builtin(*mod_class_method_tuple):
        spec, sig_note = restore_predefined_builtin(classname, p_name)
        out(indent, "def ", spec, ": # ", sig_note)
        out_doc_attr(out, p_func, indent + 1, p_class)
    elif sys.platform == 'cli' and is_clr_type(p_class):
        is_static, spec, sig_note = restore_clr(p_name, p_class)
        if is_static:
            out(indent, "@staticmethod")
        if not spec: return
        if sig_note:
            out(indent, "def ", spec, ": #", sig_note)
        else:
            out(indent, "def ", spec, ":")
        if not p_name in ['__gt__', '__ge__', '__lt__', '__le__', '__ne__', '__reduce_ex__', '__str__']:
            out_doc_attr(out, p_func, indent + 1, p_class)
    elif mod_class_method_tuple in PREDEFINED_MOD_CLASS_SIGS:
        sig, ret_literal = PREDEFINED_MOD_CLASS_SIGS[mod_class_method_tuple]
        if classname:
            ofwhat = "%s.%s.%s" % mod_class_method_tuple
        else:
            ofwhat = "%s.%s" % (p_modname, p_name)
        out(indent, "def ", p_name, sig, ": # known case of ", ofwhat)
        out_doc_attr(out, p_func, indent + 1, p_class)
    else:
        # __doc__ is our best source of arglist
        sig_note = "real signature unknown"
        spec = ""
        is_init = (p_name == "__init__" and p_class is not None)
        funcdoc = None
        if is_init and hasattr(p_class, "__doc__"):
            if hasattr(p_func, "__doc__"):
                funcdoc = p_func.__doc__
            if funcdoc == object.__init__.__doc__:
                funcdoc = p_class.__doc__
        elif hasattr(p_func, "__doc__"):
            funcdoc = p_func.__doc__
        sig_restored = False
        action("parsing doc of func %r of class %r", p_name, p_class)
        if isinstance(funcdoc, STR_TYPES):
            (spec, ret_literal, more_notes) = self.parse_func_doc(funcdoc, p_name, p_name, classname, deco,
                                                                  sip_generated)
            if spec is None and p_name == '__init__' and classname:
                # some docs describe __init__ under the class's own name
                (spec, ret_literal, more_notes) = self.parse_func_doc(funcdoc, classname, p_name, classname, deco,
                                                                      sip_generated)
            sig_restored = spec is not None
            if more_notes:
                if sig_note:
                    sig_note += "; "
                sig_note += more_notes
        if not sig_restored:
            # use an allow-all declaration
            decl = []
            if p_class:
                first_param = propose_first_param(deco)
                if first_param:
                    decl.append(first_param)
            decl.append("*args")
            decl.append("**kwargs")
            spec = p_name + "(" + ", ".join(decl) + ")"
        out(indent, "def ", spec, ": # ", sig_note)
        # to reduce size of stubs, don't output same docstring twice for class and its __init__ method
        if not is_init or funcdoc != p_class.__doc__:
            out_docstring(out, funcdoc, indent + 1)
    # body
    if ret_literal and not is_init:
        out(indent + 1, "return ", ret_literal)
    else:
        out(indent + 1, "pass")
    if deco and not HAS_DECORATORS:
        out(indent, p_name, " = ", deco, "(", p_name, ")", deco_comment)
    out(0, "") # empty line after each item
def redo_class(self, out, p_class, p_name, indent, p_modname=None, seen=None, inspect_dir=False):
    """
    Restores a class definition.
    @param out output function of a relevant buf
    @param p_class the class object
    @param p_name class name as known to owner
    @param indent indentation level
    @param p_modname name of module
    @param seen {class: name} map of classes already seen in the same namespace
    @param inspect_dir when True, enumerate members via dir() instead of __dict__
    """
    action("redoing class %r of module %r", p_name, p_modname)
    # a class object seen under another name becomes a simple alias
    if seen is not None:
        if p_class in seen:
            out(indent, p_name, " = ", seen[p_class])
            out(indent, "")
            return
        else:
            seen[p_class] = p_name
    bases = get_bases(p_class)
    base_def = ""
    skipped_bases = []
    if bases:
        skip_qualifiers = [p_modname, BUILTIN_MOD_NAME, 'exceptions']
        skip_qualifiers.extend(KNOWN_FAKE_REEXPORTERS.get(p_modname, ()))
        bases_list = [] # what we'll render in the class decl
        for base in bases:
            if [1 for (cls, mdl) in KNOWN_FAKE_BASES if cls == base and mdl != self.module]:
                # our base is a wrapper and our module is not its defining module
                skipped_bases.append(str(base))
                continue
            # somehow import every base class
            base_name = base.__name__
            qual_module_name = qualifier_of(base, skip_qualifiers)
            got_existing_import = False
            if qual_module_name:
                if qual_module_name in self.used_imports:
                    import_list = self.used_imports[qual_module_name]
                    if base in import_list:
                        bases_list.append(base_name) # unqualified: already set to import
                        got_existing_import = True
                if not got_existing_import:
                    mangled_qualifier = "__" + qual_module_name.replace('.', '_') # foo.bar -> __foo_bar
                    bases_list.append(mangled_qualifier + "." + base_name)
                    self.hidden_imports[qual_module_name] = mangled_qualifier
            else:
                bases_list.append(base_name)
        base_def = "(" + ", ".join(bases_list) + ")"
        if self.split_modules:
            # each base defined in this package must be imported into the class's own file
            for base in bases_list:
                local_import = self.create_local_import(base)
                if local_import:
                    out(indent, local_import)
    out(indent, "class ", p_name, base_def, ":",
        skipped_bases and " # skipped bases: " + ", ".join(skipped_bases) or "")
    out_doc_attr(out, p_class, indent + 1)
    # inner parts
    methods = {}
    properties = {}
    others = {}
    we_are_the_base_class = p_modname == BUILTIN_MOD_NAME and p_name == "object"
    field_source = {}
    try:
        if hasattr(p_class, "__dict__") and not inspect_dir:
            field_source = p_class.__dict__
            field_keys = field_source.keys() # Jython 2.5.1 _codecs fail here
        else:
            field_keys = dir(p_class) # this includes unwanted inherited methods, but no dict + inheritance is rare
    except:
        field_keys = ()
    # sort members into methods / properties / plain values
    for item_name in field_keys:
        if item_name in ("__doc__", "__module__"):
            if we_are_the_base_class:
                item = "" # must be declared in base types
            else:
                continue # in all other cases must be skipped
        elif keyword.iskeyword(item_name): # for example, PyQt4 contains definitions of methods named 'exec'
            continue
        else:
            try:
                item = getattr(p_class, item_name) # let getters do the magic
            except AttributeError:
                item = field_source[item_name] # have it raw
            except Exception:
                continue
        if is_callable(item) and not isinstance(item, type):
            methods[item_name] = item
        elif is_property(item):
            properties[item_name] = item
        else:
            others[item_name] = item
    #
    if we_are_the_base_class:
        others["__dict__"] = {} # force-feed it, for __dict__ does not contain a reference to itself :)
    # add fake __init__s to have the right sig
    if p_class in FAKE_BUILTIN_INITS:
        methods["__init__"] = self.fake_builtin_init
        note("Faking init of %s", p_name)
    elif '__init__' not in methods:
        init_method = getattr(p_class, '__init__', None)
        if init_method:
            methods['__init__'] = init_method
    #
    seen_funcs = {}
    for item_name in sorted_no_case(methods.keys()):
        item = methods[item_name]
        try:
            self.redo_function(out, item, item_name, indent + 1, p_class, p_modname, classname=p_name, seen=seen_funcs)
        except:
            handle_error_func(item_name, out)
    #
    known_props = KNOWN_PROPS.get(p_modname, {})
    a_setter = "lambda self, v: None"
    a_deleter = "lambda self: None"
    for item_name in sorted_no_case(properties.keys()):
        item = properties[item_name]
        prop_docstring = getattr(item, '__doc__', None)
        prop_key = (p_name, item_name)
        if prop_key in known_props:
            prop_descr = known_props.get(prop_key, None)
            if prop_descr is None:
                continue # explicitly omitted
            acc_line, getter_and_type = prop_descr
            if getter_and_type:
                getter, prop_type = getter_and_type
            else:
                getter, prop_type = None, None
            out(indent + 1, item_name,
                " = property(", format_accessors(acc_line, getter, a_setter, a_deleter), ")"
                )
            if prop_type:
                if prop_docstring:
                    out(indent + 1, '"""', prop_docstring)
                    out(0, "")
                    out(indent + 1, ':type: ', prop_type)
                    out(indent + 1, '"""')
                else:
                    out(indent + 1, '""":type: ', prop_type, '"""')
                out(0, "")
        else:
            # unknown property: emit a permissive default triple
            out(indent + 1, item_name, " = property(lambda self: object(), lambda self, v: None, lambda self: None) # default")
            if prop_docstring:
                out(indent + 1, '"""', prop_docstring, '"""')
            out(0, "")
    if properties:
        out(0, "") # empty line after the block
    #
    for item_name in sorted_no_case(others.keys()):
        item = others[item_name]
        self.fmt_value(out, item, indent + 1, prefix=item_name + " = ")
    if p_name == "object":
        out(indent + 1, "__module__ = ''")
    if others:
        out(0, "") # empty line after the block
    #
    if not methods and not properties and not others:
        out(indent + 1, "pass")
def redo_simple_header(self, p_name):
    """Puts boilerplate code on the top: encoding, module name, origin file, generator version."""
    out = self.header_buf.out # 1st class methods rule :)
    out(0, "# encoding: %s" % OUT_ENCODING) # line 1
    # NOTE: maybe encoding should be selectable
    if hasattr(self.module, "__name__"):
        self_name = self.module.__name__
        if self_name != p_name:
            mod_name = " calls itself " + self_name
        else:
            mod_name = ""
    else:
        mod_name = " does not know its name"
    out(0, "# module ", p_name, mod_name) # line 2
    BUILT_IN_HEADER = "(built-in)"
    if self.mod_filename:
        filename = self.mod_filename
    elif p_name in sys.builtin_module_names:
        filename = BUILT_IN_HEADER
    else:
        filename = getattr(self.module, "__file__", BUILT_IN_HEADER)
    out(0, "# from %s" % filename) # line 3
    out(0, "# by generator %s" % VERSION) # line 4
    if p_name == BUILTIN_MOD_NAME and version[0] == 2 and version[1] >= 6:
        # so that the stub of py2 builtins can mention print() as a function
        out(0, "from __future__ import print_function")
    out_doc_attr(out, self.module, 0)
def redo_imports(self):
    """Find submodules reachable from this module's __dict__, record them in
    imported_modules, and emit an "import X as name" line for each."""
    module_type = type(sys)
    for item_name in self.module.__dict__.keys():
        try:
            item = self.module.__dict__[item_name]
        except:
            continue
        if type(item) is module_type: # not isinstance, py2.7 + PyQt4.QtCore on windows have a bug here
            self.imported_modules[item_name] = item
            self.add_import_header_if_needed()
            ref_notice = getattr(item, "__file__", str(item))
            if hasattr(item, "__name__"):
                self.imports_buf.out(0, "import ", item.__name__, " as ", item_name, " # ", ref_notice)
            else:
                self.imports_buf.out(0, item_name, " = None # ??? name unknown; ", ref_notice)
def add_import_header_if_needed(self):
    """Emit the "# imports" banner once, before the first import line."""
    if not self.imports_buf.isEmpty():
        return
    self.imports_buf.out(0, "")
    self.imports_buf.out(0, "# imports")
def redo(self, p_name, inspect_dir):
    """
    Restores module declarations.
    Intended for built-in modules and thus does not handle import statements.
    @param p_name name of module
    @param inspect_dir when True, list module members via dir() instead of __dict__
    """
    action("redoing header of module %r %r", p_name, str(self.module))
    # Qt modules need a live QCoreApplication before introspection
    if "pyqt4" in p_name.lower(): # qt4 specific patch
        self._initializeQApp4()
    elif "pyqt5" in p_name.lower(): # qt5 specific patch
        self._initializeQApp5()
    self.redo_simple_header(p_name)
    # find whatever other self.imported_modules the module knows; effectively these are imports
    action("redoing imports of module %r %r", p_name, str(self.module))
    try:
        self.redo_imports()
    except:
        pass
    action("redoing innards of module %r %r", p_name, str(self.module))
    module_type = type(sys)
    # group what we have into buckets
    vars_simple = {}
    vars_complex = {}
    funcs = {}
    classes = {}
    module_dict = self.module.__dict__
    if inspect_dir:
        module_dict = dir(self.module)
    for item_name in module_dict:
        note("looking at %s", item_name)
        if item_name in (
                "__dict__", "__doc__", "__module__", "__file__", "__name__", "__builtins__", "__package__"):
            continue # handled otherwise
        try:
            item = getattr(self.module, item_name) # let getters do the magic
        except AttributeError:
            if not item_name in self.module.__dict__: continue
            item = self.module.__dict__[item_name] # have it raw
            # check if it has percolated from an imported module
        except NotImplementedError:
            if not item_name in self.module.__dict__: continue
            item = self.module.__dict__[item_name] # have it raw
        # unless we're adamantly positive that the name was imported, we assume it is defined here
        mod_name = None # module from which p_name might have been imported
        # IronPython has non-trivial reexports in System module, but not in others:
        skip_modname = sys.platform == "cli" and p_name != "System"
        surely_not_imported_mods = KNOWN_FAKE_REEXPORTERS.get(p_name, ())
        ## can't figure weirdness in some modules, assume no reexports:
        #skip_modname = skip_modname or p_name in self.KNOWN_FAKE_REEXPORTERS
        if not skip_modname:
            try:
                mod_name = getattr(item, '__module__', None)
            except:
                pass
        # we assume that module foo.bar never imports foo; foo may import foo.bar. (see pygame and pygame.rect)
        maybe_import_mod_name = mod_name or ""
        import_is_from_top = len(p_name) > len(maybe_import_mod_name) and p_name.startswith(maybe_import_mod_name)
        note("mod_name = %s, prospective = %s, from top = %s", mod_name, maybe_import_mod_name, import_is_from_top)
        want_to_import = False
        if (mod_name
            and mod_name != BUILTIN_MOD_NAME
            and mod_name != p_name
            and mod_name not in surely_not_imported_mods
            and not import_is_from_top
        ):
            # import looks valid, but maybe it's a .py file? we're certain not to import from .py
            # e.g. this rules out _collections import collections and builtins import site.
            try:
                imported = __import__(mod_name) # ok to repeat, Python caches for us
                if imported:
                    qualifiers = mod_name.split(".")[1:]
                    for qual in qualifiers:
                        imported = getattr(imported, qual, None)
                        if not imported:
                            break
                    imported_path = (getattr(imported, '__file__', False) or "").lower()
                    want_to_import = not (imported_path.endswith('.py') or imported_path.endswith('.pyc'))
                    note("path of %r is %r, want? %s", mod_name, imported_path, want_to_import)
            except ImportError:
                want_to_import = False
            # NOTE: if we fail to import, we define 'imported' names here lest we lose them at all
            if want_to_import:
                import_list = self.used_imports[mod_name]
                if item_name not in import_list:
                    import_list.append(item_name)
        if not want_to_import:
            # name considered defined here: bucket it by kind
            if isinstance(item, type) or type(item).__name__ == 'classobj':
                classes[item_name] = item
            elif is_callable(item): # some classes are callable, check them before functions
                funcs[item_name] = item
            elif isinstance(item, module_type):
                continue # self.imported_modules handled above already
            else:
                if isinstance(item, SIMPLEST_TYPES):
                    vars_simple[item_name] = item
                else:
                    vars_complex[item_name] = item
    # sort and output every bucket
    action("outputting innards of module %r %r", p_name, str(self.module))
    #
    omitted_names = OMIT_NAME_IN_MODULE.get(p_name, [])
    if vars_simple:
        out = self.functions_buf.out
        prefix = "" # try to group variables by common prefix
        PREFIX_LEN = 2 # default prefix length if we can't guess better
        out(0, "# Variables with simple values")
        for item_name in sorted_no_case(vars_simple.keys()):
            if item_name in omitted_names:
                out(0, "# definition of " + item_name + " omitted")
                continue
            item = vars_simple[item_name]
            # track the prefix
            if len(item_name) >= PREFIX_LEN:
                prefix_pos = string.rfind(item_name, "_") # most prefixes end in an underscore
                if prefix_pos < 1:
                    prefix_pos = PREFIX_LEN
                beg = item_name[0:prefix_pos]
                if prefix != beg:
                    out(0, "") # space out from other prefix
                    prefix = beg
            else:
                prefix = ""
            # output
            replacement = REPLACE_MODULE_VALUES.get((p_name, item_name), None)
            if replacement is not None:
                out(0, item_name, " = ", replacement, " # real value of type ", str(type(item)), " replaced")
            elif is_skipped_in_module(p_name, item_name):
                t_item = type(item)
                out(0, item_name, " = ", self.invent_initializer(t_item), " # real value of type ", str(t_item),
                    " skipped")
            else:
                self.fmt_value(out, item, 0, prefix=item_name + " = ")
            self._defined[item_name] = True
        out(0, "") # empty line after vars
    #
    if funcs:
        out = self.functions_buf.out
        out(0, "# functions")
        out(0, "")
        seen_funcs = {}
        for item_name in sorted_no_case(funcs.keys()):
            if item_name in omitted_names:
                out(0, "# definition of ", item_name, " omitted")
                continue
            item = funcs[item_name]
            try:
                self.redo_function(out, item, item_name, 0, p_modname=p_name, seen=seen_funcs)
            except:
                handle_error_func(item_name, out)
    else:
        self.functions_buf.out(0, "# no functions")
    #
    if classes:
        self.classes_buf.out(0, "# classes")
        self.classes_buf.out(0, "")
        seen_classes = {}
        # sort classes so that inheritance order is preserved
        cls_list = [] # items are (class_name, mro_tuple)
        for cls_name in sorted_no_case(classes.keys()):
            cls = classes[cls_name]
            ins_index = len(cls_list)
            for i in range(ins_index):
                maybe_child_bases = cls_list[i][1]
                if cls in maybe_child_bases:
                    ins_index = i # we could not go farther than current ins_index
                    break # ...and need not go fartehr than first known child
            cls_list.insert(ins_index, (cls_name, get_mro(cls)))
        # large modules get split into a package, one file per class
        self.split_modules = self.mod_filename and len(cls_list) >= 30
        for item_name in [cls_item[0] for cls_item in cls_list]:
            buf = ClassBuf(item_name, self)
            self.classes_buffs.append(buf)
            out = buf.out
            if item_name in omitted_names:
                out(0, "# definition of ", item_name, " omitted")
                continue
            item = classes[item_name]
            self.redo_class(out, item, item_name, 0, p_modname=p_name, seen=seen_classes, inspect_dir=inspect_dir)
            self._defined[item_name] = True
            out(0, "") # empty line after each item
        if self.doing_builtins and p_name == BUILTIN_MOD_NAME and version[0] < 3:
            # classobj still supported
            txt = classobj_txt
            self.classes_buf.out(0, txt)
        if self.doing_builtins and p_name == BUILTIN_MOD_NAME:
            # synthesize types that cannot be introspected directly
            txt = create_generator()
            self.classes_buf.out(0, txt)
            txt = create_function()
            self.classes_buf.out(0, txt)
            txt = create_method()
            self.classes_buf.out(0, txt)
            txt = create_coroutine()
            self.classes_buf.out(0, txt)
            # Fake <type 'namedtuple'>
            if version[0] >= 3 or (version[0] == 2 and version[1] >= 6):
                namedtuple_text = create_named_tuple()
                self.classes_buf.out(0, namedtuple_text)
    else:
        self.classes_buf.out(0, "# no classes")
    #
    if vars_complex:
        out = self.footer_buf.out
        out(0, "# variables with complex values")
        out(0, "")
        for item_name in sorted_no_case(vars_complex.keys()):
            if item_name in omitted_names:
                out(0, "# definition of " + item_name + " omitted")
                continue
            item = vars_complex[item_name]
            if str(type(item)) == "<type 'namespace#'>":
                continue # this is an IronPython submodule, we mustn't generate a reference for it in the base module
            replacement = REPLACE_MODULE_VALUES.get((p_name, item_name), None)
            if replacement is not None:
                out(0, item_name + " = " + replacement + " # real value of type " + str(type(item)) + " replaced")
            elif is_skipped_in_module(p_name, item_name):
                t_item = type(item)
                out(0, item_name + " = " + self.invent_initializer(t_item) + " # real value of type " + str(
                    t_item) + " skipped")
            else:
                self.fmt_value(out, item, 0, prefix=item_name + " = ", as_name=item_name)
            self._defined[item_name] = True
            out(0, "") # empty line after each item
    values_to_add = ADD_VALUE_IN_MODULE.get(p_name, None)
    if values_to_add:
        self.footer_buf.out(0, "# intermittent names")
        for value in values_to_add:
            self.footer_buf.out(0, value)
    # imports: last, because previous parts could alter used_imports or hidden_imports
    self.output_import_froms()
    if self.imports_buf.isEmpty():
        self.imports_buf.out(0, "# no imports")
    self.imports_buf.out(0, "") # empty line after imports
def output_import_froms(self):
    """Mention all imported names known within the module, wrapping as per PEP.

    Writes "from X import (a, b, ...)" groups for every module in
    self.used_imports, folding the name list at a ~78-column right margin,
    then writes "import X as Y" lines for every hidden import.  Every name
    emitted is also recorded in self._defined.
    """
    out = self.imports_buf.out
    if self.used_imports:
        self.add_import_header_if_needed()
        for mod_name in sorted_no_case(self.used_imports.keys()):
            import_names = self.used_imports[mod_name]
            if import_names:
                self._defined[mod_name] = True
                right_pos = 0  # tracks width of list to fold it at right margin
                # NOTE(review): "% s" (space flag) formats strings identically
                # to "%s", so the output is correct, but it reads like a typo.
                import_heading = "from % s import (" % mod_name
                right_pos += len(import_heading)
                names_pack = [import_heading]
                indent_level = 0
                # Copy before sorting: used_imports values may be shared sets.
                import_names = list(import_names)
                import_names.sort()
                for n in import_names:
                    self._defined[n] = True
                    len_n = len(n)
                    if right_pos + len_n >= 78:
                        # Margin reached: flush the accumulated pack and start
                        # a new (indented) continuation line with this name.
                        out(indent_level, *names_pack)
                        names_pack = [n, ", "]
                        if indent_level == 0:
                            indent_level = 1  # all but first line is indented
                        right_pos = self.indent_size + len_n + 2
                    else:
                        names_pack.append(n)
                        names_pack.append(", ")
                        right_pos += (len_n + 2)
                # last line is...
                if indent_level == 0:  # one line
                    names_pack[0] = names_pack[0][:-1]  # cut off lpar
                    names_pack[-1] = ""  # cut last comma
                else:  # last line of multiline
                    names_pack[-1] = ")"  # last comma -> rpar
                out(indent_level, *names_pack)
                out(0, "")  # empty line after group
    if self.hidden_imports:
        self.add_import_header_if_needed()
        for mod_name in sorted_no_case(self.hidden_imports.keys()):
            out(0, 'import ', mod_name, ' as ', self.hidden_imports[mod_name])
        out(0, "")  # empty line after group
def module_to_package_name(module_name):
    """Return *module_name* with one trailing ".py" extension removed.

    Names that do not end in ".py" are returned unchanged.
    """
    suffix = ".py"
    if module_name.endswith(suffix):
        return module_name[:-len(suffix)]
    return module_name
| |
from pyftpdlib.ftpserver import AbstractedFS
from JumpScale import j
import os
# --- filesystem
# NOTE(review): this local definition shadows the AbstractedFS name imported
# from pyftpdlib.ftpserver at the top of this module.
class AbstractedFS(object):
    """A class used to interact with the file system, providing a
    cross-platform interface compatible with both Windows and
    UNIX style filesystems where all paths use "/" separator.

    AbstractedFS distinguishes between "real" filesystem paths and
    "virtual" ftp paths emulating a UNIX chroot jail where the user
    can not escape its home directory (example: real "/home/user"
    path will be seen as "/" by the client)

    It also provides some utility methods and wraps around all os.*
    calls involving operations against the filesystem like creating
    files or removing directories.
    """

    def __init__(self, root, cmd_channel):
        """
        - (str) root: the user "real" home directory (e.g. '/home/user')
        - (instance) cmd_channel: the FTPHandler class instance
        """
        # Set initial current working directory.
        # By default initial cwd is set to "/" to emulate a chroot jail.
        # If a different behavior is desired (e.g. initial cwd = root,
        # to reflect the real filesystem) users overriding this class
        # are responsible to set _cwd attribute as necessary.
        self._cwd = '/'
        self._root = root
        self.cmd_channel = cmd_channel
        # NOTE(review): never assigned anywhere in this class; presumably
        # set externally by the FTP handler -- confirm before relying on it.
        self.handler = None

    @property
    def root(self):
        """The user home directory."""
        return self._root

    @property
    def cwd(self):
        """The user current working directory."""
        return self._cwd

    @root.setter
    def root(self, path):
        # Plain assignment; no validation is performed on the new root.
        self._root = path

    @cwd.setter
    def cwd(self, path):
        # Plain assignment; no normalization is performed here.
        self._cwd = path

    # --- Pathname / conversion utilities

    def ftpnorm(self, ftppath):
        """Normalize a "virtual" ftp pathname (tipically the raw string
        coming from client) depending on the current working directory.
        Example (having "/foo" as current working directory):
        >>> ftpnorm('bar')
        '/foo/bar'
        Note: directory separators are system independent ("/").
        Pathname returned is always absolutized.
        """
        if os.path.isabs(ftppath):
            p = os.path.normpath(ftppath)
        else:
            p = os.path.normpath(os.path.join(self.cwd, ftppath))
        # normalize string in a standard web-path notation having '/'
        # as separator.
        p = p.replace("\\", "/")
        # os.path.normpath supports UNC paths (e.g. "//a/b/c") but we
        # don't need them. In case we get an UNC path we collapse
        # redundant separators appearing at the beginning of the string
        while p[:2] == '//':
            p = p[1:]
        # Anti path traversal: don't trust user input, in the event
        # that self.cwd is not absolute, return "/" as a safety measure.
        # This is for extra protection, maybe not really necessary.
        if not os.path.isabs(p):
            p = "/"
        return p

    def ftp2fs(self, ftppath):
        """Translate a "virtual" ftp pathname (tipically the raw string
        coming from client) into equivalent absolute "real" filesystem
        pathname.
        Example (having "/home/user" as root directory):
        >>> ftp2fs("foo")
        '/home/user/foo'
        Note: directory separators are system dependent.
        """
        # as far as I know, it should always be path traversal safe...
        if os.path.normpath(self.root) == os.sep:
            return os.path.realpath(os.path.normpath(self.ftpnorm(ftppath)))
        else:
            # Drop the leading "/" so the join stays below self.root.
            p = self.ftpnorm(ftppath)[1:]
            return os.path.realpath(os.path.normpath(os.path.join(self.root, p)))

    def fs2ftp(self, fspath):
        """Translate a "real" filesystem pathname into equivalent
        absolute "virtual" ftp pathname depending on the user's
        root directory.
        Example (having "/home/user" as root directory):
        >>> fs2ftp("/home/user/foo")
        '/foo'
        As for ftpnorm, directory separators are system independent
        ("/") and pathname returned is always absolutized.
        On invalid pathnames escaping from user's root directory
        (e.g. "/home" when root is "/home/user") always return "/".
        """
        if os.path.isabs(fspath):
            p = os.path.normpath(fspath)
        else:
            p = os.path.normpath(os.path.join(self.root, fspath))
        if not self.validpath(p):
            return '/'
        p = p.replace(os.sep, "/")
        p = p[len(self.root):]
        if not p.startswith('/'):
            p = '/' + p
        return p

    def validpath(self, path):
        """Check whether the path belongs to user's home directory.
        Expected argument is a "real" filesystem pathname.
        If path is a symbolic link it is resolved to check its real
        destination.
        Pathnames escaping from user's root directory are considered
        not valid.
        """
        # Deliberately short-circuited: every path is accepted, which
        # disables the chroot containment check below (dead code).
        return True
        # NOTE(review): unreachable -- and it calls self.realpath(), whose
        # definition is commented out further down in this class, so
        # re-enabling this as-is would raise AttributeError.
        root = self.realpath(self.root)
        path = self.realpath(path)
        if not root.endswith(os.sep):
            root = root + os.sep
        if not path.endswith(os.sep):
            path = path + os.sep
        if path[0:len(root)] == root:
            return True
        return False

    # --- Wrapper methods around open() and tempfile.mkstemp

    def open(self, filename, mode):
        """Open a file returning its handler."""
        return open(filename, mode)

    def mkstemp(self, suffix='', prefix='', dir=None, mode='wb'):
        """A wrap around tempfile.mkstemp creating a file with a unique
        name. Unlike mkstemp it returns an object with a file-like
        interface.

        NOTE(review): `tempfile` is not imported in this module's visible
        imports; calling this would raise NameError -- confirm where it is
        expected to come from.
        """
        class FileWrapper:
            # Delegates all file operations to the wrapped file object
            # while exposing the generated temporary name.
            def __init__(self, fd, name):
                self.file = fd
                self.name = name

            def __getattr__(self, attr):
                return getattr(self.file, attr)

        text = not 'b' in mode
        # max number of tries to find out a unique file name
        tempfile.TMP_MAX = 50
        fd, name = tempfile.mkstemp(suffix, prefix, dir, text=text)
        file = os.fdopen(fd, mode)
        return FileWrapper(file, name)

    # --- Wrapper methods around os.* calls

    def chdir(self, ftppath):
        """Change the current directory."""
        # temporarily join the specified directory to see if we have
        # permissions to do so
        path = self.ftp2fs(ftppath)
        # Python 2 print statement: debug trace of the directory change.
        print "chdir for %s : %s" % (ftppath, path)
        #basedir = os.getcwd()
        try:
            os.chdir(path)
        except OSError:
            raise
        else:
            # Store the *virtual* path, not the real one.
            self._cwd = ftppath

    def mkdir(self, ftppath):
        """Create the specified directory."""
        # NOTE(review): ftp2fs() already calls ftpnorm(), so this explicit
        # normalization is redundant but harmless.
        ftppath = self.ftpnorm(ftppath)
        path = self.ftp2fs(ftppath)
        j.system.fs.createDir(path)

    def listdir(self, path):
        """List the content of a directory."""
        path1 = self.ftp2fs(path)
        # print "listdir:%s:%s" %(path,path1)
        return os.listdir(path1)

    def rmdir(self, path):
        """Remove the specified directory."""
        path = self.ftp2fs(path)
        os.rmdir(path)

    def remove(self, path):
        """Remove the specified file."""
        # NOTE(review): redundant ftpnorm() -- ftp2fs() normalizes already.
        path = self.ftpnorm(path)
        path = self.ftp2fs(path)
        os.remove(path)

    def rename(self, src, dst):
        """Rename the specified src file to the dst filename."""
        src = self.ftp2fs(src)
        dst = self.ftp2fs(dst)
        os.rename(src, dst)

    def chmod(self, path, mode):
        """Change file/directory mode."""
        # Deliberately disabled: always raises, so the code below is
        # unreachable dead code.
        raise NotImplementedError
        path = self.ftp2fs(path)
        if not hasattr(os, 'chmod'):
            raise NotImplementedError
        os.chmod(path, mode)

    def stat(self, path):
        """Perform a stat() system call on the given path.

        NOTE(review): unlike lstat(), the path is NOT translated through
        ftp2fs() here -- callers pass a real filesystem path.
        """
        return os.stat(path)

    def lstat(self, path):
        """Like stat but does not follow symbolic links."""
        path = self.ftp2fs(path)
        return os.lstat(path)

    # Class-level fallback: on platforms without os.lstat (e.g. old
    # Windows) alias lstat to stat.
    if not hasattr(os, 'lstat'):
        lstat = stat

    # --- Wrapper methods around os.path.* calls

    def isfile(self, path):
        """Return True if path is a file."""
        path = self.ftp2fs(path)
        return os.path.isfile(path)

    def islink(self, path):
        """Return True if path is a symbolic link."""
        path = self.ftp2fs(path)
        return os.path.islink(path)

    def isdir(self, path):
        """Return True if path is a directory."""
        path = self.ftp2fs(path)
        return os.path.isdir(path)

    def getsize(self, path):
        """Return the size of the specified file in bytes."""
        path = self.ftp2fs(path)
        return os.path.getsize(path)

    def getmtime(self, path):
        """Return the last modified time as a number of seconds since
        the epoch."""
        path = self.ftp2fs(path)
        return os.path.getmtime(path)

    # def realpath(self, path):
    #"""Return the canonical version of path eliminating any
    # symbolic links encountered in the path (if they are
    # supported by the operating system).
    #"""
    # return os.path.realpath(path)

    def lexists(self, path):
        """Return True if path refers to an existing path, including
        a broken or circular symbolic link.
        """
        path = self.ftp2fs(path)
        return os.path.lexists(path)

    def get_user_by_uid(self, uid):
        """Return the username associated with user id.
        If this can't be determined return raw uid instead.
        On Windows just return "owner".

        NOTE(review): `pwd` is not imported in this module's visible
        imports; presumably provided (possibly as None on Windows) by the
        surrounding environment -- confirm.
        """
        if pwd is not None:
            try:
                return pwd.getpwuid(uid).pw_name
            except KeyError:
                return uid
        else:
            return "owner"

    def get_group_by_gid(self, gid):
        """Return the groupname associated with group id.
        If this can't be determined return raw gid instead.
        On Windows just return "group".

        NOTE(review): `grp` is not imported here either -- see above.
        """
        if grp is not None:
            try:
                return grp.getgrgid(gid).gr_name
            except KeyError:
                return gid
        else:
            return "group"

    # readlink is only defined where the platform supports it.
    if hasattr(os, 'readlink'):
        def readlink(self, path):
            """Return a string representing the path to which a
            symbolic link points.
            """
            return os.readlink(path)

    # --- Listing utilities

    def get_list_dir(self, path):
        """"Return an iterator object that yields a directory listing
        in a form suitable for LIST command.
        """
        if self.isdir(path):
            listing = sorted(self.listdir(path))
            return self.format_list(path, listing)
        # if path is a file or a symlink we return information about it
        else:
            basedir, filename = os.path.split(path)
            self.lstat(path)  # raise exc in case of problems
            return self.format_list(basedir, [filename])

    def format_list(self, basedir, listing, ignore_err=True):
        """Return an iterator object that yields the entries of given
        directory emulating the "/bin/ls -lA" UNIX command output.
        - (str) basedir: the absolute dirname.
        - (list) listing: the names of the entries in basedir
        - (bool) ignore_err: when False raise exception if os.lstat()
        call fails.
        On platforms which do not support the pwd and grp modules (such
        as Windows), ownership is printed as "owner" and "group" as a
        default, and number of hard links is always "1". On UNIX
        systems, the actual owner, group, and number of links are
        printed.
        This is how output appears to client:
        -rw-rw-rw-   1 owner   group    7045120 Sep 02  3:47 music.mp3
        drwxrwxrwx   1 owner   group          0 Aug 31 18:50 e-books
        -rw-rw-rw-   1 owner   group        380 Sep 02  3:40 module.py

        NOTE(review): relies on `time`, `stat`, `_filemode` and
        `_months_map`, none of which are imported in this module's visible
        imports -- presumably expected from pyftpdlib.ftpserver; confirm.
        """
        if self.cmd_channel.use_gmt_times:
            timefunc = time.gmtime
        else:
            timefunc = time.localtime
        now = time.time()
        for basename in listing:
            file = os.path.join(basedir, basename)
            try:
                st = self.lstat(file)
            except OSError:
                if ignore_err:
                    continue
                raise
            perms = _filemode(st.st_mode)  # permissions
            nlinks = st.st_nlink  # number of links to inode
            if not nlinks:  # non-posix system, let's use a bogus value
                nlinks = 1
            size = st.st_size  # file size
            uname = self.get_user_by_uid(st.st_uid)
            gname = self.get_group_by_gid(st.st_gid)
            mtime = timefunc(st.st_mtime)
            # if modificaton time > 6 months shows "month year"
            # else "month hh:mm"; this matches proftpd format, see:
            # http://code.google.com/p/pyftpdlib/issues/detail?id=187
            if (now - st.st_mtime) > 180 * 24 * 60 * 60:
                fmtstr = "%d  %Y"
            else:
                fmtstr = "%d %H:%M"
            try:
                mtimestr = "%s %s" % (_months_map[mtime.tm_mon],
                                      time.strftime(fmtstr, mtime))
            except ValueError:
                # It could be raised if last mtime happens to be too
                # old (prior to year 1900) in which case we return
                # the current time as last mtime.
                mtime = timefunc()
                mtimestr = "%s %s" % (_months_map[mtime.tm_mon],
                                      time.strftime("%d %H:%M", mtime))
            # if the file is a symlink, resolve it, e.g. "symlink -> realfile"
            if stat.S_ISLNK(st.st_mode) and hasattr(self, 'readlink'):
                basename = basename + " -> " + self.readlink(file)
            # formatting is matched with proftpd ls output
            yield "%s %3s %-8s %-8s %8s %s %s\r\n" % (perms, nlinks, uname, gname,
                                                      size, mtimestr, basename)

    def format_mlsx(self, basedir, listing, perms, facts, ignore_err=False):
        """Return an iterator object that yields the entries of a given
        directory or of a single file in a form suitable with MLSD and
        MLST commands.
        Every entry includes a list of "facts" referring the listed
        element. See RFC-3659, chapter 7, to see what every single
        fact stands for.
        - (str) basedir: the absolute dirname.
        - (list) listing: the names of the entries in basedir
        - (str) perms: the string referencing the user permissions.
        - (str) facts: the list of "facts" to be returned.
        - (bool) ignore_err: when False raise exception if os.stat()
        call fails.
        Note that "facts" returned may change depending on the platform
        and on what user specified by using the OPTS command.
        This is how output could appear to the client issuing
        a MLSD request:
        type=file;size=156;perm=r;modify=20071029155301;unique=801cd2; music.mp3
        type=dir;size=0;perm=el;modify=20071127230206;unique=801e33; ebooks
        type=file;size=211;perm=r;modify=20071103093626;unique=801e32; module.py
        """
        # NOTE(review): unlike format_list(), basedir is translated to a
        # real path here before joining with each entry.
        basedir = self.ftp2fs(basedir)
        if self.cmd_channel.use_gmt_times:
            timefunc = time.gmtime
        else:
            timefunc = time.localtime
        # Derive directory vs file permission fact strings from `perms`.
        permdir = ''.join([x for x in perms if x not in 'arw'])
        permfile = ''.join([x for x in perms if x not in 'celmp'])
        if ('w' in perms) or ('a' in perms) or ('f' in perms):
            permdir += 'c'
        if 'd' in perms:
            permdir += 'p'
        for basename in listing:
            file = os.path.join(basedir, basename)
            retfacts = dict()
            # in order to properly implement 'unique' fact (RFC-3659,
            # chapter 7.5.2) we are supposed to follow symlinks, hence
            # use os.stat() instead of os.lstat()
            try:
                st = self.stat(file)
            except OSError:
                if ignore_err:
                    # Python 2 print statement: debug trace of stat failure.
                    print "error for %s, cannot list (stat)" % file
                    continue
                raise
            # type + perm
            if stat.S_ISDIR(st.st_mode):
                if 'type' in facts:
                    if basename == '.':
                        retfacts['type'] = 'cdir'
                    elif basename == '..':
                        retfacts['type'] = 'pdir'
                    else:
                        retfacts['type'] = 'dir'
                if 'perm' in facts:
                    retfacts['perm'] = permdir
            else:
                if 'type' in facts:
                    retfacts['type'] = 'file'
                if 'perm' in facts:
                    retfacts['perm'] = permfile
            if 'size' in facts:
                retfacts['size'] = st.st_size  # file size
            # last modification time
            if 'modify' in facts:
                try:
                    retfacts['modify'] = time.strftime("%Y%m%d%H%M%S",
                                                       timefunc(st.st_mtime))
                # it could be raised if last mtime happens to be too old
                # (prior to year 1900)
                except ValueError:
                    pass
            if 'create' in facts:
                # on Windows we can provide also the creation time
                try:
                    retfacts['create'] = time.strftime("%Y%m%d%H%M%S",
                                                       timefunc(st.st_ctime))
                except ValueError:
                    pass
            # UNIX only
            if 'unix.mode' in facts:
                retfacts['unix.mode'] = oct(st.st_mode & 0o777)
            if 'unix.uid' in facts:
                retfacts['unix.uid'] = st.st_uid
            if 'unix.gid' in facts:
                retfacts['unix.gid'] = st.st_gid
            # We provide unique fact (see RFC-3659, chapter 7.5.2) on
            # posix platforms only; we get it by mixing st_dev and
            # st_ino values which should be enough for granting an
            # uniqueness for the file listed.
            # The same approach is used by pure-ftpd.
            # Implementors who want to provide unique fact on other
            # platforms should use some platform-specific method (e.g.
            # on Windows NTFS filesystems MTF records could be used).
            if 'unique' in facts:
                retfacts['unique'] = "%xg%x" % (st.st_dev, st.st_ino)
            # facts can be in any order but we sort them by name
            factstring = "".join(["%s=%s;" % (x, retfacts[x])
                                  for x in sorted(retfacts.keys())])
            yield "%s %s\r\n" % (factstring, basename)
class RootFilesystem(AbstractedFS):
    """Virtual root filesystem.

    Presents a read-only, entirely synthetic "/" containing a fixed set of
    pseudo-directories; all mutating operations are no-ops or unsupported.
    """

    def __init__(self, root, cmd_channel):
        """
        - (str) root: the user "real" home directory
        - (instance) cmd_channel: the FTPHandler class instance
        """
        #_Base.__init__(self, root, cmd_channel)
        AbstractedFS.__init__(self, root, cmd_channel)
        # initial cwd was set to "/" to emulate a chroot jail
        self.cwd = "/"

    def validpath(self, path):
        # Every path is valid inside the synthetic root.
        return True

    def mkstemp(self, suffix='', prefix='', dir=None, mode='wb'):
        # Temporary files cannot be created in the virtual root.
        return None

    def listdir(self, path):
        # The virtual root always contains these fixed pseudo-directories.
        return ["spaces", "buckets", "actors", "contentdirs", "stor"]

    def chdir(self, path):
        # The only directory at this level is "/" itself.
        self._cwd = "/"

    def mkdir(self, path):
        """Directory creation is not supported in the virtual root.

        BUG FIX: previously this called j.system.fs.createDir(pathr) with
        the undefined name `pathr`, so callers got a NameError instead of
        the intended RuntimeError.
        """
        raise RuntimeError("not implemented")

    def rmdir(self, path):
        # No-op: entries of the virtual root cannot be removed.
        return

    def remove(self, path):
        # No-op: there are no real files here.
        return

    def rename(self, src, dst):
        # No-op: renaming is not supported.
        return

    def chmod(self, path, mode):
        # No-op: permissions are synthetic.
        return

    def stat(self, path):
        # No stat information is available for virtual entries.
        return

    def lstat(self, path):
        return self.stat(path)

    def isfile(self, path):
        """Return True if path is a file."""
        return False

    def islink(self, path):
        """Return True if path is a symbolic link."""
        return False

    def isdir(self, path):
        """Return True if path is a directory."""
        return True

    def getsize(self, path):
        """Return the size of the specified file in bytes."""
        return 0

    def getmtime(self, path):
        """Return the last modified time as a number of seconds since
        the epoch."""
        return 0

    def format_list(self, basedir, listing, ignore_err=True):
        # Emit a minimal fixed "ls -l"-style line for every entry.
        mtimestr = "Sep 02 3:40"
        for basename in listing:
            yield "%s %3s %-8s %-8s %8s %s %s\r\n" % ("elc", 0, "", "", 0, mtimestr, basename)

    def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True):
        # Every entry is presented as a directory with fixed MLSx facts.
        for dirname in listing:
            item = "type=dir;size=0;perm=el;modify=20071127230206; %s\r\n" % dirname
            # print item,
            yield item

    def open(self, filename, mode):
        # Files cannot be opened in the virtual root.
        return None
class RootFilesystemList(RootFilesystem):
    """
    basis for 1 level down eg spaces

    A virtual directory whose entries come from a callable supplied at
    construction time.
    """

    def __init__(self, root, cmd_channel, list):
        # NOTE: parameter `list` shadows the builtin; it is a callable
        # returning the entries for this level.
        # NOTE(review): calls AbstractedFS.__init__ directly, deliberately
        # skipping RootFilesystem.__init__ (which would force cwd to "/").
        AbstractedFS.__init__(self, root, cmd_channel)
        self.cwd = root
        self.list = list

    def listdir(self, path):
        # Python 2 print statement: debug trace of the listing request.
        print "list for rootfilesystemlist: %s" % self.cwd
        # Entries are produced dynamically by the injected callable.
        return self.list()

    def chdir(self, path):
        print "chdirrootfslist:%s" % path
        # Store the virtual path verbatim.
        self._cwd = path
        #self._cwd = self.fs2ftp(path)

    def fs2ftp(self, path):
        # Strip the handler's root prefix and normalize separators to "/".
        p = j.system.fs.pathRemoveDirPart(path, self.cmd_channel.rootpath).replace("\\", "/")
        if len(p) != 0 and p[0] != "/":
            p = "/" + p
        # print "fs2ftp_list: %s -> %s" % (path,p)
        return p

    def ftp2fs(self, ftppath):
        # Every virtual path at this level maps onto the single real root.
        return self.root
class BaseFilesystem(AbstractedFS):
    """Filesystem rooted at a real directory, exposed below an FTP prefix.

    Maps between "virtual" ftp paths (below ``ftproot``) and "real"
    filesystem paths (below ``root``), hiding housekeeping entries from
    listings.
    """

    def __init__(self, root, cmd_channel, ftproot, cwd, readonly=False):
        """
        - (str) root: the real directory backing this filesystem
        - (instance) cmd_channel: the FTPHandler class instance
        - (str) ftproot: the virtual prefix under which root is exposed
        - (str) cwd: the initial current working directory
        - (bool) readonly: reserved flag, currently not enforced
        """
        #_Base.__init__(self, root, cmd_channel)
        AbstractedFS.__init__(self, root, cmd_channel)
        self.cwd = cwd
        self.cwdftp = ""
        self.ftproot = ftproot
        self.readonly = readonly

    # def chdir(self, path):
    #     self._cwd=path

    def ftp2fs(self, ftppath):
        """Translate a virtual ftp path into the real filesystem path."""
        if ftppath.startswith(self.ftproot):
            ftppath = ftppath[len(self.ftproot):]
        return j.system.fs.joinPaths(self.root, ftppath)

    def _ignorePath(self, item):
        """Return a truthy value for entries that must be hidden."""
        extension = j.system.fs.getFileExtension(item)
        if item.startswith(".quarantine") or item.startswith(".tmb"):
            # Best-effort cleanup of quarantine/thumbnail leftovers; any
            # failure to delete is deliberately ignored.
            try:
                j.system.fs.remove(item)
            except:
                pass
            return True
        elif extension == "pyc":
            return True
        # Anything else is listed (implicitly returns None).

    def listdir(self, path):
        """List the content of a directory, hiding ignored entries."""
        realpath = self.ftp2fs(path)
        return [entry for entry in os.listdir(realpath)
                if not self._ignorePath(entry)]

    def mkdir(self, path):
        """Create the directory corresponding to the given ftp path."""
        j.system.fs.createDir(self.ftp2fs(path))

    def fs2ftp(self, fspath):
        """Translate a real filesystem path back into a virtual ftp path."""
        relative = j.system.fs.pathRemoveDirPart(fspath, self.root)
        if len(relative) != 0 and relative[0] != "/":
            relative = "/" + relative
        return self.ftproot + relative

    def open(self, filename, mode):
        """Open a file returning its handler."""
        #@todo check on extension .redirect (file path: $originalpath.$size.redirect) read from redirect where to go and repoint open
        # if self.readonly and "w" in mode:
        #     return None
        return open(filename, mode)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_serialization import jsonutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints as constr
from heat.engine import function
from heat.engine.hot import parameters as hot_param
from heat.engine import parameters
from heat.engine import support
# Legacy (CFN-style, CamelCase) property-schema keys.  The chained
# tuple-unpacking assigns each name its string form while SCHEMA_KEYS keeps
# the complete tuple for membership checks in Schema.from_legacy().
SCHEMA_KEYS = (
    REQUIRED, IMPLEMENTED, DEFAULT, TYPE, SCHEMA,
    ALLOWED_PATTERN, MIN_VALUE, MAX_VALUE, ALLOWED_VALUES,
    MIN_LENGTH, MAX_LENGTH, DESCRIPTION, UPDATE_ALLOWED,
    IMMUTABLE,
) = (
    'Required', 'Implemented', 'Default', 'Type', 'Schema',
    'AllowedPattern', 'MinValue', 'MaxValue', 'AllowedValues',
    'MinLength', 'MaxLength', 'Description', 'UpdateAllowed',
    'Immutable',
)
class Schema(constr.Schema):
    """
    Schema class for validating resource properties.

    This class is used for defining schema constraints for resource properties.
    It inherits generic validation features from the base Schema class and add
    processing that is specific to resource properties.
    """

    # HOT-style (lower-case) schema keys.  NOTE: this rebinds the
    # TYPE/DESCRIPTION/... names as *class* attributes with lower-case
    # values; inside method bodies the bare names still resolve to the
    # module-level CamelCase constants (class scope is not in the method
    # lookup chain).
    KEYS = (
        TYPE, DESCRIPTION, DEFAULT, SCHEMA, REQUIRED, CONSTRAINTS,
        UPDATE_ALLOWED, IMMUTABLE,
    ) = (
        'type', 'description', 'default', 'schema', 'required', 'constraints',
        'update_allowed', 'immutable',
    )

    # NOTE(review): support_status uses a mutable default instance shared by
    # every call that omits it -- presumably SupportStatus is treated as
    # immutable; confirm before relying on that.
    def __init__(self, data_type, description=None,
                 default=None, schema=None,
                 required=False, constraints=None,
                 implemented=True,
                 update_allowed=False,
                 immutable=False,
                 support_status=support.SupportStatus(),
                 allow_conversion=False):
        super(Schema, self).__init__(data_type, description, default,
                                     schema, required, constraints)
        self.implemented = implemented
        self.update_allowed = update_allowed
        self.immutable = immutable
        self.support_status = support_status
        self.allow_conversion = allow_conversion
        # validate structural correctness of schema itself
        self.validate()

    @classmethod
    def from_legacy(cls, schema_dict):
        """
        Return a Property Schema object from a legacy schema dictionary.

        Already-constructed Schema objects pass through unchanged; legacy
        dicts keyed by the module-level CamelCase SCHEMA_KEYS are converted,
        recursing into nested LIST/MAP sub-schemata.
        """
        # Check for fully-fledged Schema objects
        if isinstance(schema_dict, cls):
            return schema_dict

        unknown = [k for k in schema_dict if k not in SCHEMA_KEYS]
        if unknown:
            raise exception.InvalidSchemaError(
                message=_('Unknown key(s) %s') % unknown)

        def constraints():
            # Generator of constraint objects derived from the legacy keys.
            def get_num(key):
                val = schema_dict.get(key)
                if val is not None:
                    val = Schema.str_to_num(val)
                return val

            if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
                yield constr.Range(get_num(MIN_VALUE), get_num(MAX_VALUE))
            if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
                yield constr.Length(get_num(MIN_LENGTH), get_num(MAX_LENGTH))
            if ALLOWED_VALUES in schema_dict:
                yield constr.AllowedValues(schema_dict[ALLOWED_VALUES])
            if ALLOWED_PATTERN in schema_dict:
                yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN])

        try:
            data_type = schema_dict[TYPE]
        except KeyError:
            raise exception.InvalidSchemaError(
                message=_('No %s specified') % TYPE)

        # Nested schemata are only meaningful for LIST and MAP types.
        if SCHEMA in schema_dict:
            if data_type == Schema.LIST:
                ss = cls.from_legacy(schema_dict[SCHEMA])
            elif data_type == Schema.MAP:
                schema_dicts = schema_dict[SCHEMA].items()
                ss = dict((n, cls.from_legacy(sd)) for n, sd in schema_dicts)
            else:
                raise exception.InvalidSchemaError(
                    message=_('%(schema)s supplied for %(type)s %(data)s') %
                    dict(schema=SCHEMA, type=TYPE, data=data_type))
        else:
            ss = None

        return cls(data_type,
                   description=schema_dict.get(DESCRIPTION),
                   default=schema_dict.get(DEFAULT),
                   schema=ss,
                   required=schema_dict.get(REQUIRED, False),
                   constraints=list(constraints()),
                   implemented=schema_dict.get(IMPLEMENTED, True),
                   update_allowed=schema_dict.get(UPDATE_ALLOWED, False),
                   immutable=schema_dict.get(IMMUTABLE, False))

    @classmethod
    def from_parameter(cls, param):
        """
        Return a Property Schema corresponding to a Parameter Schema.

        Convert a parameter schema from a provider template to a property
        Schema for the corresponding resource facade.
        """
        # map param types to property types
        param_type_map = {
            param.STRING: cls.STRING,
            param.NUMBER: cls.NUMBER,
            param.LIST: cls.LIST,
            param.MAP: cls.MAP,
            param.BOOLEAN: cls.BOOLEAN
        }

        # allow_conversion allows slightly more flexible type conversion
        # where property->parameter types don't align, primarily when
        # a json parameter value is passed via a Map property, which requires
        # some coercion to pass strings or lists (which are both valid for
        # Json parameters but not for Map properties).
        allow_conversion = param.type == param.MAP

        # make update_allowed true by default on TemplateResources
        # as the template should deal with this.
        return cls(data_type=param_type_map.get(param.type, cls.MAP),
                   description=param.description,
                   required=param.required,
                   constraints=param.constraints,
                   update_allowed=True,
                   immutable=False,
                   allow_conversion=allow_conversion,
                   default=param.default)

    def allowed_param_prop_type(self):
        """
        Return allowed type of Property Schema converted from parameter.

        Especially, when generating Schema from parameter, Integer Property
        Schema will be supplied by Number parameter.
        """
        param_type_map = {
            self.INTEGER: self.NUMBER,
            self.STRING: self.STRING,
            self.NUMBER: self.NUMBER,
            self.BOOLEAN: self.BOOLEAN,
            self.LIST: self.LIST,
            self.MAP: self.MAP
        }

        return param_type_map[self.type]

    def __getitem__(self, key):
        # Expose the two property-specific keys; everything else is
        # delegated to the base Schema mapping behaviour.
        if key == self.UPDATE_ALLOWED:
            return self.update_allowed
        elif key == self.IMMUTABLE:
            return self.immutable
        else:
            return super(Schema, self).__getitem__(key)
def schemata(schema_dicts):
    """
    Return dictionary of Schema objects for given dictionary of schemata.

    The input schemata are converted from the legacy (dictionary-based)
    format to Schema objects where necessary.
    """
    return {name: Schema.from_legacy(schema)
            for name, schema in schema_dicts.items()}
class Property(object):
    """A single resource property: a Schema plus typed value coercion.

    Wraps a property Schema and provides getters that coerce a raw
    (template-supplied) value into the schema's declared data type,
    substituting the schema default (or a type-appropriate zero value)
    when no value is given.
    """

    def __init__(self, schema, name=None, context=None):
        # Accept either a Schema object or a legacy schema dictionary.
        self.schema = Schema.from_legacy(schema)
        self.name = name
        self.context = context

    def required(self):
        """Return True if a value for this property must be provided."""
        return self.schema.required

    def implemented(self):
        """Return True if this property is implemented by the resource."""
        return self.schema.implemented

    def update_allowed(self):
        """Return True if the property may be updated without replacement."""
        return self.schema.update_allowed

    def immutable(self):
        """Return True if updates to this property are forbidden."""
        return self.schema.immutable

    def has_default(self):
        """Return True if the schema declares a (non-None) default."""
        return self.schema.default is not None

    def default(self):
        """Return the schema's default value (may be None)."""
        return self.schema.default

    def type(self):
        """Return the schema's declared data type."""
        return self.schema.type

    def support_status(self):
        """Return the schema's support status object."""
        return self.schema.support_status

    def _get_integer(self, value):
        """Coerce value to int, falling back to the default or 0.

        NOTE: the fallback previously used ``has_default() and default()
        or 0``, which silently replaced a falsy default with the literal;
        the conditional form (already used by _get_map) honours any
        non-None default.  The same fix is applied to the other getters.
        """
        if value is None:
            value = self.default() if self.has_default() else 0
        try:
            value = int(value)
        except ValueError:
            raise TypeError(_("Value '%s' is not an integer") % value)
        else:
            return value

    def _get_number(self, value):
        """Coerce value to a number via Schema.str_to_num."""
        if value is None:
            value = self.default() if self.has_default() else 0
        return Schema.str_to_num(value)

    def _get_string(self, value):
        """Coerce value to a string; bool and int values are stringified."""
        if value is None:
            value = self.default() if self.has_default() else ''
        if not isinstance(value, six.string_types):
            if isinstance(value, (bool, int)):
                value = six.text_type(value)
            else:
                raise ValueError(_('Value must be a string'))
        return value

    def _get_children(self, child_values, keys=None, validate=False):
        """Resolve child values against the nested schema, if any.

        Returns (key, value) pairs when a nested schema exists, otherwise
        the child values unchanged.
        """
        if self.schema.schema is not None:
            if keys is None:
                keys = list(self.schema.schema)
            schemata = dict((k, self.schema.schema[k]) for k in keys)
            properties = Properties(schemata, dict(child_values),
                                    context=self.context)
            if validate:
                properties.validate()
            return ((k, properties[k]) for k in keys)
        else:
            return child_values

    def _get_map(self, value, validate=False):
        """Coerce value to a dict, recursing into any nested schema."""
        if value is None:
            value = self.default() if self.has_default() else {}
        if not isinstance(value, collections.Mapping):
            # This is to handle passing Lists via Json parameters exposed
            # via a provider resource, in particular lists-of-dicts which
            # cannot be handled correctly via comma_delimited_list
            if self.schema.allow_conversion:
                if isinstance(value, six.string_types):
                    return value
                elif isinstance(value, collections.Sequence):
                    return jsonutils.dumps(value)
            raise TypeError(_('"%s" is not a map') % value)

        return dict(self._get_children(six.iteritems(value),
                                       validate=validate))

    def _get_list(self, value, validate=False):
        """Coerce value to a list, recursing into any nested schema."""
        if value is None:
            value = self.default() if self.has_default() else []
        if (not isinstance(value, collections.Sequence) or
                isinstance(value, six.string_types)):
            raise TypeError(_('"%s" is not a list') % repr(value))

        return [v[1] for v in self._get_children(enumerate(value),
                                                 range(len(value)),
                                                 validate)]

    def _get_bool(self, value):
        """Coerce value to bool, accepting 'true'/'false' strings."""
        if value is None:
            value = self.default() if self.has_default() else False
        if isinstance(value, bool):
            return value
        normalised = value.lower()
        if normalised not in ['true', 'false']:
            raise ValueError(_('"%s" is not a valid boolean') % normalised)

        return normalised == 'true'

    def get_value(self, value, validate=False):
        """Get value from raw value and sanitize according to data type."""
        t = self.type()
        if t == Schema.STRING:
            _value = self._get_string(value)
        elif t == Schema.INTEGER:
            _value = self._get_integer(value)
        elif t == Schema.NUMBER:
            _value = self._get_number(value)
        elif t == Schema.MAP:
            _value = self._get_map(value, validate)
        elif t == Schema.LIST:
            _value = self._get_list(value, validate)
        elif t == Schema.BOOLEAN:
            _value = self._get_bool(value)
        # NOTE(review): an unrecognized type would leave _value unbound and
        # raise NameError below -- presumably Schema restricts the type set;
        # confirm before extending the type list.
        if validate:
            self.schema.validate_constraints(_value, self.context)
        return _value
class Properties(collections.Mapping):
def __init__(self, schema, data, resolver=lambda d: d, parent_name=None,
context=None, section=None):
self.props = dict((k, Property(s, k, context))
for k, s in schema.items())
self.resolve = resolver
self.data = data
self.error_prefix = []
if parent_name is not None:
self.error_prefix.append(parent_name)
if section is not None:
self.error_prefix.append(section)
self.context = context
@staticmethod
def schema_from_params(params_snippet):
"""
Convert a template snippet that defines parameters
into a properties schema
:param params_snippet: parameter definition from a template
:returns: an equivalent properties schema for the specified params
"""
if params_snippet:
return dict((n, Schema.from_parameter(p)) for n, p
in params_snippet.items())
return {}
def validate(self, with_value=True):
try:
for key in self.data:
if key not in self.props:
msg = _("Unknown Property %s") % key
raise exception.StackValidationFailed(message=msg)
for (key, prop) in self.props.items():
# check that update_allowed and immutable
# do not contradict each other
if prop.update_allowed() and prop.immutable():
msg = _("Property %(prop)s: %(ua)s and %(im)s "
"cannot both be True") % {
'prop': key,
'ua': prop.schema.UPDATE_ALLOWED,
'im': prop.schema.IMMUTABLE}
raise exception.InvalidSchemaError(message=msg)
if with_value:
try:
self._get_property_value(key, validate=True)
except exception.StackValidationFailed as ex:
path = [key]
path.extend(ex.path)
raise exception.StackValidationFailed(
path=path, message=ex.error_message)
except ValueError as e:
if prop.required() and key not in self.data:
path = []
else:
path = [key]
raise exception.StackValidationFailed(
path=path, message=six.text_type(e))
# are there unimplemented Properties
if not prop.implemented() and key in self.data:
msg = _("Property %s not implemented yet") % key
raise exception.StackValidationFailed(message=msg)
except exception.StackValidationFailed as ex:
# NOTE(prazumovsky): should reraise exception for adding specific
# error name and error_prefix to path for correct error message
# building.
path = self.error_prefix
path.extend(ex.path)
raise exception.StackValidationFailed(
error=ex.error or 'Property error',
path=path,
message=ex.error_message
)
def _find_deps_any_in_init(self, unresolved_value):
deps = function.dependencies(unresolved_value)
if any(res.action == res.INIT for res in deps):
return True
    def _get_property_value(self, key, validate=False):
        """Resolve and return the value of property ``key``.

        :param key: property name; must exist in the schema
        :param validate: when True, validate the resolved value against the
            property schema (skipped if the value depends on a resource
            still in INIT, since it cannot be resolved meaningfully yet)
        :returns: the resolved value, the schema default, or None for an
            absent optional property
        :raises KeyError: for a key not defined by the schema
        :raises ValueError: for a missing required property, or wrapping
            any resolver failure
        :raises exception.StackValidationFailed: re-raised from children,
            preserving their path for error reporting
        """
        if key not in self:
            raise KeyError(_('Invalid Property %s') % key)

        prop = self.props[key]

        if key in self.data:
            try:
                unresolved_value = self.data[key]
                if validate:
                    # values referencing uncreated resources can't be
                    # validated yet; fall back to unvalidated retrieval
                    if self._find_deps_any_in_init(unresolved_value):
                        validate = False

                value = self.resolve(unresolved_value)
                return prop.get_value(value, validate)
            # Children can raise StackValidationFailed with unique path which
            # is necessary for further use in StackValidationFailed exception.
            # So we need to handle this exception in this method.
            except exception.StackValidationFailed as e:
                raise exception.StackValidationFailed(path=e.path,
                                                      message=e.error_message)
            # the resolver function could raise any number of exceptions,
            # so handle this generically
            except Exception as e:
                raise ValueError(six.text_type(e))
        elif prop.has_default():
            return prop.get_value(None, validate)
        elif prop.required():
            raise ValueError(_('Property %s not assigned') % key)
        else:
            return None
    def __getitem__(self, key):
        # Mapping-style access: resolve and return the property value
        # without schema validation.
        return self._get_property_value(key)
    def __len__(self):
        # Count of schema-defined properties, not just keys present in data.
        return len(self.props)
    def __contains__(self, key):
        # Membership is defined by the schema, not by the supplied data.
        return key in self.props
    def __iter__(self):
        # Iterate over all schema-defined property names.
        return iter(self.props)
@staticmethod
def _param_def_from_prop(schema):
"""
Return a template parameter definition corresponding to a property.
"""
param_type_map = {
schema.INTEGER: parameters.Schema.NUMBER,
schema.STRING: parameters.Schema.STRING,
schema.NUMBER: parameters.Schema.NUMBER,
schema.BOOLEAN: parameters.Schema.BOOLEAN,
schema.MAP: parameters.Schema.MAP,
schema.LIST: parameters.Schema.LIST,
}
def param_items():
yield parameters.TYPE, param_type_map[schema.type]
if schema.description is not None:
yield parameters.DESCRIPTION, schema.description
if schema.default is not None:
yield parameters.DEFAULT, schema.default
for constraint in schema.constraints:
if isinstance(constraint, constr.Length):
if constraint.min is not None:
yield parameters.MIN_LENGTH, constraint.min
if constraint.max is not None:
yield parameters.MAX_LENGTH, constraint.max
elif isinstance(constraint, constr.Range):
if constraint.min is not None:
yield parameters.MIN_VALUE, constraint.min
if constraint.max is not None:
yield parameters.MAX_VALUE, constraint.max
elif isinstance(constraint, constr.AllowedValues):
yield parameters.ALLOWED_VALUES, list(constraint.allowed)
elif isinstance(constraint, constr.AllowedPattern):
yield parameters.ALLOWED_PATTERN, constraint.pattern
if schema.type == schema.BOOLEAN:
yield parameters.ALLOWED_VALUES, ['True', 'true',
'False', 'false']
return dict(param_items())
@staticmethod
def _prop_def_from_prop(name, schema):
"""
Return a provider template property definition for a property.
"""
if schema.type == Schema.LIST:
return {'Fn::Split': [',', {'Ref': name}]}
else:
return {'Ref': name}
@staticmethod
def _hot_param_def_from_prop(schema):
"""
Return parameter definition corresponding to a property for
hot template.
"""
param_type_map = {
schema.INTEGER: hot_param.HOTParamSchema.NUMBER,
schema.STRING: hot_param.HOTParamSchema.STRING,
schema.NUMBER: hot_param.HOTParamSchema.NUMBER,
schema.BOOLEAN: hot_param.HOTParamSchema.BOOLEAN,
schema.MAP: hot_param.HOTParamSchema.MAP,
schema.LIST: hot_param.HOTParamSchema.LIST,
}
def param_items():
yield hot_param.HOTParamSchema.TYPE, param_type_map[schema.type]
if schema.description is not None:
yield hot_param.HOTParamSchema.DESCRIPTION, schema.description
if schema.default is not None:
yield hot_param.HOTParamSchema.DEFAULT, schema.default
for constraint in schema.constraints:
if (isinstance(constraint, constr.Length) or
isinstance(constraint, constr.Range)):
if constraint.min is not None:
yield hot_param.MIN, constraint.min
if constraint.max is not None:
yield hot_param.MAX, constraint.max
elif isinstance(constraint, constr.AllowedValues):
yield hot_param.ALLOWED_VALUES, list(constraint.allowed)
elif isinstance(constraint, constr.AllowedPattern):
yield hot_param.ALLOWED_PATTERN, constraint.pattern
if schema.type == schema.BOOLEAN:
yield hot_param.ALLOWED_VALUES, ['True', 'true',
'False', 'false']
return dict(param_items())
@staticmethod
def _hot_prop_def_from_prop(name, schema):
"""
Return a provider template property definition for a property.
"""
return {'get_param': name}
@classmethod
def schema_to_parameters_and_properties(cls, schema, template_type='cfn'):
"""Generates properties with params resolved for a resource's
properties_schema.
:param schema: A resource type's properties_schema
:returns: A tuple of params and properties dicts
ex: input: {'foo': {'Type': 'List'}}
output: {'foo': {'Type': 'CommaDelimitedList'}},
{'foo': {'Fn::Split': {'Ref': 'foo'}}}
ex: input: {'foo': {'Type': 'String'}, 'bar': {'Type': 'Map'}}
output: {'foo': {'Type': 'String'}, 'bar': {'Type': 'Json'}},
{'foo': {'Ref': 'foo'}, 'bar': {'Ref': 'bar'}}
"""
def param_prop_def_items(name, schema, template_type):
if template_type == 'hot':
param_def = cls._hot_param_def_from_prop(schema)
prop_def = cls._hot_prop_def_from_prop(name, schema)
else:
param_def = cls._param_def_from_prop(schema)
prop_def = cls._prop_def_from_prop(name, schema)
return (name, param_def), (name, prop_def)
if not schema:
return {}, {}
param_prop_defs = [param_prop_def_items(n, s, template_type)
for n, s in six.iteritems(schemata(schema))
if s.implemented]
param_items, prop_items = zip(*param_prop_defs)
return dict(param_items), dict(prop_items)
class TranslationRule(object):
    """A mechanism for translating one property into another.

    The mechanism uses a list of rules, each defined by this class, which
    can be executed. During resource creation, after the properties are
    defined, the resource takes the list of rules returned by its
    translation_rules method (overridden per resource where needed) and
    executes each rule via translate_properties. The allowed operations
    are:

    - ADD. Appends a value to a list-type property. Only list-type values
      can be added; using it on other property types is an error.
    - REPLACE. Replaces a property value with another, for any property
      type. If the property is a list, the value is replaced in every
      element of the list where needed. If an element must be replaced by
      the value of another element of the same property, value_name must
      be defined.
    - DELETE. Removes a property. For a list-type property, the value is
      deleted from every list element.
    """

    RULE_KEYS = (ADD, REPLACE, DELETE) = ('Add', 'Replace', 'Delete')

    def __init__(self, properties, rule, source_path, value=None,
                 value_name=None, value_path=None):
        """Add new rule for translating mechanism.

        :param properties: properties of resource
        :param rule: rule from RULE_KEYS
        :param source_path: list with path to property, which value will be
               affected in rule.
        :param value: value which will be involved in rule
        :param value_name: value_name which used for replacing properties
               inside list-type properties.
        :param value_path: path to value, which should be used for
               translation.
        """
        self.properties = properties
        self.rule = rule
        self.source_path = source_path
        # normalize falsy values (e.g. '' or 0) to None
        self.value = value or None
        self.value_name = value_name
        self.value_path = value_path
        # fail fast on malformed rules
        self.validate()

    def validate(self):
        """Sanity-check the rule's arguments; raise ValueError if invalid."""
        if self.rule not in self.RULE_KEYS:
            raise ValueError(_('There is no rule %(rule)s. List of allowed '
                               'rules is: %(rules)s.') % {
                'rule': self.rule,
                'rules': ', '.join(self.RULE_KEYS)})
        elif not isinstance(self.properties, Properties):
            raise ValueError(_('Properties must be Properties type. '
                               'Found %s.') % type(self.properties))
        elif not isinstance(self.source_path, list):
            raise ValueError(_('source_path should be a list with path '
                               'instead of %s.') % type(self.source_path))
        elif len(self.source_path) == 0:
            raise ValueError(_('source_path must be non-empty list with '
                               'path.'))
        elif self.value_name and self.rule != self.REPLACE:
            raise ValueError(_('Use value_name only for replacing list '
                               'elements.'))
        elif self.rule == self.ADD and not isinstance(self.value, list):
            raise ValueError(_('value must be list type when rule is Add.'))

    def execute_rule(self):
        """Apply this rule, mutating the resource's property data in place."""
        (source_key, source_data) = self.get_data_from_source_path(
            self.source_path)
        if self.value_path:
            # resolve the value indirectly through value_path, falling
            # back to the literal self.value when absent
            (value_key, value_data) = self.get_data_from_source_path(
                self.value_path)
            value = (value_data[value_key]
                     if value_data and value_data.get(value_key)
                     else self.value)
        else:
            (value_key, value_data) = None, None
            value = self.value

        # nothing to do when the source is missing, or (for non-DELETE
        # rules) no value of any kind was supplied
        if (source_data is None or (self.rule != self.DELETE and
                                    (value is None and
                                     self.value_name is None and
                                     (value_data is None or
                                      value_data.get(value_key) is None)))):
            return

        if self.rule == TranslationRule.ADD:
            if isinstance(source_data, list):
                source_data.extend(value)
            else:
                raise ValueError(_('Add rule must be used only for '
                                   'lists.'))
        elif self.rule == TranslationRule.REPLACE:
            if isinstance(source_data, list):
                # replace within each element of the list property
                for item in source_data:
                    if item.get(self.value_name) and item.get(source_key):
                        raise ValueError(_('Cannot use %(key)s and '
                                           '%(name)s at the same time.')
                                         % dict(key=source_key,
                                                name=self.value_name))
                    elif item.get(self.value_name) is not None:
                        # move the sibling value under the source key
                        item[source_key] = item[self.value_name]
                        del item[self.value_name]
                    elif value is not None:
                        item[source_key] = value
            else:
                if (source_data and source_data.get(source_key) and
                        value_data and value_data.get(value_key)):
                    raise ValueError(_('Cannot use %(key)s and '
                                       '%(name)s at the same time.')
                                     % dict(key=source_key,
                                            name=value_key))
                source_data[source_key] = value
                # If value defined with value_path, need to delete value_path
                # property data after it's replacing.
                if value_data and value_data.get(value_key):
                    del value_data[value_key]
        elif self.rule == TranslationRule.DELETE:
            if isinstance(source_data, list):
                for item in source_data:
                    if item.get(source_key) is not None:
                        del item[source_key]
            else:
                del source_data[source_key]

    def get_data_from_source_path(self, path):
        """Walk ``path`` through the property data.

        :param path: list of keys leading to the target property
        :returns: a (key, container) pair such that container[key] is the
            addressed value; (None, None) when the path is absent and the
            rule does not require materializing it
        """
        def get_props(props, key):
            # descend one level in the schema, rebuilding Property
            # objects for a nested schema where one exists
            props = props.get(key)
            if props.schema.schema is not None:
                keys = list(props.schema.schema)
                schemata = dict((k, props.schema.schema[k])
                                for k in keys)
                props = dict((k, Property(s, k))
                             for k, s in schemata.items())
            return props

        source_key = path[0]
        data = self.properties.data
        props = self.properties.props
        for key in path:
            if isinstance(data, list):
                # stop descending at a list; the remaining key addresses
                # fields inside each list element
                source_key = key
            elif data.get(key) is not None and isinstance(data.get(key),
                                                          (list, dict)):
                data = data.get(key)
                props = get_props(props, key)
            elif data.get(key) is None:
                if (self.rule == TranslationRule.DELETE or
                        (self.rule == TranslationRule.REPLACE and
                         self.value_name)):
                    # nothing to delete/rename under a missing key
                    return None, None
                elif props.get(key).type() == Schema.LIST:
                    # materialize an empty container so the rule can fill it
                    data[key] = []
                elif props.get(key).type() == Schema.MAP:
                    data[key] = {}
                else:
                    source_key = key
                    continue
                data = data.get(key)
                props = get_props(props, key)
            else:
                source_key = key
        return source_key, data
| |
# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics
# may be involved in their functionality.
import pytest, math, re
import itertools
from numpy.core._simd import targets
from numpy.core._multiarray_umath import __cpu_baseline__
class _Test_Utility:
# submodule of the desired SIMD extension, e.g. targets["AVX512F"]
npyv = None
# the current data type suffix e.g. 's8'
sfx = None
# target name can be 'baseline' or one or more of CPU features
target_name = None
def __getattr__(self, attr):
"""
To call NPV intrinsics without the attribute 'npyv' and
auto suffixing intrinsics according to class attribute 'sfx'
"""
return getattr(self.npyv, attr + "_" + self.sfx)
def _data(self, start=None, count=None, reverse=False):
"""
Create list of consecutive numbers according to number of vector's lanes.
"""
if start is None:
start = 1
if count is None:
count = self.nlanes
rng = range(start, start + count)
if reverse:
rng = reversed(rng)
if self._is_fp():
return [x / 1.0 for x in rng]
return list(rng)
def _is_unsigned(self):
return self.sfx[0] == 'u'
def _is_signed(self):
return self.sfx[0] == 's'
def _is_fp(self):
return self.sfx[0] == 'f'
def _scalar_size(self):
return int(self.sfx[1:])
def _int_clip(self, seq):
if self._is_fp():
return seq
max_int = self._int_max()
min_int = self._int_min()
return [min(max(v, min_int), max_int) for v in seq]
def _int_max(self):
if self._is_fp():
return None
max_u = self._to_unsigned(self.setall(-1))[0]
if self._is_signed():
return max_u // 2
return max_u
def _int_min(self):
if self._is_fp():
return None
if self._is_unsigned():
return 0
return -(self._int_max() + 1)
def _true_mask(self):
max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1)
return max_unsig[0]
def _to_unsigned(self, vector):
if isinstance(vector, (list, tuple)):
return getattr(self.npyv, "load_u" + self.sfx[1:])(vector)
else:
sfx = vector.__name__.replace("npyv_", "")
if sfx[0] == "b":
cvt_intrin = "cvt_u{0}_b{0}"
else:
cvt_intrin = "reinterpret_u{0}_{1}"
return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector)
def _pinfinity(self):
v = self.npyv.setall_u32(0x7f800000)
return self.npyv.reinterpret_f32_u32(v)[0]
def _ninfinity(self):
v = self.npyv.setall_u32(0xff800000)
return self.npyv.reinterpret_f32_u32(v)[0]
def _nan(self):
v = self.npyv.setall_u32(0x7fc00000)
return self.npyv.reinterpret_f32_u32(v)[0]
def _cpu_features(self):
target = self.target_name
if target == "baseline":
target = __cpu_baseline__
else:
target = target.split('__') # multi-target separator
return ' '.join(target)
class _SIMD_BOOL(_Test_Utility):
    """
    To test all boolean vector types at once
    """
    def _data(self, start=None, count=None, reverse=False):
        # Alternating false/true lanes; lane count and mask come from the
        # unsigned type of the same width (e.g. b8 -> u8).
        nlanes = getattr(self.npyv, "nlanes_u" + self.sfx[1:])
        true_mask = self._true_mask()
        rng = range(nlanes)
        if reverse:
            rng = reversed(rng)
        return [true_mask if x % 2 else 0 for x in rng]

    def _load_b(self, data):
        # Load as unsigned, then convert into the boolean vector type.
        len_str = self.sfx[1:]
        load = getattr(self.npyv, "load_u" + len_str)
        cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}")
        return cvt(load(data))

    def test_operators_logical(self):
        """
        Logical operations for boolean types.
        Test intrinsics:
            npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX
        """
        data_a = self._data()
        data_b = self._data(reverse=True)
        vdata_a = self._load_b(data_a)
        vdata_b = self._load_b(data_b)

        data_and = [a & b for a, b in zip(data_a, data_b)]
        # 'and'/'or'/'not' are Python keywords, hence getattr
        vand = getattr(self, "and")(vdata_a, vdata_b)
        assert vand == data_and

        data_or = [a | b for a, b in zip(data_a, data_b)]
        vor = getattr(self, "or")(vdata_a, vdata_b)
        assert vor == data_or

        data_xor = [a ^ b for a, b in zip(data_a, data_b)]
        vxor = getattr(self, "xor")(vdata_a, vdata_b)
        assert vxor == data_xor

        vnot = getattr(self, "not")(vdata_a)
        assert vnot == data_b

    def test_tobits(self):
        """Pack boolean lanes into an integer bitfield (npyv_tobits_##SFX)."""
        data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)])
        for data in (self._data(), self._data(reverse=True)):
            vdata = self._load_b(data)
            data_bits = data2bits(data)
            tobits = bin(self.tobits(vdata))
            assert tobits == bin(data_bits)
class _SIMD_INT(_Test_Utility):
    """
    To test all integer vector types at once
    """
    def test_operators_shift(self):
        """Shift left/right by a variable count and by an immediate constant."""
        if self.sfx in ("u8", "s8"):
            # no 8-bit shift intrinsics
            return
        # data near the type's extremes to expose carry/sign issues
        data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        for count in range(self._scalar_size()):
            # load to cast
            data_shl_a = self.load([a << count for a in data_a])
            # left shift
            shl = self.shl(vdata_a, count)
            assert shl == data_shl_a
            # load to cast
            data_shr_a = self.load([a >> count for a in data_a])
            # right shift
            shr = self.shr(vdata_a, count)
            assert shr == data_shr_a

        # shift by zero or max or out-range immediate constant is not applicable and illogical
        for count in range(1, self._scalar_size()):
            # load to cast
            data_shl_a = self.load([a << count for a in data_a])
            # left shift by an immediate constant
            shli = self.shli(vdata_a, count)
            assert shli == data_shl_a
            # load to cast
            data_shr_a = self.load([a >> count for a in data_a])
            # right shift by an immediate constant
            shri = self.shri(vdata_a, count)
            assert shri == data_shr_a

    def test_arithmetic_subadd_saturated(self):
        """Saturated add/sub; only 8/16-bit lane types have these intrinsics."""
        if self.sfx in ("u32", "s32", "u64", "s64"):
            return
        data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)])
        adds = self.adds(vdata_a, vdata_b)
        assert adds == data_adds

        data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)])
        subs = self.subs(vdata_a, vdata_b)
        assert subs == data_subs

    def test_math_max_min(self):
        """Element-wise max/min against Python's builtins."""
        data_a = self._data()
        data_b = self._data(self.nlanes)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        data_max = [max(a, b) for a, b in zip(data_a, data_b)]
        simd_max = self.max(vdata_a, vdata_b)
        assert simd_max == data_max

        data_min = [min(a, b) for a, b in zip(data_a, data_b)]
        simd_min = self.min(vdata_a, vdata_b)
        assert simd_min == data_min
class _SIMD_FP32(_Test_Utility):
    """
    To only test single precision
    """
    def test_conversions(self):
        """
        Round to nearest even integer, assume CPU control register is set to rounding.
        Test intrinsics:
            npyv_round_s32_##SFX
        """
        features = self._cpu_features()
        if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features):
            # very costly to emulate nearest even on Armv7
            # instead we round halves to up. e.g. 0.5 -> 1, -0.5 -> -1
            _round = lambda v: int(v + (0.5 if v >= 0 else -0.5))
        else:
            _round = round
        vdata_a = self.load(self._data())
        # subtract 0.5 so every lane sits on a rounding boundary
        vdata_a = self.sub(vdata_a, self.setall(0.5))
        data_round = [_round(x) for x in vdata_a]
        vround = self.round_s32(vdata_a)
        assert vround == data_round
class _SIMD_FP64(_Test_Utility):
    """
    To only test double precision
    """
    def test_conversions(self):
        """
        Round to nearest even integer, assume CPU control register is set to rounding.
        Test intrinsics:
            npyv_round_s32_##SFX
        """
        vdata_a = self.load(self._data())
        # put every lane on a rounding boundary
        vdata_a = self.sub(vdata_a, self.setall(0.5))
        vdata_b = self.mul(vdata_a, self.setall(-1.5))
        # NOTE: round_s32 for f64 takes two double vectors (packing both
        # into one s32 vector) — hence both inputs and the concatenated ref.
        data_round = [round(x) for x in list(vdata_a) + list(vdata_b)]
        vround = self.round_s32(vdata_a, vdata_b)
        assert vround == data_round
class _SIMD_FP(_Test_Utility):
    """
    To test all float vector types at once
    """
    def test_arithmetic_fused(self):
        """Fused multiply-add family: muladd, mulsub, nmuladd, nmulsub."""
        vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3
        vdata_cx2 = self.add(vdata_c, vdata_c)
        # multiply and add, a*b + c
        data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)])
        fma = self.muladd(vdata_a, vdata_b, vdata_c)
        assert fma == data_fma
        # multiply and subtract, a*b - c
        fms = self.mulsub(vdata_a, vdata_b, vdata_c)
        data_fms = self.sub(data_fma, vdata_cx2)
        assert fms == data_fms
        # negate multiply and add, -(a*b) + c
        nfma = self.nmuladd(vdata_a, vdata_b, vdata_c)
        data_nfma = self.sub(vdata_cx2, data_fma)
        assert nfma == data_nfma
        # negate multiply and subtract, -(a*b) - c
        nfms = self.nmulsub(vdata_a, vdata_b, vdata_c)
        data_nfms = self.mul(data_fma, self.setall(-1))
        assert nfms == data_nfms

    def test_abs(self):
        """Absolute value, including signed zero, infinities and NaN."""
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan))
        for case, desired in abs_cases:
            data_abs = [desired]*self.nlanes
            vabs = self.abs(self.setall(case))
            assert vabs == pytest.approx(data_abs, nan_ok=True)

        vabs = self.abs(self.mul(vdata, self.setall(-1)))
        assert vabs == data

    def test_sqrt(self):
        """Square root, including IEEE special cases."""
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf))
        for case, desired in sqrt_cases:
            data_sqrt = [desired]*self.nlanes
            sqrt = self.sqrt(self.setall(case))
            assert sqrt == pytest.approx(data_sqrt, nan_ok=True)

        data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision
        sqrt = self.sqrt(vdata)
        assert sqrt == data_sqrt

    def test_square(self):
        """Element-wise square, including IEEE special cases."""
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())
        # square
        square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf))
        for case, desired in square_cases:
            data_square = [desired]*self.nlanes
            square = self.square(self.setall(case))
            assert square == pytest.approx(data_square, nan_ok=True)
        data_square = [x*x for x in data]
        square = self.square(vdata)
        assert square == data_square

    def test_max(self):
        """
        Test intrinsics:
            npyv_max_##SFX
            npyv_maxp_##SFX
        """
        data_a = self._data()
        data_b = self._data(self.nlanes)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
        data_max = [max(a, b) for a, b in zip(data_a, data_b)]
        _max = self.max(vdata_a, vdata_b)
        assert _max == data_max
        maxp = self.maxp(vdata_a, vdata_b)
        assert maxp == data_max
        # test IEEE standards
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        max_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10),
                     (pinf, pinf, pinf), (pinf, 10, pinf), (10, pinf, pinf),
                     (ninf, ninf, ninf), (ninf, 10, 10), (10, ninf, 10),
                     (10, 0, 10), (10, -10, 10))
        for case_operand1, case_operand2, desired in max_cases:
            data_max = [desired]*self.nlanes
            vdata_a = self.setall(case_operand1)
            vdata_b = self.setall(case_operand2)
            maxp = self.maxp(vdata_a, vdata_b)
            assert maxp == pytest.approx(data_max, nan_ok=True)
            if nan in (case_operand1, case_operand2, desired):
                # plain max has unspecified NaN behavior; only maxp is checked
                continue
            _max = self.max(vdata_a, vdata_b)
            assert _max == data_max

    def test_min(self):
        """
        Test intrinsics:
            npyv_min_##SFX
            npyv_minp_##SFX
        """
        data_a = self._data()
        data_b = self._data(self.nlanes)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
        data_min = [min(a, b) for a, b in zip(data_a, data_b)]
        _min = self.min(vdata_a, vdata_b)
        assert _min == data_min
        minp = self.minp(vdata_a, vdata_b)
        assert minp == data_min
        # test IEEE standards
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        min_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10),
                     (pinf, pinf, pinf), (pinf, 10, 10), (10, pinf, 10),
                     (ninf, ninf, ninf), (ninf, 10, ninf), (10, ninf, ninf),
                     (10, 0, 0), (10, -10, -10))
        for case_operand1, case_operand2, desired in min_cases:
            data_min = [desired]*self.nlanes
            vdata_a = self.setall(case_operand1)
            vdata_b = self.setall(case_operand2)
            minp = self.minp(vdata_a, vdata_b)
            assert minp == pytest.approx(data_min, nan_ok=True)
            if nan in (case_operand1, case_operand2, desired):
                # plain min has unspecified NaN behavior; only minp is checked
                continue
            _min = self.min(vdata_a, vdata_b)
            assert _min == data_min

    def test_reciprocal(self):
        """Reciprocal (1/x), including IEEE special cases."""
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf))
        for case, desired in recip_cases:
            data_recip = [desired]*self.nlanes
            recip = self.recip(self.setall(case))
            assert recip == pytest.approx(data_recip, nan_ok=True)

        data_recip = self.load([1/x for x in data]) # load to truncate precision
        recip = self.recip(vdata)
        assert recip == data_recip

    def test_special_cases(self):
        """
        Compare Not NaN. Test intrinsics:
            npyv_notnan_##SFX
        """
        nnan = self.notnan(self.setall(self._nan()))
        assert nnan == [0]*self.nlanes
class _SIMD_ALL(_Test_Utility):
    """
    To test all vector types at once
    """
    def test_memory_load(self):
        """Unaligned, aligned and stream loads, plus lower-half load."""
        data = self._data()
        # unaligned load
        load_data = self.load(data)
        assert load_data == data
        # aligned load
        loada_data = self.loada(data)
        assert loada_data == data
        # stream load
        loads_data = self.loads(data)
        assert loads_data == data
        # load lower part
        loadl = self.loadl(data)
        loadl_half = list(loadl)[:self.nlanes//2]
        data_half = data[:self.nlanes//2]
        assert loadl_half == data_half
        assert loadl != data # detect overflow
    def test_memory_store(self):
        """Unaligned, aligned and stream stores, plus half-vector stores."""
        data = self._data()
        vdata = self.load(data)
        # unaligned store
        store = [0] * self.nlanes
        self.store(store, vdata)
        assert store == data
        # aligned store
        store_a = [0] * self.nlanes
        self.storea(store_a, vdata)
        assert store_a == data
        # stream store
        store_s = [0] * self.nlanes
        self.stores(store_s, vdata)
        assert store_s == data
        # store lower part
        store_l = [0] * self.nlanes
        self.storel(store_l, vdata)
        assert store_l[:self.nlanes//2] == data[:self.nlanes//2]
        assert store_l != vdata # detect overflow
        # store higher part
        store_h = [0] * self.nlanes
        self.storeh(store_h, vdata)
        assert store_h[:self.nlanes//2] == data[self.nlanes//2:]
        assert store_h != vdata # detect overflow
    def test_memory_partial_load(self):
        """Partial loads: fill the remaining lanes with a scalar or zero."""
        if self.sfx in ("u8", "s8", "u16", "s16"):
            # no partial-access intrinsics for 8/16-bit lane types
            return
        data = self._data()
        lanes = list(range(1, self.nlanes + 1))
        lanes += [self.nlanes**2, self.nlanes**4] # test out of range
        for n in lanes:
            load_till = self.load_till(data, n, 15)
            data_till = data[:n] + [15] * (self.nlanes-n)
            assert load_till == data_till
            load_tillz = self.load_tillz(data, n)
            data_tillz = data[:n] + [0] * (self.nlanes-n)
            assert load_tillz == data_tillz
    def test_memory_partial_store(self):
        """Partial store: only the first n lanes are written."""
        if self.sfx in ("u8", "s8", "u16", "s16"):
            # no partial-access intrinsics for 8/16-bit lane types
            return
        data = self._data()
        data_rev = self._data(reverse=True)
        vdata = self.load(data)
        lanes = list(range(1, self.nlanes + 1))
        lanes += [self.nlanes**2, self.nlanes**4]
        for n in lanes:
            data_till = data_rev.copy()
            data_till[:n] = data[:n]
            store_till = self._data(reverse=True)
            self.store_till(store_till, n, vdata)
            assert store_till == data_till
    def test_memory_noncont_load(self):
        """Strided (non-contiguous) load, with positive and negative strides."""
        if self.sfx in ("u8", "s8", "u16", "s16"):
            # no non-contiguous intrinsics for 8/16-bit lane types
            return
        for stride in range(1, 64):
            data = self._data(count=stride*self.nlanes)
            data_stride = data[::stride]
            loadn = self.loadn(data, stride)
            assert loadn == data_stride
        for stride in range(-64, 0):
            data = self._data(stride, -stride*self.nlanes)
            data_stride = self.load(data[::stride]) # cast unsigned
            loadn = self.loadn(data, stride)
            assert loadn == data_stride
    def test_memory_noncont_partial_load(self):
        """Strided partial load: first n strided lanes, rest filled/zeroed."""
        if self.sfx in ("u8", "s8", "u16", "s16"):
            # no non-contiguous intrinsics for 8/16-bit lane types
            return
        lanes = list(range(1, self.nlanes + 1))
        lanes += [self.nlanes**2, self.nlanes**4]
        for stride in range(1, 64):
            data = self._data(count=stride*self.nlanes)
            data_stride = data[::stride]
            for n in lanes:
                data_stride_till = data_stride[:n] + [15] * (self.nlanes-n)
                loadn_till = self.loadn_till(data, stride, n, 15)
                assert loadn_till == data_stride_till
                data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n)
                loadn_tillz = self.loadn_tillz(data, stride, n)
                assert loadn_tillz == data_stride_tillz
        for stride in range(-64, 0):
            data = self._data(stride, -stride*self.nlanes)
            data_stride = list(self.load(data[::stride])) # cast unsigned
            for n in lanes:
                data_stride_till = data_stride[:n] + [15] * (self.nlanes-n)
                loadn_till = self.loadn_till(data, stride, n, 15)
                assert loadn_till == data_stride_till
                data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n)
                loadn_tillz = self.loadn_tillz(data, stride, n)
                assert loadn_tillz == data_stride_tillz
    def test_memory_noncont_store(self):
        """Strided store; guard regions of 127s detect out-of-bounds writes."""
        if self.sfx in ("u8", "s8", "u16", "s16"):
            # no non-contiguous intrinsics for 8/16-bit lane types
            return
        vdata = self.load(self._data())
        for stride in range(1, 64):
            data = [15] * stride * self.nlanes
            data[::stride] = vdata
            storen = [15] * stride * self.nlanes
            storen += [127]*64
            self.storen(storen, stride, vdata)
            assert storen[:-64] == data
            assert storen[-64:] == [127]*64 # detect overflow
        for stride in range(-64, 0):
            data = [15] * -stride * self.nlanes
            data[::stride] = vdata
            storen = [127]*64
            storen += [15] * -stride * self.nlanes
            self.storen(storen, stride, vdata)
            assert storen[64:] == data
            assert storen[:64] == [127]*64 # detect overflow
    def test_memory_noncont_partial_store(self):
        """Strided partial store with guard regions to detect overflow."""
        if self.sfx in ("u8", "s8", "u16", "s16"):
            # no non-contiguous intrinsics for 8/16-bit lane types
            return
        data = self._data()
        vdata = self.load(data)
        lanes = list(range(1, self.nlanes + 1))
        lanes += [self.nlanes**2, self.nlanes**4]
        for stride in range(1, 64):
            for n in lanes:
                data_till = [15] * stride * self.nlanes
                data_till[::stride] = data[:n] + [15] * (self.nlanes-n)
                storen_till = [15] * stride * self.nlanes
                storen_till += [127]*64
                self.storen_till(storen_till, stride, n, vdata)
                assert storen_till[:-64] == data_till
                assert storen_till[-64:] == [127]*64 # detect overflow
        for stride in range(-64, 0):
            for n in lanes:
                data_till = [15] * -stride * self.nlanes
                data_till[::stride] = data[:n] + [15] * (self.nlanes-n)
                storen_till = [127]*64
                storen_till += [15] * -stride * self.nlanes
                self.storen_till(storen_till, stride, n, vdata)
                assert storen_till[64:] == data_till
                assert storen_till[:64] == [127]*64 # detect overflow
    def test_misc(self):
        """Broadcast (zero/setall), set/setf, reinterpret, select and cleanup."""
        broadcast_zero = self.zero()
        assert broadcast_zero == [0] * self.nlanes
        for i in range(1, 10):
            broadcasti = self.setall(i)
            assert broadcasti == [i] * self.nlanes

        data_a, data_b = self._data(), self._data(reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        # py level of npyv_set_* don't support ignoring the extra specified lanes or
        # fill non-specified lanes with zero.
        vset = self.set(*data_a)
        assert vset == data_a
        # py level of npyv_setf_* don't support ignoring the extra specified lanes or
        # fill non-specified lanes with the specified scalar.
        vsetf = self.setf(10, *data_a)
        assert vsetf == data_a

        # We're testing the sanity of _simd's type-vector,
        # reinterpret* intrinsics itself are tested via compiler
        # during the build of _simd module
        sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64", "f32"]
        if self.npyv.simd_f64:
            sfxes.append("f64")
        for sfx in sfxes:
            vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__
            assert vec_name == "npyv_" + sfx

        # select & mask operations
        select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b)
        assert select_a == data_a
        select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b)
        assert select_b == data_b

        # cleanup intrinsic is only used with AVX for
        # zeroing registers to avoid the AVX-SSE transition penalty,
        # so nothing to test here
        self.npyv.cleanup()
    def test_reorder(self):
        """Combine (lower/higher/both halves) and zip/interleave intrinsics."""
        data_a, data_b = self._data(), self._data(reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
        # lower half part
        data_a_lo = data_a[:self.nlanes//2]
        data_b_lo = data_b[:self.nlanes//2]
        # higher half part
        data_a_hi = data_a[self.nlanes//2:]
        data_b_hi = data_b[self.nlanes//2:]
        # combine two lower parts
        combinel = self.combinel(vdata_a, vdata_b)
        assert combinel == data_a_lo + data_b_lo
        # combine two higher parts
        combineh = self.combineh(vdata_a, vdata_b)
        assert combineh == data_a_hi + data_b_hi
        # combine x2
        combine = self.combine(vdata_a, vdata_b)
        assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi)
        # zip(interleave)
        data_zipl = [v for p in zip(data_a_lo, data_b_lo) for v in p]
        data_ziph = [v for p in zip(data_a_hi, data_b_hi) for v in p]
        vzip = self.zip(vdata_a, vdata_b)
        assert vzip == (data_zipl, data_ziph)
    def test_reorder_rev64(self):
        # Reverse elements of each 64-bit lane
        ssize = self._scalar_size()
        if ssize == 64:
            # a 64-bit lane has nothing to reverse within itself
            return
        data_rev64 = [
            y for x in range(0, self.nlanes, 64//ssize)
              for y in reversed(range(x, x + 64//ssize))
        ]
        rev64 = self.rev64(self.load(range(self.nlanes)))
        assert rev64 == data_rev64
    def test_operators_comparison(self):
        """Element-wise ==, !=, >, >=, <, <= against Python comparisons."""
        if self._is_fp():
            data_a = self._data()
        else:
            # near the type's extremes for integers
            data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        mask_true = self._true_mask()
        def to_bool(vector):
            # comparison intrinsics yield all-bits-set/zero lanes
            return [lane == mask_true for lane in vector]
        # equal
        data_eq = [a == b for a, b in zip(data_a, data_b)]
        cmpeq = to_bool(self.cmpeq(vdata_a, vdata_b))
        assert cmpeq == data_eq
        # not equal
        data_neq = [a != b for a, b in zip(data_a, data_b)]
        cmpneq = to_bool(self.cmpneq(vdata_a, vdata_b))
        assert cmpneq == data_neq
        # greater than
        data_gt = [a > b for a, b in zip(data_a, data_b)]
        cmpgt = to_bool(self.cmpgt(vdata_a, vdata_b))
        assert cmpgt == data_gt
        # greater than and equal
        data_ge = [a >= b for a, b in zip(data_a, data_b)]
        cmpge = to_bool(self.cmpge(vdata_a, vdata_b))
        assert cmpge == data_ge
        # less than
        data_lt = [a < b for a, b in zip(data_a, data_b)]
        cmplt = to_bool(self.cmplt(vdata_a, vdata_b))
        assert cmplt == data_lt
        # less than and equal
        data_le = [a <= b for a, b in zip(data_a, data_b)]
        cmple = to_bool(self.cmple(vdata_a, vdata_b))
        assert cmple == data_le
    def test_operators_logical(self):
        """Test bitwise intrinsics (xor, or, and, not) against Python's
        integer bitwise operators.

        Float vectors are reinterpreted as unsigned integers first, since
        Python's bitwise operators are undefined for floats.
        """
        if self._is_fp():
            data_a = self._data()
        else:
            # keep integer data close to the type's upper bound
            data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
        if self._is_fp():
            # compare bit patterns in the unsigned-integer domain
            data_cast_a = self._to_unsigned(vdata_a)
            data_cast_b = self._to_unsigned(vdata_b)
            cast, cast_data = self._to_unsigned, self._to_unsigned
        else:
            # integers compare directly; load() casts to the lane type
            data_cast_a, data_cast_b = data_a, data_b
            cast, cast_data = lambda a: a, self.load
        # xor
        data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)])
        vxor = cast(self.xor(vdata_a, vdata_b))
        assert vxor == data_xor
        # or -- "or" is a Python keyword, hence getattr
        data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)])
        vor = cast(getattr(self, "or")(vdata_a, vdata_b))
        assert vor == data_or
        # and -- same keyword issue as above
        data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)])
        vand = cast(getattr(self, "and")(vdata_a, vdata_b))
        assert vand == data_and
        # not -- unary bitwise complement
        data_not = cast_data([~a for a in data_cast_a])
        vnot = cast(getattr(self, "not")(vdata_a))
        assert vnot == data_not
def test_conversion_boolean(self):
bsfx = "b" + self.sfx[1:]
to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx))
from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx))
false_vb = to_boolean(self.setall(0))
true_vb = self.cmpeq(self.setall(0), self.setall(0))
assert false_vb != true_vb
false_vsfx = from_boolean(false_vb)
true_vsfx = from_boolean(true_vb)
assert false_vsfx != true_vsfx
def test_conversion_expand(self):
"""
Test expand intrinsics:
npyv_expand_u16_u8
npyv_expand_u32_u16
"""
if self.sfx not in ("u8", "u16"):
return
totype = self.sfx[0]+str(int(self.sfx[1:])*2)
expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}")
# close enough from the edge to detect any deviation
data = self._data(self._int_max() - self.nlanes)
vdata = self.load(data)
edata = expand(vdata)
# lower half part
data_lo = data[:self.nlanes//2]
# higher half part
data_hi = data[self.nlanes//2:]
assert edata == (data_lo, data_hi)
def test_arithmetic_subadd(self):
if self._is_fp():
data_a = self._data()
else:
data_a = self._data(self._int_max() - self.nlanes)
data_b = self._data(self._int_min(), reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
# non-saturated
data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast
add = self.add(vdata_a, vdata_b)
assert add == data_add
data_sub = self.load([a - b for a, b in zip(data_a, data_b)])
sub = self.sub(vdata_a, vdata_b)
assert sub == data_sub
def test_arithmetic_mul(self):
if self.sfx in ("u64", "s64"):
return
if self._is_fp():
data_a = self._data()
else:
data_a = self._data(self._int_max() - self.nlanes)
data_b = self._data(self._int_min(), reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
data_mul = self.load([a * b for a, b in zip(data_a, data_b)])
mul = self.mul(vdata_a, vdata_b)
assert mul == data_mul
def test_arithmetic_div(self):
if not self._is_fp():
return
data_a, data_b = self._data(), self._data(reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
# load to truncate f64 to precision of f32
data_div = self.load([a / b for a, b in zip(data_a, data_b)])
div = self.div(vdata_a, vdata_b)
assert div == data_div
    def test_arithmetic_intdiv(self):
        """
        Test integer division intrinsics:
            npyv_divisor_##sfx
            npyv_divc_##sfx
        """
        if self._is_fp():
            return

        def trunc_div(a, d):
            """
            Divide towards zero works with large integers > 2^53,
            and wrap around overflow similar to what C does.
            """
            if d == -1 and a == int_min:
                # C-style wrap-around: INT_MIN / -1 overflows back to INT_MIN
                return a
            sign_a, sign_d = a < 0, d < 0
            if a == 0 or sign_a == sign_d:
                # same signs: Python floor division equals C truncation
                return a // d
            # opposite signs: adjust floor division to truncate towards zero
            return (a + sign_d - sign_a) // d + 1

        int_min = self._int_min() if self._is_signed() else 1
        int_max = self._int_max()
        # dividend start values near the edges of the type's range
        rdata = (
            0, 1, self.nlanes, int_max-self.nlanes,
            int_min, int_min//2 + 1
        )
        divisors = (1, 2, 9, 13, self.nlanes, int_min, int_max, int_max//2)
        for x, d in itertools.product(rdata, divisors):
            data = self._data(x)
            vdata = self.load(data)
            data_divc = [trunc_div(a, d) for a in data]
            divisor = self.divisor(d)
            divc = self.divc(vdata, divisor)
            assert divc == data_divc
        if not self._is_signed():
            return
        # negate without overflowing past the type's maximum magnitude
        safe_neg = lambda x: -x-1 if -x > int_max else -x
        # test round divison for signed integers
        for x, d in itertools.product(rdata, divisors):
            d_neg = safe_neg(d)
            data = self._data(x)
            data_neg = [safe_neg(a) for a in data]
            vdata = self.load(data)
            vdata_neg = self.load(data_neg)
            divisor = self.divisor(d)
            divisor_neg = self.divisor(d_neg)
            # round towards zero
            data_divc = [trunc_div(a, d_neg) for a in data]
            divc = self.divc(vdata, divisor_neg)
            assert divc == data_divc
            data_divc = [trunc_div(a, d) for a in data_neg]
            divc = self.divc(vdata_neg, divisor)
            assert divc == data_divc
        # test truncate sign if the dividend is zero
        vzero = self.zero()
        for d in (-1, -10, -100, int_min//2, int_min):
            divisor = self.divisor(d)
            divc = self.divc(vzero, divisor)
            assert divc == vzero
        # test overflow
        vmin = self.setall(int_min)
        divisor = self.divisor(-1)
        divc = self.divc(vmin, divisor)
        assert divc == vmin
def test_arithmetic_reduce_sum(self):
"""
Test reduce sum intrinsics:
npyv_sum_##sfx
"""
if self.sfx not in ("u32", "u64", "f32", "f64"):
return
# reduce sum
data = self._data()
vdata = self.load(data)
data_sum = sum(data)
vsum = self.sum(vdata)
assert vsum == data_sum
def test_arithmetic_reduce_sumup(self):
"""
Test extend reduce sum intrinsics:
npyv_sumup_##sfx
"""
if self.sfx not in ("u8", "u16"):
return
rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes)
for r in rdata:
data = self._data(r)
vdata = self.load(data)
data_sum = sum(data)
vsum = self.sumup(vdata)
assert vsum == data_sum
def test_mask_conditional(self):
"""
Conditional addition and subtraction for all supported data types.
Test intrinsics:
npyv_ifadd_##SFX, npyv_ifsub_##SFX
"""
vdata_a = self.load(self._data())
vdata_b = self.load(self._data(reverse=True))
true_mask = self.cmpeq(self.zero(), self.zero())
false_mask = self.cmpneq(self.zero(), self.zero())
data_sub = self.sub(vdata_b, vdata_a)
ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b)
assert ifsub == data_sub
ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b)
assert ifsub == vdata_b
data_add = self.add(vdata_b, vdata_a)
ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b)
assert ifadd == data_add
ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b)
assert ifadd == vdata_b
# Lane-type suffix groups used to parametrize the generated test classes.
bool_sfx = ("b8", "b16", "b32", "b64")
int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64")
fp_sfx = ("f32", "f64")
all_sfx = int_sfx + fp_sfx
# maps each suffix group to the test-case base class that covers it
tests_registry = {
    bool_sfx: _SIMD_BOOL,
    int_sfx : _SIMD_INT,
    fp_sfx : _SIMD_FP,
    ("f32",): _SIMD_FP32,
    ("f64",): _SIMD_FP64,
    all_sfx : _SIMD_ALL
}
# Generate one concrete pytest class per (SIMD target, lane suffix) pair.
for target_name, npyv in targets.items():
    simd_width = npyv.simd if npyv else ''
    pretty_name = target_name.split('__') # multi-target separator
    if len(pretty_name) > 1:
        # multi-target
        pretty_name = f"({' '.join(pretty_name)})"
    else:
        pretty_name = pretty_name[0]
    skip = ""
    skip_sfx = dict()
    # decide whether this target (or just its f64 lanes) must be skipped
    if not npyv:
        skip = f"target '{pretty_name}' isn't supported by current machine"
    elif not npyv.simd:
        skip = f"target '{pretty_name}' isn't supported by NPYV"
    elif not npyv.simd_f64:
        skip_sfx["f64"] = f"target '{pretty_name}' doesn't support double-precision"
    for sfxes, cls in tests_registry.items():
        for sfx in sfxes:
            skip_m = skip_sfx.get(sfx, skip)
            inhr = (cls,)
            attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name)
            # dynamically build the test class and register it at module level
            # so pytest's collector can discover it
            tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr)
            if skip_m:
                pytest.mark.skip(reason=skip_m)(tcls)
            globals()[tcls.__name__] = tcls
| |
import gzip
import sys
import os
from .config import get_db_dir
defaultdir = get_db_dir()
'''
From nodes.dmp
tax_id -- node id in GenBank taxonomy database
parent tax_id -- parent node id in GenBank taxonomy database
rank -- rank of this node (superkingdom, kingdom, ...)
embl code -- locus-name prefix; not unique
division id -- see division.dmp file
inherited div flag (1 or 0) -- 1 if node inherits division from parent
genetic code id -- see gencode.dmp file
inherited GC flag (1 or 0) -- 1 if node inherits genetic code from parent
mitochondrial genetic code id -- see gencode.dmp file
inherited MGC flag (1 or 0) -- 1 if node inherits mitochondrial gencode from parent
GenBank hidden flag (1 or 0) -- 1 if name is suppressed in GenBank entry lineage
hidden subtree root flag (1 or 0) -- 1 if this subtree has no sequence data yet
comments -- free-text comments and citations
'''
class TaxonNode:
    """One node record parsed from the taxonomy nodes.dmp file.

    Positional parameters follow the nodes.dmp column order documented
    above; any unexpected extra columns trigger a warning.
    """

    def __init__(self, t=None, p=None, r=None, e=None, d=None, i=None,
                 gc=None, igc=False, mgc=None, imgc=False, gh=False,
                 hs=False, c=None, *others):
        self.taxid = t
        self.parent = p
        self.rank = r
        self.embl = e
        self.division = d
        self.inherited = i
        self.geneticCode = gc
        self.inheritedGC = igc
        self.mitochondrialGeneticCode = mgc
        self.inheritedMitochondrialGeneticCode = imgc
        self.GenBankHidden = gh
        self.hiddenSubtree = hs
        self.comments = c
        if others:
            print("WARNING: {} :: {}".format(p, others))
'''
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...)
'''
class TaxonName:
    """One name record parsed from the taxonomy names.dmp file."""

    def __init__(self, t=None, n=None, u=None, nc=None):
        # tax_id, name text, unique-name variant, and name class
        self.taxid, self.name, self.unique, self.nameClass = t, n, u, nc
'''
Divisions file (division.dmp):
division id -- taxonomy database division id
division cde -- GenBank division code (three characters)
division name -- e.g. BCT, PLN, VRT, MAM, PRI...
comments
'''
class TaxonDivision:
    """One record parsed from the taxonomy division.dmp file."""

    def __init__(self, i=None, c=None, n=None, co=None):
        # note the parameter order is (id, code, name, comments)
        self.divid, self.code, self.name, self.comments = i, c, n, co
def read_taxa(directory=None):
    """
    Read the taxonomy tree. An alias for read_nodes().

    :param directory: optional taxonomy directory, forwarded to
        read_nodes(); the previous signature took no arguments, so
        calling read_taxa() with no arguments still works.
    :return: dict mapping tax_id (str) -> TaxonNode
    """
    return read_nodes(directory)
def read_nodes(directory=None):
    """
    Read the taxonomy nodes from nodes.dmp.

    :param directory: directory containing nodes.dmp; when falsy the
        configured default database directory is used (resolved lazily
        instead of being captured at import time).
    :return: dict mapping tax_id (str) -> TaxonNode
    """
    if not directory:
        directory = defaultdir
    taxa = {}
    # dmp format: fields separated by "\t|\t", each line terminated "\t|"
    with open(os.path.join(directory, 'nodes.dmp'), 'r') as fin:
        for line in fin:
            cols = line.rstrip('\t|\n').split('\t|\t')
            taxa[cols[0]] = TaxonNode(*cols)
    return taxa
def extended_names(directory=None):
    """
    Extended names returns "genbank synonym" and "synonym" as well as
    "scientific name" and "blast name". Because we are reading more
    names it is slower and consumes more memory.

    :param directory: directory containing names.dmp; when falsy the
        configured default database directory is used.
    :return: four dicts keyed by tax_id: scientific names, blast names,
        genbank synonyms and plain synonyms (each value is a TaxonName).
    """
    if not directory:
        directory = defaultdir
    names = {}
    blastname = {}
    genbankname = {}
    synonym = {}
    with open(os.path.join(directory, 'names.dmp'), 'r') as fin:
        for line in fin:
            cols = line.rstrip('\t|\n').split('\t|\t')
            t = TaxonName(*cols)
            # NOTE: "genbank synonym" must be tested before plain
            # "synonym" -- the substring check would match both.
            if "scientific name" in cols[3]:
                names[cols[0]] = t
            elif "blast name" in cols[3]:
                blastname[cols[0]] = t
            elif "genbank synonym" in cols[3]:
                genbankname[cols[0]] = t
            elif "synonym" in cols[3]:
                synonym[cols[0]] = t
    return names, blastname, genbankname, synonym
def read_names(directory=None):
    """
    Read the scientific and blast names from names.dmp.

    :param directory: directory containing names.dmp; when falsy the
        configured default database directory is used.
    :return: two dicts keyed by tax_id: scientific names and blast
        names (each value is a TaxonName).
    """
    if not directory:
        directory = defaultdir
    names = {}
    blastname = {}
    with open(os.path.join(directory, 'names.dmp'), 'r') as fin:
        for line in fin:
            cols = line.rstrip('\t|\n').split('\t|\t')
            t = TaxonName(*cols)
            # a record can only carry one name class, but keep the two
            # independent checks of the original implementation
            if "scientific name" in cols[3]:
                names[cols[0]] = t
            if "blast name" in cols[3]:
                blastname[cols[0]] = t
    return names, blastname
def read_divisions(directory=None):
    """
    Read the division.dmp file.

    :param directory: directory containing division.dmp; when falsy the
        configured default database directory is used.
    :return: dict mapping division id (str) -> TaxonDivision
    """
    if not directory:
        directory = defaultdir
    divs = {}
    with open(os.path.join(directory, 'division.dmp'), 'r') as fin:
        for line in fin:
            cols = line.rstrip('\t|\n').split('\t|\t')
            divs[cols[0]] = TaxonDivision(*cols)
    return divs
def read_gi_tax_id(dtype='nucl', directory=None):
    """
    Read gi_taxid.dmp. You can specify the type of database that you
    want to parse, default is nucl (nucleotide), can also accept prot
    (protein).

    :param dtype: 'nucl' or 'prot'
    :param directory: directory containing the .dmp.gz file; when falsy
        the configured default database directory is used.
    :return: dict of gi (str) -> taxid (str)
    """
    if not directory:
        directory = defaultdir
    if dtype not in ('nucl', 'prot'):
        sys.stderr.write("Type must be either nucl or prot, not " + dtype + "\n")
        sys.exit(-1)
    file_in = os.path.join(directory, "gi_taxid_" + dtype + ".dmp.gz")
    taxid = {}
    # open in text mode so gzip decodes for us; each line is "<gi>\t<taxid>"
    with gzip.open(file_in, 'rt') as fin:
        for line in fin:
            parts = line.strip().split("\t")
            taxid[parts[0]] = parts[1]
    return taxid
def read_tax_id_gi(dtype='nucl', directory=None):
    """
    Read gi_taxid.dmp. You can specify the type of database that you
    want to parse, default is nucl (nucleotide), can also accept prot
    (protein).

    NOTE: This method returns taxid -> gi not the other way around. This
    may be a one -> many mapping (as a single taxid maps to more than
    one gi), and so we return a list of gi's for each taxid.

    :param dtype: 'nucl' or 'prot'
    :param directory: directory containing the .dmp.gz file; when falsy
        the configured default database directory is used.
    :return: dict of taxid (str) -> list of gi (str)
    """
    if not directory:
        directory = defaultdir
    if dtype not in ('nucl', 'prot'):
        sys.stderr.write("Type must be either nucl or prot, not " + dtype + "\n")
        sys.exit(-1)
    file_in = os.path.join(directory, "gi_taxid_" + dtype + ".dmp.gz")
    tax_id = {}
    with gzip.open(file_in, 'rt') as fin:
        for line in fin:
            parts = line.strip().split("\t")
            # one taxid maps to many gis: accumulate in a list
            tax_id.setdefault(parts[1], []).append(parts[0])
    return tax_id
| |
from __future__ import absolute_import, unicode_literals
from mopidy.core import PlaybackState
from mopidy.internal import deprecation
from mopidy.mpd import exceptions, protocol
@protocol.commands.add('consume', state=protocol.BOOL)
def consume(context, state):
    """
    *musicpd.org, playback section:*
    ``consume {STATE}``
    Sets consume state to ``STATE``, ``STATE`` should be 0 or
    1. When consume is activated, each song played is removed from
    playlist.
    """
    # consume is tracked by the tracklist controller, not playback
    context.core.tracklist.set_consume(state)
@protocol.commands.add('crossfade', seconds=protocol.UINT)
def crossfade(context, seconds):
    """
    *musicpd.org, playback section:*
    ``crossfade {SECONDS}``
    Sets crossfading between songs.
    """
    # crossfading is not implemented; report that back to the client
    raise exceptions.MpdNotImplemented  # TODO
@protocol.commands.add('mixrampdb')
def mixrampdb(context, decibels):
    """
    *musicpd.org, playback section:*
    ``mixrampdb {deciBels}``
    Sets the threshold at which songs will be overlapped. Like crossfading but
    doesn't fade the track volume, just overlaps. The songs need to have
    MixRamp tags added by an external tool. 0dB is the normalized maximum
    volume so use negative values, I prefer -17dB. In the absence of mixramp
    tags crossfading will be used. See http://sourceforge.net/projects/mixramp
    """
    # MixRamp is not implemented; report that back to the client
    raise exceptions.MpdNotImplemented  # TODO
@protocol.commands.add('mixrampdelay', seconds=protocol.UINT)
def mixrampdelay(context, seconds):
    """
    *musicpd.org, playback section:*
    ``mixrampdelay {SECONDS}``
    Additional time subtracted from the overlap calculated by mixrampdb. A
    value of "nan" disables MixRamp overlapping and falls back to
    crossfading.
    """
    # MixRamp is not implemented; report that back to the client
    raise exceptions.MpdNotImplemented  # TODO
@protocol.commands.add('next')
def next_(context):
    """
    *musicpd.org, playback section:*
    ``next``
    Plays next song in the playlist.
    *MPD's behaviour when affected by repeat/random/single/consume:*
    Given a playlist of three tracks numbered 1, 2, 3, and a currently
    playing track ``c``. ``next_track`` is defined at the track that
    will be played upon calls to ``next``.
    Tests performed on MPD 0.15.4-1ubuntu3.
    ====== ====== ====== ======= ===== ===== ===== =====
    Inputs                       next_track
    ------------------------------- ------------------- -----
    repeat random single consume c = 1 c = 2 c = 3 Notes
    ====== ====== ====== ======= ===== ===== ===== =====
    T      T      T      T       2     3     EOPL
    T      T      T      .       Rand  Rand  Rand  [1]
    T      T      .      T       Rand  Rand  Rand  [4]
    T      T      .      .       Rand  Rand  Rand  [4]
    T      .      T      T       2     3     EOPL
    T      .      T      .       2     3     1
    T      .      .      T       3     3     EOPL
    T      .      .      .       2     3     1
    .      T      T      T       Rand  Rand  Rand  [3]
    .      T      T      .       Rand  Rand  Rand  [3]
    .      T      .      T       Rand  Rand  Rand  [2]
    .      T      .      .       Rand  Rand  Rand  [2]
    .      .      T      T       2     3     EOPL
    .      .      T      .       2     3     EOPL
    .      .      .      T       2     3     EOPL
    .      .      .      .       2     3     EOPL
    ====== ====== ====== ======= ===== ===== ===== =====
    - When end of playlist (EOPL) is reached, the current track is
      unset.
    - [1] When *random* and *single* is combined, ``next`` selects
      a track randomly at each invocation, and not just the next track
      in an internal prerandomized playlist.
    - [2] When *random* is active, ``next`` will skip through
      all tracks in the playlist in random order, and finally EOPL is
      reached.
    - [3] *single* has no effect in combination with *random*
      alone, or *random* and *consume*.
    - [4] When *random* and *repeat* is active, EOPL is never
      reached, but the playlist is played again, in the same random
      order as the first time.
    """
    # NOTE(review): the repeat/random/single/consume handling described
    # above lives in core.playback, not here -- verify there.
    return context.core.playback.next().get()
@protocol.commands.add('pause', state=protocol.BOOL)
def pause(context, state=None):
    """
    *musicpd.org, playback section:*
    ``pause {PAUSE}``
    Toggles pause/resumes playing, ``PAUSE`` is 0 or 1.
    *MPDroid:*
    - Calls ``pause`` without any arguments to toogle pause.
    """
    if state is not None:
        # explicit argument: 1 pauses, 0 resumes
        if state:
            context.core.playback.pause().get()
        else:
            context.core.playback.resume().get()
        return
    # no argument: deprecated toggle behaviour
    deprecation.warn('mpd.protocol.playback.pause:state_arg')
    playback_state = context.core.playback.get_state().get()
    if playback_state == PlaybackState.PLAYING:
        context.core.playback.pause().get()
    elif playback_state == PlaybackState.PAUSED:
        context.core.playback.resume().get()
@protocol.commands.add('play', songpos=protocol.INT)
def play(context, songpos=None):
    """
    *musicpd.org, playback section:*
    ``play [SONGPOS]``
    Begins playing the playlist at song number ``SONGPOS``.
    The original MPD server resumes from the paused state on ``play``
    without arguments.
    *Clarifications:*
    - ``play "-1"`` when playing is ignored.
    - ``play "-1"`` when paused resumes playback.
    - ``play "-1"`` when stopped with a current track starts playback at the
      current track.
    - ``play "-1"`` when stopped without a current track, e.g. after playlist
      replacement, starts playback at the first track.
    *BitMPC:*
    - issues ``play 6`` without quotes around the argument.
    """
    if songpos is None:
        # bare ``play``: resume/start from the current state
        return context.core.playback.play().get()
    if songpos == -1:
        return _play_minus_one(context)
    try:
        # slice() returns [] past the end, so [0] raises for bad positions
        tl_track = context.core.tracklist.slice(songpos, songpos + 1).get()[0]
        return context.core.playback.play(tl_track).get()
    except IndexError:
        raise exceptions.MpdArgError('Bad song index')
def _play_minus_one(context):
    """Resolve ``play "-1"`` / ``playid "-1"`` based on playback state."""
    state = context.core.playback.get_state().get()
    if state == PlaybackState.PLAYING:
        return None  # already playing: nothing to do
    if state == PlaybackState.PAUSED:
        return context.core.playback.resume().get()
    # stopped: prefer the current track, then the first tracklist entry
    current = context.core.playback.get_current_tl_track().get()
    if current is not None:
        return context.core.playback.play(current).get()
    first = context.core.tracklist.slice(0, 1).get()
    if first:
        return context.core.playback.play(first[0]).get()
    return None  # empty tracklist: fail silently
@protocol.commands.add('playid', tlid=protocol.INT)
def playid(context, tlid):
    """
    *musicpd.org, playback section:*
    ``playid [SONGID]``
    Begins playing the playlist at song ``SONGID``.
    *Clarifications:*
    - ``playid "-1"`` when playing is ignored.
    - ``playid "-1"`` when paused resumes playback.
    - ``playid "-1"`` when stopped with a current track starts playback at the
      current track.
    - ``playid "-1"`` when stopped without a current track, e.g. after playlist
      replacement, starts playback at the first track.
    """
    if tlid == -1:
        return _play_minus_one(context)
    matches = context.core.tracklist.filter({'tlid': [tlid]}).get()
    if not matches:
        raise exceptions.MpdNoExistError('No such song')
    return context.core.playback.play(matches[0]).get()
@protocol.commands.add('previous')
def previous(context):
    """
    *musicpd.org, playback section:*
    ``previous``
    Plays previous song in the playlist.
    *MPD's behaviour when affected by repeat/random/single/consume:*
    Given a playlist of three tracks numbered 1, 2, 3, and a currently
    playing track ``c``. ``previous_track`` is defined at the track
    that will be played upon ``previous`` calls.
    Tests performed on MPD 0.15.4-1ubuntu3.
    ====== ====== ====== ======= ===== ===== =====
    Inputs                       previous_track
    ------------------------------- -------------------
    repeat random single consume c = 1 c = 2 c = 3
    ====== ====== ====== ======= ===== ===== =====
    T      T      T      T       Rand? Rand? Rand?
    T      T      T      .       3     1     2
    T      T      .      T       Rand? Rand? Rand?
    T      T      .      .       3     1     2
    T      .      T      T       3     1     2
    T      .      T      .       3     1     2
    T      .      .      T       3     1     2
    T      .      .      .       3     1     2
    .      T      T      T       c     c     c
    .      T      T      .       c     c     c
    .      T      .      T       c     c     c
    .      T      .      .       c     c     c
    .      .      T      T       1     1     2
    .      .      T      .       1     1     2
    .      .      .      T       1     1     2
    .      .      .      .       1     1     2
    ====== ====== ====== ======= ===== ===== =====
    - If :attr:`time_position` of the current track is 15s or more,
      ``previous`` should do a seek to time position 0.
    """
    # NOTE(review): the mode handling described above lives in
    # core.playback, not here -- verify there.
    return context.core.playback.previous().get()
@protocol.commands.add('random', state=protocol.BOOL)
def random(context, state):
    """
    *musicpd.org, playback section:*
    ``random {STATE}``
    Sets random state to ``STATE``, ``STATE`` should be 0 or 1.
    """
    # random is tracked by the tracklist controller, not playback
    context.core.tracklist.set_random(state)
@protocol.commands.add('repeat', state=protocol.BOOL)
def repeat(context, state):
    """
    *musicpd.org, playback section:*
    ``repeat {STATE}``
    Sets repeat state to ``STATE``, ``STATE`` should be 0 or 1.
    """
    # repeat is tracked by the tracklist controller, not playback
    context.core.tracklist.set_repeat(state)
@protocol.commands.add('replay_gain_mode')
def replay_gain_mode(context, mode):
    """
    *musicpd.org, playback section:*
    ``replay_gain_mode {MODE}``
    Sets the replay gain mode. One of ``off``, ``track``, ``album``.
    Changing the mode during playback may take several seconds, because
    the new settings does not affect the buffered data.
    This command triggers the options idle event.
    """
    # replay gain is not implemented; report that back to the client
    raise exceptions.MpdNotImplemented  # TODO
@protocol.commands.add('replay_gain_status')
def replay_gain_status(context):
    """
    *musicpd.org, playback section:*
    ``replay_gain_status``
    Prints replay gain options. Currently, only the variable
    ``replay_gain_mode`` is returned.
    """
    # replay gain is not implemented, so always report it as off
    return 'replay_gain_mode: off'  # TODO
@protocol.commands.add('seek', songpos=protocol.UINT, seconds=protocol.UINT)
def seek(context, songpos, seconds):
    """
    *musicpd.org, playback section:*
    ``seek {SONGPOS} {TIME}``
    Seeks to the position ``TIME`` (in seconds) of entry ``SONGPOS`` in
    the playlist.
    *Droid MPD:*
    - issues ``seek 1 120`` without quotes around the arguments.
    """
    current = context.core.playback.get_current_tl_track().get()
    # switch tracks first when SONGPOS is not the current track
    if context.core.tracklist.index(current).get() != songpos:
        play(context, songpos)
    context.core.playback.seek(seconds * 1000).get()
@protocol.commands.add('seekid', tlid=protocol.UINT, seconds=protocol.UINT)
def seekid(context, tlid, seconds):
    """
    *musicpd.org, playback section:*
    ``seekid {SONGID} {TIME}``
    Seeks to the position ``TIME`` (in seconds) of song ``SONGID``.
    """
    current = context.core.playback.get_current_tl_track().get()
    # switch to the requested track first unless it is already current
    if not current or current.tlid != tlid:
        playid(context, tlid)
    context.core.playback.seek(seconds * 1000).get()
@protocol.commands.add('seekcur')
def seekcur(context, time):
    """
    *musicpd.org, playback section:*
    ``seekcur {TIME}``
    Seeks to the position ``TIME`` within the current song. If prefixed by
    '+' or '-', then the time is relative to the current playing position.
    """
    if time.startswith(('+', '-')):
        # relative seek: offset from the current playback position
        position = context.core.playback.get_time_position().get()
        position += protocol.INT(time) * 1000
    else:
        # absolute seek from the start of the track
        position = protocol.UINT(time) * 1000
    context.core.playback.seek(position).get()
@protocol.commands.add('setvol', volume=protocol.INT)
def setvol(context, volume):
    """
    *musicpd.org, playback section:*
    ``setvol {VOL}``
    Sets volume to ``VOL``, the range of volume is 0-100.
    *Droid MPD:*
    - issues ``setvol 50`` without quotes around the argument.
    """
    # NOTE: we use INT as clients can pass in +N etc.
    clamped = max(0, min(volume, 100))
    if not context.core.mixer.set_volume(clamped).get():
        raise exceptions.MpdSystemError('problems setting volume')
@protocol.commands.add('single', state=protocol.BOOL)
def single(context, state):
    """
    *musicpd.org, playback section:*
    ``single {STATE}``
    Sets single state to ``STATE``, ``STATE`` should be 0 or 1. When
    single is activated, playback is stopped after current song, or
    song is repeated if the ``repeat`` mode is enabled.
    """
    # single is tracked by the tracklist controller, not playback
    context.core.tracklist.set_single(state)
@protocol.commands.add('stop')
def stop(context):
    """
    *musicpd.org, playback section:*
    ``stop``
    Stops playing.
    """
    # fire-and-forget: the result future is intentionally not awaited
    context.core.playback.stop()
@protocol.commands.add('volume', change=protocol.INT)
def volume(context, change):
    """
    *musicpd.org, playback section:*
    ``volume {CHANGE}``
    Changes volume by amount ``CHANGE``.
    Note: ``volume`` is deprecated, use ``setvol`` instead.
    """
    if not -100 <= change <= 100:
        raise exceptions.MpdArgError('Invalid volume value')
    old_volume = context.core.mixer.get_volume().get()
    if old_volume is None:
        # no mixer available to read the current volume from
        raise exceptions.MpdSystemError('problems setting volume')
    target = max(0, min(old_volume + change, 100))
    if not context.core.mixer.set_volume(target).get():
        raise exceptions.MpdSystemError('problems setting volume')
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake VMware VI API implementation.
"""
import collections
import pprint
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova.virt.vmwareapi import error_util
# Managed-object "tables" tracked by the fake in-memory database below.
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
            'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
            'files', 'ClusterComputeResource', 'HostStorageSystem']

# Size reported for fake datastore files.
_FAKE_FILE_SIZE = 1024

# The fake DB: table name -> {object ref: object}, except 'files' which
# is kept as a plain list of file names (see cleanup()).
_db_content = {}

LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
    """Log DB Contents."""
    prefix = msg or ""
    LOG.debug(_("%(text)s: _db_content => %(content)s"),
              {'text': prefix, 'content': pprint.pformat(_db_content)})
def reset(vc=False):
    """Resets the db contents.

    When ``vc`` is True a second host/datastore/datacenter pair and two
    clusters are created in addition to the base inventory.
    """
    cleanup()
    create_network()
    create_host_network_system()
    create_host_storage_system()
    create_host()
    ds_ref1 = create_datastore('ds1', 1024, 500)
    if vc:
        # second host/datastore pair for the vCenter-style topology
        create_host()
        ds_ref2 = create_datastore('ds2', 1024, 500)
    create_datacenter('dc1', ds_ref1)
    if vc:
        create_datacenter('dc2', ds_ref2)
    create_res_pool()
    if vc:
        create_cluster('test_cluster', ds_ref1)
        create_cluster('test_cluster2', ds_ref2)
def cleanup():
    """Clear the db contents."""
    for table in _CLASSES:
        # the fake datastore keeps file references as a flat list of
        # names; every other table maps object refs to objects
        _db_content[table] = [] if table == 'files' else {}
def _create_object(table, table_obj):
    """Create an object in the db."""
    # keyed by the object's managed object reference (table_obj.obj)
    _db_content[table][table_obj.obj] = table_obj
def _get_object(obj_ref):
    """Get the object for the given reference."""
    # obj_ref.type selects the table, the reference itself is the key
    return _db_content[obj_ref.type][obj_ref]
def _get_objects(obj_type):
    """Get all objects of the given type wrapped in a FakeRetrieveResult."""
    result = FakeRetrieveResult()
    for stored in _db_content[obj_type].values():
        result.add_object(stored)
    return result
def _convert_to_array_of_mor(mors):
    """Wraps the given array into a DataObject."""
    array_of_mors = DataObject()
    # mimics the SOAP ArrayOfManagedObjectReference wrapper type
    array_of_mors.ManagedObjectReference = mors
    return array_of_mors
class FakeRetrieveResult(object):
    """Object to retrieve an ObjectContent list."""

    def __init__(self):
        # ObjectContent instances collected so far
        self.objects = []

    def add_object(self, obj):
        """Append *obj* to the result set.

        The parameter was renamed from ``object``, which shadowed the
        builtin; all call sites in this module pass it positionally.
        """
        self.objects.append(obj)
class MissingProperty(object):
    """Missing object in ObjectContent's missing set."""

    def __init__(self, path='fake-path', message='fake_message',
                 method_fault=None):
        self.path = path
        # build the nested fault object before attaching it
        fault = DataObject()
        fault.localizedMessage = message
        fault.fault = method_fault
        self.fault = fault
def _get_object_refs(obj_type):
    """Get object References of the type."""
    # the dict keys are the managed object references themselves
    return list(_db_content[obj_type])
def _update_object(table, table_obj):
    """Update objects of the type."""
    # same keying scheme as _create_object: overwrite by reference
    _db_content[table][table_obj.obj] = table_obj
class Prop(object):
    """Property Object base class: a (name, value) pair."""

    def __init__(self, name=None, val=None):
        self.name, self.val = name, val
class ManagedObjectReference(object):
    """A managed object reference is a remote identifier."""

    def __init__(self, name="ManagedObject", value=None):
        # the original code evaluated ``super(...)`` without calling
        # __init__ on it -- a silent no-op; call it properly
        super(ManagedObjectReference, self).__init__()
        # Managed Object Reference value attributes
        # typically have values like vm-123 or
        # host-232 and not UUID.
        self.value = value
        # Managed Object Reference type
        # attributes hold the name of the type
        # of the vCenter object the value
        # attribute is the identifier for
        self.type = name
        self._type = name
class ObjectContent(object):
    """ObjectContent array holds dynamic properties."""
    # This class is a *fake* of a class sent back to us by
    # SOAP. It has its own names. These names are decided
    # for us by the API we are *faking* here.
    def __init__(self, obj_ref, prop_list=None, missing_list=None):
        self.obj = obj_ref
        # normalize non-iterable inputs (including the None defaults) to
        # empty lists. NOTE(review): collections.Iterable was removed in
        # Python >= 3.10; kept for compatibility with this codebase's era.
        if not isinstance(prop_list, collections.Iterable):
            prop_list = []
        if not isinstance(missing_list, collections.Iterable):
            missing_list = []
        # propSet is the name your Python code will need to
        # use since this is the name that the API will use
        self.propSet = prop_list
        # missingSet is the name your python code will
        # need to use since this is the name that the
        # API we are talking to will use.
        self.missingSet = missing_list
class ManagedObject(object):
    """Managed Object base class.

    Attribute access is routed through ``propSet`` (a list of Prop
    entries) to mimic how the real VI SDK exposes dynamic properties.
    """
    # class-wide counter used to hand out unique managed object ids;
    # note each subclass gets its own counter via self.__class__
    _counter = 0

    def __init__(self, mo_id_prefix="obj"):
        """Sets the obj property which acts as a reference to the object."""
        # use object.__setattr__ to bypass the overridden __setattr__
        # below, so these bookkeeping attrs don't land in propSet
        object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix))
        object.__setattr__(self, 'propSet', [])
        object.__setattr__(self, 'obj',
                           ManagedObjectReference(self.__class__.__name__,
                                                  self.mo_id))

    def set(self, attr, val):
        """
        Sets an attribute value. Not using the __setattr__ directly for we
        want to set attributes of the type 'a.b.c' and using this function
        class we set the same.
        """
        self.__setattr__(attr, val)

    def get(self, attr):
        """
        Gets an attribute. Used as an intermediary to get nested
        property like 'a.b.c' value.
        """
        return self.__getattr__(attr)

    def __setattr__(self, attr, val):
        # TODO(hartsocks): this is adds unnecessary complexity to the class
        # update the Prop in place if the name already exists ...
        for prop in self.propSet:
            if prop.name == attr:
                prop.val = val
                return
        # ... otherwise append a new Prop entry
        elem = Prop()
        elem.name = attr
        elem.val = val
        self.propSet.append(elem)

    def __getattr__(self, attr):
        # TODO(hartsocks): remove this
        # in a real ManagedObject you have to iterate the propSet
        # in a real ManagedObject, the propSet is a *set* not a list
        for elem in self.propSet:
            if elem.name == attr:
                return elem.val
        msg = _("Property %(attr)s not set for the managed object %(name)s")
        raise exception.NovaException(msg % {'attr': attr,
                                             'name': self.__class__.__name__})

    def _generate_moid(self, prefix):
        """Generates a new Managed Object ID."""
        self.__class__._counter += 1
        return prefix + "-" + str(self.__class__._counter)

    def __repr__(self):
        # render the dynamic properties as a JSON object for readability
        return jsonutils.dumps(dict([(elem.name, elem.val)
                                     for elem in self.propSet]))
class DataObject(object):
    """Data object base class."""

    def __init__(self, obj_name=None):
        self.obj_name = obj_name

    def __repr__(self):
        # show every attribute that has been set on the instance
        return "{}".format(self.__dict__)
class HostInternetScsiHba(DataObject):
    """Fake iSCSI Host Bus Adapter with fixed device identifiers."""

    def __init__(self):
        super(HostInternetScsiHba, self).__init__()
        self.device = 'vmhba33'
        self.key = 'key-vmhba33'
class VirtualDisk(DataObject):
    """Fake VirtualDisk device."""

    def __init__(self):
        super(VirtualDisk, self).__init__()
        # default device key and SCSI unit number
        self.key, self.unitNumber = 0, 0
class VirtualDiskFlatVer2BackingInfo(DataObject):
    """Fake flat (ver2) virtual disk backing info."""

    def __init__(self):
        super(VirtualDiskFlatVer2BackingInfo, self).__init__()
        # neither thin-provisioned nor eagerly scrubbed by default
        self.thinProvisioned = False
        self.eagerlyScrub = False
class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
    """Fake raw-disk-mapping (ver1) virtual disk backing info."""

    def __init__(self):
        super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
        self.lunUuid = ""
class VirtualLsiLogicController(DataObject):
    """Fake LSI Logic (parallel SCSI) controller; carries no extra state."""
    pass
class VirtualLsiLogicSASController(DataObject):
    """Fake LSI Logic SAS controller; carries no extra state."""
    pass
class VirtualPCNet32(DataObject):
    """Fake PCNet32 virtual NIC with a fixed device key of 4000."""
    def __init__(self):
        super(VirtualPCNet32, self).__init__()
        self.key = 4000
class VirtualMachine(ManagedObject):
    """Fake VirtualMachine managed object.

    Property values are seeded from **kwargs (name, conn_state, guest,
    ds, toolsstatus, toolsrunningstate, powerstate, vmPathName, numCpu,
    mem, virtual_device, extra_config, runtime_host); defaults describe
    a small powered-on VM.
    """
    def __init__(self, **kwargs):
        super(VirtualMachine, self).__init__("vm")
        self.set("name", kwargs.get("name", 'test-vm'))
        self.set("runtime.connectionState",
                 kwargs.get("conn_state", "connected"))
        self.set("summary.config.guestId", kwargs.get("guest", "otherGuest"))
        ds_do = kwargs.get("ds", None)
        self.set("datastore", _convert_to_array_of_mor(ds_do))
        self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
                                              "toolsOk"))
        self.set("summary.guest.toolsRunningStatus", kwargs.get(
                                "toolsrunningstate", "guestToolsRunning"))
        self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
        self.set("config.files.vmPathName", kwargs.get("vmPathName"))
        self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
        self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
        self.set("config.hardware.device", kwargs.get("virtual_device", None))
        self.set("config.extraConfig", kwargs.get("extra_config", None))
        self.set('runtime.host', kwargs.get("runtime_host", None))
        # Kept directly too so reconfig() can splice the original first
        # device back into config.hardware.device.
        self.device = kwargs.get("virtual_device")
    def reconfig(self, factory, val):
        """
        Called to reconfigure the VM. Actually customizes the property
        setting of the Virtual Machine object.
        """
        try:
            # Specs without at least two device changes are ignored.
            if not hasattr(val, 'deviceChange'):
                return
            if len(val.deviceChange) < 2:
                return
            # Case of Reconfig of VM to attach disk
            controller_key = val.deviceChange[1].device.controllerKey
            filename = val.deviceChange[1].device.backing.fileName
            disk = VirtualDisk()
            disk.controllerKey = controller_key
            disk_backing = VirtualDiskFlatVer2BackingInfo()
            disk_backing.fileName = filename
            disk_backing.key = -101
            disk.backing = disk_backing
            controller = VirtualLsiLogicController()
            controller.key = controller_key
            self.set("config.hardware.device", [disk, controller,
                                                self.device[0]])
        except AttributeError:
            # Case of Reconfig of VM to set extra params
            self.set("config.extraConfig", val.extraConfig)
class Network(ManagedObject):
    """Fake Network managed object whose summary name is 'vmnet0'."""
    def __init__(self):
        super(Network, self).__init__("network")
        self.set("summary.name", "vmnet0")
class ResourcePool(ManagedObject):
    """Fake ResourcePool with canned usage figures and allocation limits."""
    def __init__(self, name="test_ResPool", value="resgroup-test"):
        super(ResourcePool, self).__init__("rp")
        self.set("name", name)
        summary = DataObject()
        runtime = DataObject()
        config = DataObject()
        memory = DataObject()
        cpu = DataObject()
        memoryAllocation = DataObject()
        cpuAllocation = DataObject()
        # Usage: 1000 MiB max / 500 MiB used memory (bytes), 10000 max /
        # 1000 used CPU.
        memory.maxUsage = 1000 * 1024 * 1024
        memory.overallUsage = 500 * 1024 * 1024
        cpu.maxUsage = 10000
        cpu.overallUsage = 1000
        runtime.cpu = cpu
        runtime.memory = memory
        summary.runtime = runtime
        cpuAllocation.limit = 10000
        memoryAllocation.limit = 1024
        memoryAllocation.reservation = 1024
        config.memoryAllocation = memoryAllocation
        config.cpuAllocation = cpuAllocation
        self.set("summary", summary)
        self.set("summary.runtime.memory", memory)
        self.set("config", config)
        # parent and owner both point at the same mo-ref value.
        parent = ManagedObjectReference(value=value,
                                        name=name)
        owner = ManagedObjectReference(value=value,
                                       name=name)
        self.set("parent", parent)
        self.set("owner", owner)
class DatastoreHostMount(DataObject):
    """Fake DatastoreHostMount whose key wraps the first registered host.

    Requires at least one HostSystem to already exist in the fake db.
    """
    def __init__(self, value='host-100'):
        super(DatastoreHostMount, self).__init__()
        # dict.keys() is a non-indexable view on Python 3, so materialize
        # it before taking the first host ref.
        host_key = list(_db_content["HostSystem"].keys())[0]
        host_ref = _db_content["HostSystem"][host_key].obj
        host_system = DataObject()
        host_system.ManagedObjectReference = [host_ref]
        host_system.value = value
        self.key = host_system
class ClusterComputeResource(ManagedObject):
    """Fake cluster whose summary stats are recomputed as hosts are added."""
    def __init__(self, name="test_cluster"):
        super(ClusterComputeResource, self).__init__("domain")
        self.set("name", name)
        self.set("host", None)
        self.set("datastore", None)
        self.set("resourcePool", None)
        # Empty aggregate summary; effectiveCpu is fixed, the rest is
        # filled in by _update_summary() as hosts join.
        summary = DataObject()
        summary.numHosts = 0
        summary.numCpuCores = 0
        summary.numCpuThreads = 0
        summary.numEffectiveHosts = 0
        summary.totalMemory = 0
        summary.effectiveMemory = 0
        summary.effectiveCpu = 10000
        self.set("summary", summary)
    def _add_root_resource_pool(self, r_pool):
        # Attach the cluster's root resource pool reference (no-op for a
        # falsy r_pool).
        if r_pool:
            self.set("resourcePool", r_pool)
    def _add_host(self, host_sys):
        # Append a host ref, lazily creating the container on first use.
        if host_sys:
            hosts = self.get("host")
            if hosts is None:
                hosts = DataObject()
                hosts.ManagedObjectReference = []
                self.set("host", hosts)
            hosts.ManagedObjectReference.append(host_sys)
            # Update summary every time a new host is added
            self._update_summary()
    def _add_datastore(self, datastore):
        # Append a datastore ref, lazily creating the container on first use.
        if datastore:
            datastores = self.get("datastore")
            if datastores is None:
                datastores = DataObject()
                datastores.ManagedObjectReference = []
                self.set("datastore", datastores)
            datastores.ManagedObjectReference.append(datastore)
    # Method to update summary of a cluster upon host addition
    def _update_summary(self):
        """Recompute aggregate CPU/memory stats from all member hosts.

        Disconnected hosts still count toward cores/threads/totalMemory
        but not toward effective memory or effective host count.
        """
        summary = self.get("summary")
        summary.numHosts = 0
        summary.numCpuCores = 0
        summary.numCpuThreads = 0
        summary.numEffectiveHosts = 0
        summary.totalMemory = 0
        summary.effectiveMemory = 0
        hosts = self.get("host")
        # Compute the aggregate stats
        summary.numHosts = len(hosts.ManagedObjectReference)
        for host_ref in hosts.ManagedObjectReference:
            host_sys = _get_object(host_ref)
            connected = host_sys.get("connected")
            host_summary = host_sys.get("summary")
            summary.numCpuCores += host_summary.hardware.numCpuCores
            summary.numCpuThreads += host_summary.hardware.numCpuThreads
            summary.totalMemory += host_summary.hardware.memorySize
            # effectiveMemory is tracked in MiB; memorySize is bytes.
            free_memory = (host_summary.hardware.memorySize / (1024 * 1024)
                           - host_summary.quickStats.overallMemoryUsage)
            summary.effectiveMemory += free_memory if connected else 0
            summary.numEffectiveHosts += 1 if connected else 0
        self.set("summary", summary)
class Datastore(ManagedObject):
    """Fake Datastore managed object.

    :param name: value reported in summary.name
    :param capacity: total capacity in GiB
    :param free: free space in GiB
    """
    def __init__(self, name="fake-ds", capacity=1024, free=500):
        super(Datastore, self).__init__("ds")
        self.set("summary.type", "VMFS")
        self.set("summary.name", name)
        # Bug fix: the capacity/free arguments used to be ignored and the
        # values hard-coded to 1024 GiB / 500 GiB. Honour the parameters;
        # the defaults preserve the old behaviour.
        self.set("summary.capacity", capacity * 1024 * 1024 * 1024)
        self.set("summary.freeSpace", free * 1024 * 1024 * 1024)
        self.set("summary.accessible", True)
class HostNetworkSystem(ManagedObject):
    """Fake HostNetworkSystem exposing a single physical NIC (vmnic0)."""
    def __init__(self, name="networkSystem"):
        super(HostNetworkSystem, self).__init__("ns")
        self.set("name", name)
        nic = DataObject()
        nic.device = "vmnic0"
        pnic_info = DataObject()
        pnic_info.PhysicalNic = [nic]
        self.set("networkInfo.pnic", pnic_info)
class HostStorageSystem(ManagedObject):
    """Fake HostStorageSystem; storage properties are attached to it later
    by HostSystem.__init__."""
    def __init__(self):
        super(HostStorageSystem, self).__init__("storageSystem")
class HostSystem(ManagedObject):
    """Fake HostSystem with canned hardware, network and storage state.

    :param name: host name
    :param connected: value of the 'connected' property consumed by
                      ClusterComputeResource._update_summary
    """
    def __init__(self, name="ha-host", connected=True):
        super(HostSystem, self).__init__("host")
        self.set("name", name)
        if _db_content.get("HostNetworkSystem", None) is None:
            create_host_network_system()
        if not _get_object_refs('HostStorageSystem'):
            create_host_storage_system()
        # Bug fix: dict.keys() is a non-indexable view on Python 3, so
        # materialize it before taking the first element.
        host_net_key = list(_db_content["HostNetworkSystem"].keys())[0]
        host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
        self.set("configManager.networkSystem", host_net_sys)
        host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
        self.set("configManager.storageSystem", host_storage_sys_key)
        # Canned hardware: 2 packages / 8 cores / 16 threads, 1 GiB RAM.
        summary = DataObject()
        hardware = DataObject()
        hardware.numCpuCores = 8
        hardware.numCpuPkgs = 2
        hardware.numCpuThreads = 16
        hardware.vendor = "Intel"
        hardware.cpuModel = "Intel(R) Xeon(R)"
        hardware.uuid = "host-uuid"
        hardware.memorySize = 1024 * 1024 * 1024
        summary.hardware = hardware
        quickstats = DataObject()
        quickstats.overallMemoryUsage = 500
        summary.quickStats = quickstats
        product = DataObject()
        product.name = "VMware ESXi"
        product.version = "5.0.0"
        config = DataObject()
        config.product = product
        summary.config = config
        pnic_do = DataObject()
        pnic_do.device = "vmnic0"
        net_info_pnic = DataObject()
        net_info_pnic.PhysicalNic = [pnic_do]
        self.set("summary", summary)
        self.set("capability.maxHostSupportedVcpus", 600)
        self.set("summary.runtime.inMaintenanceMode", False)
        self.set("runtime.connectionState", "connected")
        self.set("summary.hardware", hardware)
        self.set("config.network.pnic", net_info_pnic)
        self.set("connected", connected)
        if _db_content.get("Network", None) is None:
            create_network()
        # Same Python 3 keys() fix as above.
        net_key = list(_db_content["Network"].keys())[0]
        net_ref = _db_content["Network"][net_key].obj
        network_do = DataObject()
        network_do.ManagedObjectReference = [net_ref]
        self.set("network", network_do)
        # One vSwitch with one uplink and one port group (vmnet0, vlan 0).
        vswitch_do = DataObject()
        vswitch_do.pnic = ["vmnic0"]
        vswitch_do.name = "vSwitch0"
        vswitch_do.portgroup = ["PortGroup-vmnet0"]
        net_swicth = DataObject()
        net_swicth.HostVirtualSwitch = [vswitch_do]
        self.set("config.network.vswitch", net_swicth)
        host_pg_do = DataObject()
        host_pg_do.key = "PortGroup-vmnet0"
        pg_spec = DataObject()
        pg_spec.vlanId = 0
        pg_spec.name = "vmnet0"
        host_pg_do.spec = pg_spec
        host_pg = DataObject()
        host_pg.HostPortGroup = [host_pg_do]
        self.set("config.network.portgroup", host_pg)
        # One iSCSI HBA, mirrored onto the HostStorageSystem object too.
        config = DataObject()
        storageDevice = DataObject()
        iscsi_hba = HostInternetScsiHba()
        iscsi_hba.iScsiName = "iscsi-name"
        host_bus_adapter_array = DataObject()
        host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba]
        storageDevice.hostBusAdapter = host_bus_adapter_array
        config.storageDevice = storageDevice
        self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)
        # Set the same on the storage system managed object
        host_storage_sys = _get_object(host_storage_sys_key)
        host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
                             host_bus_adapter_array)
    def _add_iscsi_target(self, data):
        """Register a canned iSCSI LUN/target from *data* (expects
        'target_portal' and 'target_iqn' keys)."""
        default_lun = DataObject()
        default_lun.scsiLun = 'key-vim.host.ScsiDisk-010'
        default_lun.key = 'key-vim.host.ScsiDisk-010'
        default_lun.deviceName = 'fake-device'
        default_lun.uuid = 'fake-uuid'
        scsi_lun_array = DataObject()
        scsi_lun_array.ScsiLun = [default_lun]
        self.set("config.storageDevice.scsiLun", scsi_lun_array)
        transport = DataObject()
        transport.address = [data['target_portal']]
        transport.iScsiName = data['target_iqn']
        default_target = DataObject()
        default_target.lun = [default_lun]
        default_target.transport = transport
        iscsi_adapter = DataObject()
        iscsi_adapter.adapter = 'key-vmhba33'
        iscsi_adapter.transport = transport
        iscsi_adapter.target = [default_target]
        iscsi_topology = DataObject()
        iscsi_topology.adapter = [iscsi_adapter]
        self.set("config.storageDevice.scsiTopology", iscsi_topology)
    def _add_port_group(self, spec):
        """Adds a port group to the host system object in the db."""
        pg_name = spec.name
        vswitch_name = spec.vswitchName
        vlanid = spec.vlanId
        vswitch_do = DataObject()
        vswitch_do.pnic = ["vmnic0"]
        vswitch_do.name = vswitch_name
        vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
        vswitches = self.get("config.network.vswitch").HostVirtualSwitch
        vswitches.append(vswitch_do)
        host_pg_do = DataObject()
        host_pg_do.key = "PortGroup-%s" % pg_name
        pg_spec = DataObject()
        pg_spec.vlanId = vlanid
        pg_spec.name = pg_name
        host_pg_do.spec = pg_spec
        host_pgrps = self.get("config.network.portgroup").HostPortGroup
        host_pgrps.append(host_pg_do)
class Datacenter(ManagedObject):
    """Fake Datacenter with a vmFolder, one network and one datastore ref."""
    def __init__(self, name="ha-datacenter", ds_ref=None):
        super(Datacenter, self).__init__("dc")
        self.set("name", name)
        self.set("vmFolder", "vm_folder_ref")
        if _db_content.get("Network", None) is None:
            create_network()
        # Bug fix: dict.keys() is a non-indexable view on Python 3, so
        # materialize it before taking the first network ref.
        net_key = list(_db_content["Network"].keys())[0]
        net_ref = _db_content["Network"][net_key].obj
        network_do = DataObject()
        network_do.ManagedObjectReference = [net_ref]
        self.set("network", network_do)
        datastore = DataObject()
        datastore.ManagedObjectReference = [ds_ref]
        self.set("datastore", datastore)
class Task(ManagedObject):
    """Fake Task carrying name/state/result in its 'info' property."""
    def __init__(self, task_name, state="running", result=None):
        super(Task, self).__init__("Task")
        task_info = DataObject()
        task_info.name = task_name
        task_info.state = state
        task_info.result = result
        self.set("info", task_info)
def create_host_network_system():
    """Create a fake HostNetworkSystem and register it in the fake db."""
    _create_object("HostNetworkSystem", HostNetworkSystem())
def create_host_storage_system():
    """Create a fake HostStorageSystem and register it in the fake db."""
    _create_object("HostStorageSystem", HostStorageSystem())
def create_host():
    """Create a default fake HostSystem and register it in the fake db."""
    _create_object('HostSystem', HostSystem())
def create_datacenter(name, ds_ref=None):
    """Create a fake Datacenter (optionally linked to *ds_ref*)."""
    _create_object('Datacenter', Datacenter(name, ds_ref))
def create_datastore(name, capacity, free):
    """Create and register a fake Datastore; return its managed object ref."""
    ds = Datastore(name, capacity, free)
    _create_object('Datastore', ds)
    return ds.obj
def create_res_pool():
    """Create a default fake ResourcePool and register it in the fake db."""
    _create_object('ResourcePool', ResourcePool())
def create_network():
    """Create a fake Network and register it in the fake db."""
    _create_object('Network', Network())
def create_cluster(name, ds_ref):
    """Create and register a fake cluster wired to existing fake objects.

    NOTE(review): assumes at least two HostSystem objects and one
    ResourcePool are already registered in the fake db -- confirm that
    callers set these up first.
    """
    cluster = ClusterComputeResource(name=name)
    cluster._add_host(_get_object_refs("HostSystem")[0])
    cluster._add_host(_get_object_refs("HostSystem")[1])
    cluster._add_datastore(ds_ref)
    cluster._add_root_resource_pool(_get_object_refs("ResourcePool")[0])
    _create_object('ClusterComputeResource', cluster)
def create_task(task_name, state="running", result=None):
    """Register a fake Task and return it (callers typically use .obj)."""
    new_task = Task(task_name, state, result)
    _create_object("Task", new_task)
    return new_task
def _add_file(file_path):
    """Record *file_path* as existing in the fake datastore db."""
    _db_content["files"].append(file_path)
def _remove_file(file_path):
    """Remove a file (or every file under a folder path) from the fake db.

    Raises NoFilesFound when the db has no file table and FileNotFound
    when a specific .vmdk path is missing.
    """
    files = _db_content.get("files")
    if files is None:
        raise exception.NoFilesFound()
    # Check if the remove is for a single file object or for a folder
    if file_path.find(".vmdk") != -1:
        if file_path not in files:
            raise exception.FileNotFound(file_path=file_path)
        files.remove(file_path)
    else:
        # Removes the files in the folder and the folder too from the db.
        # Bug fix: iterate over a snapshot -- removing entries from the
        # list being iterated used to skip the element after each match.
        for f in list(files):
            if f.find(file_path) != -1:
                files.remove(f)
def fake_plug_vifs(*args, **kwargs):
    """No-op stand-in for VIF plugging."""
def fake_get_network(*args, **kwargs):
    """Return a minimal fake network description, ignoring all arguments."""
    return dict(type='fake')
def get_file(file_path):
    """Return True when *file_path* is recorded in the fake db."""
    files = _db_content.get("files")
    if files is None:
        raise exception.NoFilesFound()
    return file_path in files
def fake_fetch_image(context, image, instance, **kwargs):
    """Fake image fetch: record the datastore file path in the fake db."""
    ds_path = "[%s] %s" % (kwargs.get("datastore_name"),
                           kwargs.get("file_path"))
    _add_file(ds_path)
def fake_upload_image(context, image, instance, **kwargs):
    """No-op stand-in for the image upload."""
def fake_get_vmdk_size_and_properties(context, image_id, instance):
    """Return a canned (size, properties) pair for the image file."""
    return _FAKE_FILE_SIZE, {"vmware_ostype": "otherGuest",
                             "vmware_adaptertype": "lsiLogic"}
def _get_vm_mdo(vm_ref):
    """Look up the fake VirtualMachine managed object for *vm_ref*."""
    vms = _db_content.get("VirtualMachine", None)
    if vms is None:
        raise exception.NotFound(_("There is no VM registered"))
    if vm_ref not in vms:
        raise exception.NotFound(_("Virtual Machine with ref %s is not "
                                   "there") % vm_ref)
    return vms[vm_ref]
class FakeFactory(object):
    """Stand-in for the suds client factory."""
    def create(self, obj_name):
        """Return a fresh namespace DataObject for *obj_name*."""
        return DataObject(obj_name)
class FakeVim(object):
    """Fake VIM Class.

    Mimics the suds-based VIM client: session handling plus a
    __getattr__ dispatcher that maps vSphere API call names onto the
    fake implementations below.
    """
    def __init__(self, protocol="https", host="localhost", trace=None):
        """
        Initializes the suds client object, sets the service content
        contents and the cookies for the session.
        """
        self._session = None
        self.client = DataObject()
        self.client.factory = FakeFactory()
        transport = DataObject()
        transport.cookiejar = "Fake-CookieJar"
        options = DataObject()
        options.transport = transport
        self.client.options = options
        service_content = self.client.factory.create('ns0:ServiceContent')
        service_content.propertyCollector = "PropCollector"
        service_content.virtualDiskManager = "VirtualDiskManager"
        service_content.fileManager = "FileManager"
        service_content.rootFolder = "RootFolder"
        service_content.sessionManager = "SessionManager"
        about_info = DataObject()
        about_info.name = "VMware vCenter Server"
        about_info.version = "5.1.0"
        service_content.about = about_info
        self._service_content = service_content
    def get_service_content(self):
        """Return the canned ServiceContent object."""
        return self._service_content
    def __repr__(self):
        return "Fake VIM Object"
    def __str__(self):
        return "Fake VIM Object"
    def _login(self):
        """Logs in and sets the session object in the db."""
        self._session = uuidutils.generate_uuid()
        session = DataObject()
        session.key = self._session
        session.userName = 'sessionUserName'
        _db_content['session'][self._session] = session
        return session
    def _logout(self):
        """Logs out and remove the session object ref from the db."""
        s = self._session
        self._session = None
        if s not in _db_content['session']:
            raise exception.NovaException(
                _("Logging out a session that is invalid or already logged "
                  "out: %s") % s)
        del _db_content['session'][s]
    def _terminate_session(self, *args, **kwargs):
        """Terminates a session."""
        s = kwargs.get("sessionId")[0]
        if s not in _db_content['session']:
            return
        del _db_content['session'][s]
    def _check_session(self):
        """Checks if the session is active."""
        if (self._session is None or self._session not in
                _db_content['session']):
            LOG.debug(_("Session is faulty"))
            raise error_util.VimFaultException(
                [error_util.FAULT_NOT_AUTHENTICATED],
                _("Session Invalid"))
    def _session_is_active(self, *args, **kwargs):
        """Return True when the current session validates, else False."""
        try:
            self._check_session()
            return True
        except Exception:
            return False
    def _create_vm(self, method, *args, **kwargs):
        """Creates and registers a VM object with the Host System."""
        config_spec = kwargs.get("config")
        # Bug fix: dict.keys() is a non-indexable view on Python 3, so
        # materialize it before taking the first element.
        ds = list(_db_content["Datastore"].keys())[0]
        host = list(_db_content["HostSystem"].keys())[0]
        vm_dict = {"name": config_spec.name,
                   "ds": [ds],
                   "runtime_host": host,
                   "powerstate": "poweredOff",
                   "vmPathName": config_spec.files.vmPathName,
                   "numCpu": config_spec.numCPUs,
                   "mem": config_spec.memoryMB,
                   "extra_config": config_spec.extraConfig,
                   "virtual_device": config_spec.deviceChange}
        virtual_machine = VirtualMachine(**vm_dict)
        _create_object("VirtualMachine", virtual_machine)
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _reconfig_vm(self, method, *args, **kwargs):
        """Reconfigures a VM and sets the properties supplied."""
        vm_ref = args[0]
        vm_mdo = _get_vm_mdo(vm_ref)
        vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _create_copy_disk(self, method, vmdk_file_path):
        """Creates/copies a vmdk file object in the datastore."""
        # We need to add/create both .vmdk and .-flat.vmdk files
        flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
        _add_file(vmdk_file_path)
        _add_file(flat_vmdk_file_path)
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _extend_disk(self, method, size):
        """Extend disk size when create a instance."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _snapshot_vm(self, method):
        """Snapshots a VM. Here we do nothing for faking sake."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _delete_disk(self, method, *args, **kwargs):
        """Deletes .vmdk and -flat.vmdk files corresponding to the VM."""
        vmdk_file_path = kwargs.get("name")
        flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
        _remove_file(vmdk_file_path)
        _remove_file(flat_vmdk_file_path)
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _delete_file(self, method, *args, **kwargs):
        """Deletes a file from the datastore."""
        _remove_file(kwargs.get("name"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _just_return(self):
        """Fakes a return."""
        return
    def _just_return_task(self, method):
        """Fakes a task return."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _clone_vm(self, method, *args, **kwargs):
        """Fakes a VM clone."""
        return self._just_return_task(method)
    def _unregister_vm(self, method, *args, **kwargs):
        """Unregisters a VM from the Host System."""
        vm_ref = args[0]
        _get_vm_mdo(vm_ref)
        del _db_content["VirtualMachine"][vm_ref]
    def _search_ds(self, method, *args, **kwargs):
        """Searches the datastore for a file."""
        ds_path = kwargs.get("datastorePath")
        if _db_content.get("files", None) is None:
            raise exception.NoFilesFound()
        for file in _db_content.get("files"):
            if file.find(ds_path) != -1:
                result = DataObject()
                result.path = ds_path
                task_mdo = create_task(method, state="success",
                                       result=result)
                return task_mdo.obj
        task_mdo = create_task(method, "error")
        return task_mdo.obj
    def _make_dir(self, method, *args, **kwargs):
        """Creates a directory in the datastore."""
        ds_path = kwargs.get("name")
        if _db_content.get("files", None) is None:
            raise exception.NoFilesFound()
        _db_content["files"].append(ds_path)
    def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
        """Sets power state for the VM."""
        if _db_content.get("VirtualMachine", None) is None:
            raise exception.NotFound(_("No Virtual Machine has been "
                                       "registered yet"))
        if vm_ref not in _db_content.get("VirtualMachine"):
            raise exception.NotFound(_("Virtual Machine with ref %s is not "
                                       "there") % vm_ref)
        vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
        vm_mdo.set("runtime.powerState", pwr_state)
        task_mdo = create_task(method, "success")
        return task_mdo.obj
    def _retrieve_properties_continue(self, method, *args, **kwargs):
        """Continues the retrieve."""
        return FakeRetrieveResult()
    def _retrieve_properties_cancel(self, method, *args, **kwargs):
        """Cancels the retrieve."""
        return None
    def _retrieve_properties(self, method, *args, **kwargs):
        """Retrieves properties based on the type."""
        spec_set = kwargs.get("specSet")[0]
        type = spec_set.propSet[0].type
        properties = spec_set.propSet[0].pathSet
        if not isinstance(properties, list):
            properties = properties.split()
        objs = spec_set.objectSet
        lst_ret_objs = FakeRetrieveResult()
        for obj in objs:
            try:
                obj_ref = obj.obj
                # This means that we are doing a search for the managed
                # data objects of the type in the inventory
                if obj_ref == "RootFolder":
                    mdo_refs = _db_content[type]
                else:
                    mdo_refs = [obj_ref]
                for mdo_ref in mdo_refs:
                    mdo = _db_content[type][mdo_ref]
                    prop_list = []
                    for prop_name in properties:
                        prop = Prop(prop_name, mdo.get(prop_name))
                        prop_list.append(prop)
                    obj_content = ObjectContent(mdo.obj, prop_list)
                    lst_ret_objs.add_object(obj_content)
            except Exception as exc:
                LOG.exception(exc)
                continue
        return lst_ret_objs
    def _add_port_group(self, method, *args, **kwargs):
        """Adds a port group to the host system."""
        # Bug fix: Python 3 keys() view is not indexable.
        _host_sk = list(_db_content["HostSystem"].keys())[0]
        host_mdo = _db_content["HostSystem"][_host_sk]
        host_mdo._add_port_group(kwargs.get("portgrp"))
    def __getattr__(self, attr_name):
        """Dispatch vSphere API method names to the fake implementations."""
        if attr_name != "Login":
            self._check_session()
        if attr_name == "Login":
            return lambda *args, **kwargs: self._login()
        elif attr_name == "Logout":
            # Bug fix: previously the logout ran as a side effect of the
            # attribute *access* and None was returned, so vim.Logout()
            # raised TypeError. Return a callable like the other methods.
            return lambda *args, **kwargs: self._logout()
        elif attr_name == "SessionIsActive":
            return lambda *args, **kwargs: self._session_is_active(
                *args, **kwargs)
        elif attr_name == "TerminateSession":
            return lambda *args, **kwargs: self._terminate_session(
                *args, **kwargs)
        elif attr_name == "CreateVM_Task":
            return lambda *args, **kwargs: self._create_vm(attr_name,
                                                           *args, **kwargs)
        elif attr_name == "ReconfigVM_Task":
            return lambda *args, **kwargs: self._reconfig_vm(attr_name,
                                                             *args, **kwargs)
        elif attr_name == "CreateVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(attr_name,
                                                kwargs.get("name"))
        elif attr_name == "DeleteDatastoreFile_Task":
            return lambda *args, **kwargs: self._delete_file(attr_name,
                                                             *args, **kwargs)
        elif attr_name == "PowerOnVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                args[0], "poweredOn")
        elif attr_name == "PowerOffVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                args[0], "poweredOff")
        elif attr_name == "RebootGuest":
            return lambda *args, **kwargs: self._just_return()
        elif attr_name == "ResetVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                args[0], "poweredOn")
        elif attr_name == "SuspendVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                args[0], "suspended")
        elif attr_name == "CreateSnapshot_Task":
            return lambda *args, **kwargs: self._snapshot_vm(attr_name)
        elif attr_name == "CopyVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(attr_name,
                                                kwargs.get("destName"))
        elif attr_name == "ExtendVirtualDisk_Task":
            return lambda *args, **kwargs: self._extend_disk(attr_name,
                                                kwargs.get("size"))
        elif attr_name == "Destroy_Task":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                               *args, **kwargs)
        elif attr_name == "UnregisterVM":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                               *args, **kwargs)
        elif attr_name == "CloneVM_Task":
            return lambda *args, **kwargs: self._clone_vm(attr_name,
                                                          *args, **kwargs)
        elif attr_name == "Rename_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "SearchDatastore_Task":
            return lambda *args, **kwargs: self._search_ds(attr_name,
                                                           *args, **kwargs)
        elif attr_name == "MakeDirectory":
            return lambda *args, **kwargs: self._make_dir(attr_name,
                                                          *args, **kwargs)
        elif attr_name == "RetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties(
                attr_name, *args, **kwargs)
        elif attr_name == "ContinueRetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties_continue(
                attr_name, *args, **kwargs)
        elif attr_name == "CancelRetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties_cancel(
                attr_name, *args, **kwargs)
        elif attr_name == "AcquireCloneTicket":
            return lambda *args, **kwargs: self._just_return()
        elif attr_name == "AddPortGroup":
            return lambda *args, **kwargs: self._add_port_group(attr_name,
                                                                *args, **kwargs)
        elif attr_name == "RebootHost_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "ShutdownHost_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "PowerDownHostToStandBy_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "PowerUpHostFromStandBy_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "EnterMaintenanceMode_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "ExitMaintenanceMode_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
# pylint: skip-file
def get_weight(shape, stddev, reg, name):
  """Create (or reuse, under variable scoping) a weight variable.

  NOTE(review): `stddev` is currently unused -- the commented-out
  random_normal initializer consumed it, but the active initializer is
  Xavier. Kept for call-site compatibility; confirm before removing.
  `reg` attaches an L2 regularizer whose weight decay (wd) is 0.0, so
  it contributes nothing to the regularization loss as written.
  """
  wd = 0.0
  # init = tf.random_normal_initializer(stddev=stddev)
  init = tf.contrib.layers.xavier_initializer()
  if reg:
    regu = tf.contrib.layers.l2_regularizer(wd)
    filt = tf.get_variable(name, shape, initializer=init, regularizer=regu)
  else:
    filt = tf.get_variable(name, shape, initializer=init)
  return filt
def get_bias(shape, init_bias, reg, name):
  """Create a bias variable initialized to `init_bias`.

  When `reg` is set, an L2 regularizer (with weight decay 0.0, matching
  get_weight) is attached; otherwise no regularizer is used.
  """
  wd = 0.0
  init = tf.constant_initializer(init_bias)
  regu = tf.contrib.layers.l2_regularizer(wd) if reg else None
  return tf.get_variable(name, shape, initializer=init, regularizer=regu)
def batch_norm(x, phase_train, moments_dim):
  """
  Batch normalization on convolutional maps.
  Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
  Args:
    x: Tensor, 4D BHWD input maps
    phase_train: boolean tf.Variable, true indicates training phase
    moments_dim: axes to average over ([0] for FC outputs,
      [0, 1, 2] for conv feature maps)
  Return:
    normed: batch-normalized maps
  """
  with tf.variable_scope('bn'):
    n_out = x.get_shape().as_list()[-1]
    # gamma/beta are created via get_bias so they pick up the same
    # (zero-decay) regularizer as other variables in this file.
    gamma = get_bias(n_out, 1.0, True, 'gamma')
    beta = get_bias(n_out, 0.0, True, 'beta')
    batch_mean, batch_var = tf.nn.moments(x, moments_dim, name='moments')
    # Running statistics tracked with an EMA; at eval time the averages
    # are used in place of the batch statistics.
    ema = tf.train.ExponentialMovingAverage(decay=0.999)
    def mean_var_with_update():
      # Update the EMA before returning the batch statistics.
      ema_apply_op = ema.apply([batch_mean, batch_var])
      with tf.control_dependencies([ema_apply_op]):
        return tf.identity(batch_mean), tf.identity(batch_var)
    mean, var = tf.cond(phase_train,
                        mean_var_with_update,
                        lambda: (ema.average(batch_mean), ema.average(batch_var)))
    normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
  return normed
def max_pool(inputs, name, k_shape=(1, 2, 2, 1), s_shape=(1, 2, 2, 1)):
  """2x2/stride-2 (by default) max pooling with SAME padding.

  The defaults are tuples rather than the original mutable lists
  (shared-mutable-default pitfall); tf.nn.max_pool accepts either, and
  list-passing callers are unaffected.
  """
  with tf.variable_scope(name):
    return tf.nn.max_pool(inputs, ksize=k_shape, strides=s_shape,
                          padding='SAME', name=name)
def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
  """Return the spatial output size of a transposed convolution."""
  out_size = dim_size * stride_size
  if padding == 'VALID' and out_size is not None:
    out_size += max(kernel_size - stride_size, 0)
  return out_size
def fc(inputs, n_output, is_training, name, bias=0.0, relu=True, reg=True, bn=True):
  """Fully connected layer with optional batch norm and leaky ReLU.

  Args:
    inputs: 2-D tensor [batch, n_input].
    n_output: output width.
    is_training: bool tensor forwarded to batch_norm.
    name: variable scope name.
    bias: initial bias value.
    relu: apply leaky ReLU when True.
    reg: attach L2 regularizers to weight and bias. Bug fix: this flag
      used to be ignored (reg=True was hard-coded); it is now honoured,
      and the default preserves the old behaviour.
    bn: apply batch normalization when True.
  """
  with tf.variable_scope(name):
    n_input = inputs.get_shape().as_list()[-1]
    shape = [n_input, n_output]
    filt = get_weight(shape, stddev=tf.sqrt(2.0 / tf.to_float(n_input + n_output)),
                      reg=reg, name='weight')
    # Use a distinct local so the `bias` argument is not clobbered.
    bias_var = get_bias([n_output], init_bias=bias, reg=reg, name='bias')
    outputs = tf.nn.bias_add(tf.matmul(inputs, filt), bias_var)
    if bn:
      outputs = batch_norm(outputs, is_training, [0, ])
    if relu:
      outputs = tf.nn.leaky_relu(outputs)
    return outputs
def conv_2d(inputs, ksize, n_output, is_training, name, stride=1, pad='SAME', relu=True, reg=True, bn=True):
  """ksize x ksize convolution (no bias) with optional BN and leaky ReLU."""
  with tf.variable_scope(name):
    n_input = inputs.get_shape().as_list()[3]
    filt_shape = [ksize, ksize, n_input, n_output]
    stddev = tf.sqrt(2.0 / tf.to_float(n_input + n_output))
    filt = get_weight(filt_shape, stddev=stddev, reg=reg, name='weight')
    outputs = tf.nn.conv2d(inputs, filt, [1, stride, stride, 1], padding=pad)
    if bn:
      outputs = batch_norm(outputs, is_training, [0, 1, 2])
    if relu:
      outputs = tf.nn.leaky_relu(outputs)
    return outputs
def conv_2d_trans(inputs, ksize, n_output, is_training, name, stride=1, pad='SAME', relu=True, reg=True, bn=True):
  """Transposed (fractionally strided) convolution layer.

  The precomputed output shape is input_size * stride in both spatial
  dims, which matches SAME padding. NOTE(review): unlike conv_2d/fc this
  applies plain tf.nn.relu rather than leaky_relu -- confirm intentional.
  """
  with tf.variable_scope(name) as scope:
    batch_size = tf.shape(inputs)[0]
    input_size = inputs.get_shape().as_list()[1]
    n_input = inputs.get_shape().as_list()[3]
    # conv2d_transpose filter layout is [h, w, out_channels, in_channels].
    shape = [ksize, ksize, n_output, n_input]
    output_shape = tf.stack([batch_size, input_size*stride, input_size*stride, n_output])
    # print("shape of deconv_filter %s: %s\n" % (name, str(shape)))
    filt = get_weight(shape, stddev=tf.sqrt(2.0/tf.to_float(n_input+n_output)), reg=reg, name='weight')
    outputs = tf.nn.conv2d_transpose(inputs, filt, output_shape, [1, stride, stride, 1], padding=pad)
    if bn:
      outputs = batch_norm(outputs, is_training, [0,1,2])
    if relu:
      outputs = tf.nn.relu(outputs)
    return outputs
class Adv_cls():
  """Adversarial classifier head over the deepest encoder feature map."""
  def build(self, inputs, n_class, is_training):
    """Build the classifier graph.

    Args:
      inputs: list of feature maps; only the last (deepest) one is used.
      n_class: number of output classes.
      is_training: bool tensor for batch norm.
    Returns:
      Squeezed logits tensor with n_class scores per example.
    """
    with tf.variable_scope('Adv', reuse=tf.AUTO_REUSE):
      net = inputs[-1]
      for i in range(3): #4x4
        net = conv_2d(net, 3, 512, is_training, 'conv1_'+str(i))
      # net = max_pool(net, 'pool3')
      for i in range(3): #4x4
        net = conv_2d(net, 3, 256, is_training, 'conv2_'+str(i))
      # net = max_pool(net, 'pool3')
      # Collapse spatial dims with a VALID 4x4 conv, then two 1x1 "fc"
      # convolutions; the final one has no BN/activation (raw logits).
      net = conv_2d(net, 4, 256, is_training, 'fc1', pad='VALID')
      net = conv_2d(net, 1, 128, is_training, 'fc2', pad='VALID')
      net = tf.squeeze(conv_2d(net, 1, n_class, is_training, 'fc3', pad='VALID', relu=False, bn=False))
    self.vars = tf.trainable_variables('Adv')
    self.reg_loss = tf.losses.get_regularization_losses('Adv')
    return net
class Genc():
  """Encoder: five stride-2 convolutions with widths 64 * 2**i."""
  def build(self, inputs, is_training):
    """Return all five intermediate feature maps, shallowest first."""
    with tf.variable_scope('Genc', reuse=tf.AUTO_REUSE):
      net = inputs
      nets = []
      for i in range(5):
        net = conv_2d(net, 4, int(64 * 2**i), is_training, 'enc_'+str(i), stride=2)
        nets.append(net)
    self.vars = tf.trainable_variables('Genc')
    self.reg_loss = tf.losses.get_regularization_losses('Genc')
    return nets
class Gdec():
    """Decoder: upsamples the deepest encoder feature map back to a 3-channel
    image, conditioning on the attribute label at two resolutions."""
    def build(self, inputs, labels, is_training):
        """Builds the decoder graph.

        Args:
            inputs: list of encoder feature maps (as returned by Genc.build);
                only the last two entries are used.
            labels: per-example attribute label; broadcast over space and
                concatenated to the features as an extra channel.
            is_training: flag forwarded to batch norm.

        Returns:
            tanh-activated image tensor with 3 channels (values in [-1, 1]).
        """
        with tf.variable_scope('Gdec', reuse=tf.AUTO_REUSE):
            # Reshape to (B, 1, 1, 1) so the label can be tiled over space.
            # NOTE(review): the original comment said "B,1,1,N", but this
            # reshape yields a single channel, i.e. exactly one attribute is
            # supported -- confirm intent before adding attributes.
            labels = tf.reshape(tf.to_float(labels),[-1,1,1,1])
            net = inputs[-1]
            tile_labels = tf.tile(labels,[1,net.shape[1],net.shape[2],1])
            net = tf.concat([net, tile_labels],axis=-1)
            for i in range(4):
                if i==1:
                    # Skip connection: fuse the second-to-last encoder map,
                    # then re-inject the label tiled at this resolution.
                    net = tf.concat([net, inputs[-2]],axis=-1)
                    tile_labels = tf.tile(labels,[1,net.shape[1],net.shape[2],1])
                    net = tf.concat([net, tile_labels],axis=-1)
                # Output channels halve each stage: 1024, 512, 256, 128.
                net = conv_2d_trans(net, 4, int(1024 / 2**i), is_training, 'dec_'+str(i), stride=2)
            # Final upsample to RGB without BN/ReLU; tanh bounds the output.
            net = tf.nn.tanh(conv_2d_trans(net, 4, 3, is_training, 'dec_f', stride=2, relu=False, bn=False))
            self.vars = tf.trainable_variables('Gdec')
            self.reg_loss = tf.losses.get_regularization_losses('Gdec')
            return net
class D():
    """Discriminator with a shared conv trunk and two fully-connected heads:
    one real/fake (GAN) output and one attribute-classification output."""
    def build(self, inputs, is_training):
        """Builds the discriminator graph.

        Args:
            inputs: input image tensor (NHWC, static batch dimension).
            is_training: flag forwarded to batch norm.

        Returns:
            Tuple (gan_logits, cls_logits), each with one output unit and no
            final activation.
        """
        with tf.variable_scope('D', reuse=tf.AUTO_REUSE):
            # Static batch size is required for the flattening reshape below.
            n_batch = inputs.get_shape().as_list()[0]
            net = inputs
            for layer_idx in range(5):
                net = conv_2d(net, 4, int(64 * 2 ** layer_idx), is_training,
                              'D_' + str(layer_idx), stride=2)
            flat = tf.reshape(net, [n_batch, -1])
            gan_head = fc(flat, 1024, is_training, 'gan1')
            gan_head = fc(gan_head, 1, is_training, 'gan2', relu=False, bn=False)
            cls_head = fc(flat, 1024, is_training, 'cls1')
            cls_head = fc(cls_head, 1, is_training, 'cls2', relu=False, bn=False)
            self.vars = tf.trainable_variables('D')
            self.reg_loss = tf.losses.get_regularization_losses('D')
            return gan_head, cls_head
| |
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
#validation
from django.core.exceptions import ValidationError
class CheckoutForm(forms.Form):
    """Single-page checkout form: customer, billing and shipping details.

    NOTE(review): the shipping_* fields are declared but never placed in the
    crispy layout below, so crispy will not render them unless
    ``helper.render_unmentioned_fields`` is enabled -- confirm this is
    intentional (the wizard forms below handle shipping separately).
    """
    first_name = forms.CharField(max_length=100)
    last_name = forms.CharField(max_length=100)
    email = forms.EmailField()
    billing_address1 = forms.CharField(label='Address', max_length=100, required=True)
    billing_address2 = forms.CharField(label='', max_length=100, required=False)
    billing_suburb = forms.CharField(label='Suburb', max_length=100, required=True)
    billing_state = forms.CharField(label='State', max_length=100, required=True)
    billing_postcode = forms.CharField(label='Postcode', max_length=100, required=True)
    billing_phone = forms.CharField(label='Phone', max_length=100, required=True)
    shipping_address1 = forms.CharField(label='Address', max_length=100, required=True)
    shipping_address2 = forms.CharField(label='', max_length=100, required=False)
    shipping_suburb = forms.CharField(label='Suburb', max_length=100, required=True)
    shipping_state = forms.CharField(label='State', max_length=100, required=True)
    shipping_postcode = forms.CharField(label='Postcode', max_length=100, required=True)
    shipping_phone = forms.CharField(label='Phone', max_length=100, required=True)
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.form_action = 'checkout_thankyou'
    helper.layout = Layout(
        # BUG FIX: HTML class attributes are space-separated. The original
        # 'input-xlarge, form-control' emitted the bogus class token
        # 'input-xlarge,' so the CSS rules for that class never matched.
        Field('first_name', css_class='input-xlarge form-control'),
        Field('last_name', css_class='input-xlarge form-control'),
        Field('email', css_class='input-xlarge form-control'),
        Field('billing_address1', css_class='input-xlarge form-control'),
        Field('billing_suburb', css_class='input-xlarge form-control'),
        Field('billing_state', css_class='input-xlarge form-control'),
        Field('billing_postcode', css_class='input-xlarge form-control'),
        Field('billing_phone', css_class='input-xlarge form-control'),
        FormActions(
            Submit('save_changes', 'Save Details', css_class="btn-primary checkout_form_name_btn"),
            Submit('cancel', 'Cancel', css_class="checkout_form_name_btn"),
        )
    )
class CheckoutFormCustomerName(forms.Form):
    """Checkout wizard step 1: collect the customer's name and email."""
    def validate_even(value):
        # Demo validator: only meaningful for numeric values; kept because it
        # is referenced by example code, but it must never be attached to a
        # CharField (see note on last_name below).
        if value % 2 != 0:
            raise ValidationError('%s is not an even number' % value)
    first_name = forms.CharField(max_length=100)
    # BUG FIX: last_name previously declared validators=[validate_even].
    # CharField hands its validators a *string*, and "'Smith' % 2" raises
    # TypeError (not ValidationError), so any non-empty surname crashed
    # form validation. The validator is removed from the field.
    last_name = forms.CharField(max_length=100)
    email = forms.EmailField()
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.form_action = 'checkout_billing'
    helper.layout = Layout(
        HTML("""
            <p> Please enter your details <strong></strong></p>
        """),
        # Space-separated class tokens (the original comma produced the
        # invalid class token 'input-xlarge,').
        Field('first_name', css_class='input-xlarge form-control', required=True),
        Field('last_name', css_class='input-xlarge form-control', required=True),
        Field('email', css_class='input-xlarge form-control', required=True),
        FormActions(
            Submit('save_changes', 'Save Details', css_class="btn-primary checkout_form_name_btn"),
            Submit('cancel', 'Cancel', css_class="checkout_form_name_btn"),
        )
    )
class CheckoutFormBilling(forms.Form):
    """Checkout wizard step 2: collect the billing address.

    NOTE(review): billing_address2 is declared but not placed in the layout,
    so it will not render without ``helper.render_unmentioned_fields`` --
    confirm intent.
    """
    billing_address1 = forms.CharField(label='Address', max_length=100, required=True)
    billing_address2 = forms.CharField(label='', max_length=100, required=False)
    billing_suburb = forms.CharField(label='Suburb', max_length=100, required=True)
    billing_state = forms.CharField(label='State', max_length=100, required=True)
    billing_postcode = forms.CharField(label='Postcode', max_length=100, required=True)
    billing_phone = forms.CharField(label='Phone', max_length=100, required=True)
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.form_action = 'checkout_shipping'
    # Suppress crispy's inline error rendering (errors handled by the view).
    helper.form_show_errors = False
    helper.layout = Layout(
        HTML("""
            <p> <strong>Billing Address</strong></p>
        """),
        # BUG FIX: class tokens are space-separated; the original comma
        # produced the invalid class token 'input-xlarge,'.
        Field('billing_address1', css_class='input-xlarge form-control', required=True),
        Field('billing_suburb', css_class='input-xlarge form-control', required=True),
        Field('billing_state', css_class='input-xlarge form-control', required=True),
        Field('billing_postcode', css_class='input-xlarge form-control'),
        Field('billing_phone', css_class='input-xlarge form-control', oninvalid="this.setCustomValidity('Please Enter phone number')", required=True),
        FormActions(
            Submit('save_changes', 'Save Details', css_class="btn-primary checkout_form_name_btn"),
            Submit('cancel', 'Cancel', css_class="checkout_form_name_btn"),
        )
    )
class CheckoutFormShipping(forms.Form):
    """Checkout wizard step 3: collect the shipping address.

    NOTE(review): shipping_first_name/last_name/email/address2 are declared
    but not placed in the layout, so they will not render without
    ``helper.render_unmentioned_fields`` -- confirm intent.
    """
    shipping_first_name = forms.CharField(label='First Name', max_length=100)
    shipping_last_name = forms.CharField(label='Last Name', max_length=100)
    shipping_email = forms.EmailField()
    shipping_address1 = forms.CharField(label='Address', max_length=100, required=True)
    shipping_address2 = forms.CharField(label='', max_length=100, required=False)
    shipping_suburb = forms.CharField(label='Suburb', max_length=100, required=True)
    shipping_state = forms.CharField(label='State', max_length=100, required=True)
    shipping_postcode = forms.CharField(label='Postcode', max_length=100, required=True)
    shipping_phone = forms.CharField(label='Phone', max_length=100, required=True)
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.form_action = 'checkout_payment_method'
    helper.form_show_errors = False
    helper.layout = Layout(
        HTML("""
            <p> <strong>Shipping Address</strong></p>
        """),
        # BUG FIX: class tokens are space-separated; the original comma
        # produced the invalid class token 'input-xlarge,'.
        Field('shipping_address1', css_class='input-xlarge form-control'),
        Field('shipping_suburb', css_class='input-xlarge form-control'),
        Field('shipping_state', css_class='input-xlarge form-control'),
        Field('shipping_postcode', css_class='input-xlarge form-control'),
        # NOTE(review): crispy Field() kwargs become raw HTML attributes, so
        # this renders help_text="Phone goes here" as an attribute rather
        # than Django field help text -- confirm whether the form field's
        # help_text argument was intended instead.
        Field('shipping_phone', css_class='input-xlarge form-control', help_text="Phone goes here"),
        FormActions(
            Submit('save_changes', 'Save Details', css_class="btn-primary checkout_form_name_btn"),
            Submit('cancel', 'Cancel', css_class="checkout_form_name_btn"),
        )
    )
class ChekoutPaymentMethodForm(forms.Form):
    """Checkout wizard step 4: choose a payment method via radio buttons.

    NOTE(review): the class name is misspelled ("Chekout"); kept as-is so
    existing imports/views do not break -- rename in a coordinated change.
    """
    # Placeholder choices; (value, label) pairs rendered as radio inputs.
    CHOICES=[
        ('select1','select 1'),
        ('select2','select 2')
    ]
    payment_method = forms.ChoiceField(
        choices=CHOICES,
        widget=forms.RadioSelect()
    )
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.form_action = 'checkout_payment'
    # Suppress crispy's inline error rendering (errors handled by the view).
    helper.form_show_errors = False
    helper.layout = Layout(
        HTML("""
            <p> <strong>Payment Method</strong></p>
        """),
        Field('payment_method', css_class='input-xlarge'),
        FormActions(
            Submit('save_changes', 'Save Details', css_class="btn-primary checkout_form_name_btn"),
            Submit('cancel', 'Cancel', css_class="checkout_form_name_btn"),
        )
    )
class CheckoutCCForm(forms.Form):
    """Checkout wizard final step: collect credit-card payment details.

    NOTE(review): cvv is capped at 3 characters, which rejects 4-digit Amex
    codes, and the expiry format '%m%y' expects e.g. '0525' -- confirm both
    against the payment provider's requirements.
    """
    card_name = forms.CharField(label='Full Name', max_length=100, required=True)
    card_number = forms.CharField(max_length=16, required=True)
    # Expiry is parsed as MMYY (e.g. '0525' -> May 2025).
    card_expiry_date = forms.DateField(label='Expiry Date', input_formats=['%m%y'], required=True)
    cvv = forms.CharField(label='CVV', max_length=3, required=True)
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.form_action = 'checkout_thankyou'
    helper.form_show_errors = False
    helper.layout = Layout(
        HTML("""
            <p> <strong>Make Payment</strong></p>
        """),
        # BUG FIX: class tokens are space-separated; the original comma
        # produced the invalid class token 'input-xlarge,'.
        Field('card_name', css_class='input-xlarge form-control'),
        Field('card_number', css_class='input-xlarge form-control'),
        Field('card_expiry_date', css_class='input-xlarge form-control'),
        Field('cvv', css_class='input-xlarge form-control'),
        FormActions(
            Submit('save_changes', 'Save Details', css_class="btn-primary checkout_form_name_btn"),
            Submit('cancel', 'Cancel', css_class="checkout_form_name_btn"),
        )
    )
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the exploration editor page."""
__author__ = 'Sean Lip'
import os
import StringIO
import zipfile
from core.controllers import editor
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import rights_manager
from core.tests import test_utils
import feconf
class BaseEditorControllerTest(test_utils.GenericTestBase):
    """Base class for editor controller tests: signs up the standard set of
    test users and provides editability assertions."""
    # Markers embedded in the rendered page that indicate whether the
    # current user may edit the exploration.
    CAN_EDIT_STR = 'GLOBALS.can_edit = JSON.parse(\'true\');'
    CANNOT_EDIT_STR = 'GLOBALS.can_edit = JSON.parse(\'false\');'
    def setUp(self):
        """Completes sign-up for the editor, admin, owner and viewer users
        and records their user ids."""
        super(BaseEditorControllerTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
        self.set_admins([self.ADMIN_EMAIL])
    def assert_can_edit(self, response_body):
        """Asserts that the response body indicates the exploration is
        editable. (Returns None; fails the test otherwise.)"""
        self.assertIn(self.CAN_EDIT_STR, response_body)
        self.assertNotIn(self.CANNOT_EDIT_STR, response_body)
    def assert_cannot_edit(self, response_body):
        """Asserts that the response body indicates the exploration is not
        editable. (Returns None; fails the test otherwise.)"""
        self.assertIn(self.CANNOT_EDIT_STR, response_body)
        self.assertNotIn(self.CAN_EDIT_STR, response_body)
class EditorTest(BaseEditorControllerTest):
    """Tests for the exploration editor page and its data handlers."""
    def test_editor_page(self):
        """Test access to editor pages for the sample exploration."""
        exp_services.delete_demo('0')
        exp_services.load_demo('0')
        # Check that non-editors can access, but not edit, the editor page.
        response = self.testapp.get('/create/0')
        self.assertEqual(response.status_int, 200)
        self.assertIn('Welcome to Oppia!', response.body)
        self.assert_cannot_edit(response.body)
        # Log in as an editor.
        self.login(self.EDITOR_EMAIL)
        # Check that it is now possible to access and edit the editor page.
        response = self.testapp.get('/create/0')
        self.assertIn('Welcome to Oppia!', response.body)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        self.assertIn('Stats', response.body)
        self.assertIn('History', response.body)
        # Test that the value generator JS is included.
        self.assertIn('RandomSelector', response.body)
        self.logout()
    def test_new_state_template(self):
        """Test the validity of the NEW_STATE_TEMPLATE."""
        exp_services.load_demo('0')
        exploration = exp_services.get_exploration_by_id('0')
        # Adding a state named after the default init state yields a
        # freshly-initialized state dict to compare against the template.
        exploration.add_states([feconf.DEFAULT_INIT_STATE_NAME])
        new_state_dict = exploration.states[
            feconf.DEFAULT_INIT_STATE_NAME].to_dict()
        new_state_dict['unresolved_answers'] = {}
        self.assertEqual(new_state_dict, editor.NEW_STATE_TEMPLATE)
    def test_add_new_state_error_cases(self):
        """Test the error cases for adding a new state to an exploration."""
        exp_services.delete_demo('0')
        exp_services.load_demo('0')
        # A freshly-loaded demo exploration is at version 1.
        CURRENT_VERSION = 1
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get('/create/0')
        csrf_token = self.get_csrf_token_from_response(response)
        def _get_payload(new_state_name, version=None):
            # Builds an add_state change-list payload; version is omitted
            # entirely when None so the missing-version case can be tested.
            result = {
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': new_state_name
                }],
                'commit_message': 'Add new state',
            }
            if version is not None:
                result['version'] = version
            return result
        def _put_and_expect_400_error(payload):
            # PUTs the payload and asserts the handler rejects it with 400.
            return self.put_json(
                '/createhandler/data/0', payload, csrf_token,
                expect_errors=True, expected_status_int=400)
        # A request with no version number is invalid.
        response_dict = _put_and_expect_400_error(_get_payload('New state'))
        self.assertIn('a version must be specified', response_dict['error'])
        # A request with the wrong version number is invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('New state', 123))
        self.assertIn('which is too old', response_dict['error'])
        # A request with an empty state name is invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('', CURRENT_VERSION))
        self.assertIn('should be between 1 and 50', response_dict['error'])
        # A request with a really long state name is invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('a' * 100, CURRENT_VERSION))
        self.assertIn('should be between 1 and 50', response_dict['error'])
        # A request with a state name containing invalid characters is
        # invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('[Bad State Name]', CURRENT_VERSION))
        self.assertIn('Invalid character [', response_dict['error'])
        # A name cannot have spaces at the front or back.
        response_dict = _put_and_expect_400_error(
            _get_payload(' aa', CURRENT_VERSION))
        self.assertIn('start or end with whitespace', response_dict['error'])
        response_dict = _put_and_expect_400_error(
            _get_payload('aa\t', CURRENT_VERSION))
        self.assertIn('end with whitespace', response_dict['error'])
        response_dict = _put_and_expect_400_error(
            _get_payload('\n', CURRENT_VERSION))
        self.assertIn('end with whitespace', response_dict['error'])
        # A name cannot have consecutive whitespace.
        response_dict = _put_and_expect_400_error(
            _get_payload('The B', CURRENT_VERSION))
        self.assertIn('Adjacent whitespace', response_dict['error'])
        response_dict = _put_and_expect_400_error(
            _get_payload('The\t\tB', CURRENT_VERSION))
        self.assertIn('Adjacent whitespace', response_dict['error'])
        self.logout()
    def test_resolved_answers_handler(self):
        """Test marking submitted answers as resolved via the handler."""
        exp_services.delete_demo('0')
        exp_services.load_demo('0')
        # In the reader perspective, submit the first multiple-choice answer,
        # then submit 'blah' once, 'blah2' twice and 'blah3' three times.
        # TODO(sll): Use the ExplorationPlayer in reader_test for this.
        exploration_dict = self.get_json(
            '%s/0' % feconf.EXPLORATION_INIT_URL_PREFIX)
        self.assertEqual(
            exploration_dict['exploration']['title'], 'Welcome to Oppia!')
        state_name = exploration_dict['exploration']['init_state_name']
        exploration_dict = self.submit_answer('0', state_name, '0')
        state_name = exploration_dict['state_name']
        self.submit_answer('0', state_name, 'blah')
        for _ in range(2):
            self.submit_answer('0', state_name, 'blah2')
        for _ in range(3):
            self.submit_answer('0', state_name, 'blah3')
        # Log in as an editor.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get('/create/0')
        csrf_token = self.get_csrf_token_from_response(response)
        url = str('/createhandler/resolved_answers/0/%s' % state_name)
        def _get_unresolved_answers():
            # Returns the {answer: count} dict of still-unresolved answers.
            return stats_domain.StateRuleAnswerLog.get(
                '0', state_name, feconf.SUBMIT_HANDLER_NAME,
                exp_domain.DEFAULT_RULESPEC_STR
            ).answers
        self.assertEqual(
            _get_unresolved_answers(), {'blah': 1, 'blah2': 2, 'blah3': 3})
        # An empty request should result in an error.
        response_dict = self.put_json(
            url, {'something_else': []}, csrf_token,
            expect_errors=True, expected_status_int=400)
        self.assertIn('Expected a list', response_dict['error'])
        # A request of the wrong type should result in an error.
        response_dict = self.put_json(
            url, {'resolved_answers': 'this_is_a_string'}, csrf_token,
            expect_errors=True, expected_status_int=400)
        self.assertIn('Expected a list', response_dict['error'])
        # Trying to remove an answer that wasn't submitted has no effect.
        response_dict = self.put_json(
            url, {'resolved_answers': ['not_submitted_answer']}, csrf_token)
        self.assertEqual(
            _get_unresolved_answers(), {'blah': 1, 'blah2': 2, 'blah3': 3})
        # A successful request should remove the answer in question.
        response_dict = self.put_json(
            url, {'resolved_answers': ['blah']}, csrf_token)
        self.assertEqual(
            _get_unresolved_answers(), {'blah2': 2, 'blah3': 3})
        # It is possible to remove more than one answer at a time.
        response_dict = self.put_json(
            url, {'resolved_answers': ['blah2', 'blah3']}, csrf_token)
        self.assertEqual(_get_unresolved_answers(), {})
        self.logout()
class DownloadIntegrationTest(BaseEditorControllerTest):
    """Test handler for exploration and state download."""
    # Expected JSON-download payload: a mapping from state name to that
    # state's YAML serialization. The YAML literals intentionally start at
    # column 0 because the handler emits unindented YAML.
    SAMPLE_JSON_CONTENT = {
        "State A": ("""content:
- type: text
  value: ''
interaction:
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  handlers:
  - name: submit
    rule_specs:
    - definition:
        rule_type: default
      dest: State A
      feedback: []
      param_changes: []
  id: TextInput
  triggers: []
param_changes: []
"""),
        "State B": ("""content:
- type: text
  value: ''
interaction:
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  handlers:
  - name: submit
    rule_specs:
    - definition:
        rule_type: default
      dest: State B
      feedback: []
      param_changes: []
  id: TextInput
  triggers: []
param_changes: []
"""),
        feconf.DEFAULT_INIT_STATE_NAME: ("""content:
- type: text
  value: ''
interaction:
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  handlers:
  - name: submit
    rule_specs:
    - definition:
        rule_type: default
      dest: %s
      feedback: []
      param_changes: []
  id: TextInput
  triggers: []
param_changes: []
""") % feconf.DEFAULT_INIT_STATE_NAME
    }
    # Expected YAML for a single-state download ('State A').
    SAMPLE_STATE_STRING = (
        """content:
- type: text
  value: ''
interaction:
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  handlers:
  - name: submit
    rule_specs:
    - definition:
        rule_type: default
      dest: State A
      feedback: []
      param_changes: []
  id: TextInput
  triggers: []
param_changes: []
""")
    def test_exploration_download_handler_for_default_exploration(self):
        """Downloads an exploration as ZIP and JSON and checks the output."""
        self.login(self.EDITOR_EMAIL)
        self.OWNER_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
        # Create a simple exploration
        EXP_ID = 'eid'
        self.save_new_valid_exploration(
            EXP_ID, self.OWNER_ID,
            title='The title for ZIP download handler test!',
            category='This is just a test category',
            objective='')
        exploration = exp_services.get_exploration_by_id(EXP_ID)
        # Point the init state's default rule back at itself so the
        # serialized destination is deterministic.
        exploration.states[exploration.init_state_name].interaction.handlers[
            0].rule_specs[0].dest = exploration.init_state_name
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')
        # Exercise rename/delete so the download reflects the final states.
        exploration.rename_state('State 2', 'State B')
        exploration.delete_state('State 3')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
        response = self.testapp.get('/create/%s' % EXP_ID)
        # Check download to zip file
        # Download to zip file using download handler
        EXPLORATION_DOWNLOAD_URL = '/createhandler/download/%s' % EXP_ID
        response = self.testapp.get(EXPLORATION_DOWNLOAD_URL)
        # Check downloaded zip file
        self.assertEqual(response.headers['Content-Type'], 'text/plain')
        filename = 'oppia-ThetitleforZIPdownloadhandlertest!-v2.zip'
        self.assertEqual(response.headers['Content-Disposition'],
                         'attachment; filename=%s' % str(filename))
        zf_saved = zipfile.ZipFile(StringIO.StringIO(response.body))
        self.assertEqual(
            zf_saved.namelist(),
            ['The title for ZIP download handler test!.yaml'])
        # Load golden zip file
        with open(os.path.join(
                feconf.TESTS_DATA_DIR,
                'oppia-ThetitleforZIPdownloadhandlertest!-v2-gold.zip'),
                'rb') as f:
            golden_zipfile = f.read()
        zf_gold = zipfile.ZipFile(StringIO.StringIO(golden_zipfile))
        # Compare saved with golden file
        self.assertEqual(
            zf_saved.open(
                'The title for ZIP download handler test!.yaml'
            ).read(),
            zf_gold.open(
                'The title for ZIP download handler test!.yaml'
            ).read())
        # Check download to JSON
        exploration.update_objective('Test JSON download')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
        # Download to JSON string using download handler
        self.maxDiff = None
        EXPLORATION_DOWNLOAD_URL = (
            '/createhandler/download/%s?output_format=%s&width=50' %
            (EXP_ID, feconf.OUTPUT_FORMAT_JSON))
        response = self.get_json(EXPLORATION_DOWNLOAD_URL)
        # Check downloaded dict
        self.assertEqual(self.SAMPLE_JSON_CONTENT, response)
        self.logout()
    def test_state_download_handler_for_default_exploration(self):
        """Downloads a single state as YAML and checks the output."""
        self.login(self.EDITOR_EMAIL)
        self.OWNER_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
        # Create a simple exploration
        EXP_ID = 'eid'
        self.save_new_valid_exploration(
            EXP_ID, self.OWNER_ID,
            title='The title for states download handler test!',
            category='This is just a test category')
        exploration = exp_services.get_exploration_by_id(EXP_ID)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')
        exploration.rename_state('State 2', 'State B')
        exploration.delete_state('State 3')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
        response = self.testapp.get('/create/%s' % EXP_ID)
        # Check download state as YAML string
        self.maxDiff = None
        # URL-encoded state name ('State A').
        state_name = 'State%20A'
        EXPLORATION_DOWNLOAD_URL = (
            '/createhandler/download_state/%s?state=%s&width=50' %
            (EXP_ID, state_name))
        response = self.testapp.get(EXPLORATION_DOWNLOAD_URL)
        self.assertEqual(self.SAMPLE_STATE_STRING, response.body)
        self.logout()
class ExplorationDeletionRightsTest(BaseEditorControllerTest):
    """Tests of who may delete unpublished and published explorations."""
    def test_deletion_rights_for_unpublished_exploration(self):
        """Test rights management for deletion of unpublished explorations."""
        UNPUBLISHED_EXP_ID = 'unpublished_eid'
        exploration = exp_domain.Exploration.create_default_exploration(
            UNPUBLISHED_EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.owner_id, exploration)
        rights_manager.assign_role(
            self.owner_id, UNPUBLISHED_EXP_ID, self.editor_id,
            rights_manager.ROLE_EDITOR)
        # An editor may not delete the exploration.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % UNPUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()
        # A viewer may not delete the exploration.
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % UNPUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()
        # Only the owner may delete an unpublished exploration.
        self.login(self.OWNER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % UNPUBLISHED_EXP_ID)
        self.assertEqual(response.status_int, 200)
        self.logout()
    def test_deletion_rights_for_published_exploration(self):
        """Test rights management for deletion of published explorations."""
        PUBLISHED_EXP_ID = 'published_eid'
        exploration = exp_domain.Exploration.create_default_exploration(
            PUBLISHED_EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.owner_id, exploration)
        rights_manager.assign_role(
            self.owner_id, PUBLISHED_EXP_ID, self.editor_id,
            rights_manager.ROLE_EDITOR)
        rights_manager.publish_exploration(self.owner_id, PUBLISHED_EXP_ID)
        # Neither editor, viewer, nor owner may delete once published.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()
        self.login(self.OWNER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()
        # Only an admin may delete a published exploration.
        self.login(self.ADMIN_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % PUBLISHED_EXP_ID)
        self.assertEqual(response.status_int, 200)
        self.logout()
class VersioningIntegrationTest(BaseEditorControllerTest):
    """Test retrieval of and reverting to old exploration versions."""
    def setUp(self):
        """Create exploration with two versions"""
        super(VersioningIntegrationTest, self).setUp()
        self.EXP_ID = '0'
        exp_services.delete_demo(self.EXP_ID)
        exp_services.load_demo(self.EXP_ID)
        self.login(self.EDITOR_EMAIL)
        # In version 2, change the objective and the initial state content.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        # NOTE(review): sibling calls pass a user *id* as the committer, but
        # this passes EDITOR_EMAIL -- confirm which update_exploration
        # expects.
        exp_services.update_exploration(
            self.EDITOR_EMAIL, self.EXP_ID, [{
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective',
            }, {
                'cmd': 'edit_state_property',
                'property_name': 'content',
                'state_name': exploration.init_state_name,
                'new_value': [{'type': 'text', 'value': 'ABC'}],
            }], 'Change objective and init state content')
    def test_reverting_to_old_exploration(self):
        """Test reverting to old exploration versions."""
        # Open editor page
        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, self.EXP_ID))
        csrf_token = self.get_csrf_token_from_response(response)
        # May not revert to any version that's not 1
        for rev_version in (-1, 0, 2, 3, 4, '1', ()):
            response_dict = self.post_json(
                '/createhandler/revert/%s' % self.EXP_ID, {
                    'current_version': 2,
                    'revert_to_version': rev_version
                }, csrf_token, expect_errors=True, expected_status_int=400)
            # Check error message
            if not isinstance(rev_version, int):
                self.assertIn('Expected an integer', response_dict['error'])
            else:
                self.assertIn('Cannot revert to version',
                              response_dict['error'])
            # Check that exploration is really not reverted to old version
            reader_dict = self.get_json(
                '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
            init_state_name = reader_dict['exploration']['init_state_name']
            init_state_data = (
                reader_dict['exploration']['states'][init_state_name])
            init_content = init_state_data['content'][0]['value']
            self.assertIn('ABC', init_content)
            self.assertNotIn('Hi, welcome to Oppia!', init_content)
        # Revert to version 1
        rev_version = 1
        response_dict = self.post_json(
            '/createhandler/revert/%s' % self.EXP_ID, {
                'current_version': 2,
                'revert_to_version': rev_version
            }, csrf_token)
        # Check that exploration is really reverted to version 1
        reader_dict = self.get_json(
            '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        init_content = init_state_data['content'][0]['value']
        self.assertNotIn('ABC', init_content)
        self.assertIn('Hi, welcome to Oppia!', init_content)
    def test_versioning_for_default_exploration(self):
        """Test retrieval of old exploration versions."""
        # The latest version contains 'ABC'.
        reader_dict = self.get_json(
            '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        init_content = init_state_data['content'][0]['value']
        self.assertIn('ABC', init_content)
        self.assertNotIn('Hi, welcome to Oppia!', init_content)
        # v1 contains 'Hi, welcome to Oppia!'.
        reader_dict = self.get_json(
            '%s/%s?v=1' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        init_content = init_state_data['content'][0]['value']
        self.assertIn('Hi, welcome to Oppia!', init_content)
        self.assertNotIn('ABC', init_content)
        # v2 contains 'ABC'.
        reader_dict = self.get_json(
            '%s/%s?v=2' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        init_content = init_state_data['content'][0]['value']
        self.assertIn('ABC', init_content)
        self.assertNotIn('Hi, welcome to Oppia!', init_content)
        # v3 does not exist.
        response = self.testapp.get(
            '%s/%s?v=3' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID),
            expect_errors=True)
        self.assertEqual(response.status_int, 404)
class ExplorationEditRightsTest(BaseEditorControllerTest):
    """Test the handling of edit rights for explorations."""
    def test_user_banning(self):
        """Test that banned users are banned."""
        EXP_ID = '0'
        exp_services.delete_demo(EXP_ID)
        exp_services.load_demo(EXP_ID)
        # Sign-up new editors Joe and Sandra.
        self.signup('joe@example.com', 'joe')
        self.signup('sandra@example.com', 'sandra')
        # Joe logs in.
        self.login('joe@example.com')
        response = self.testapp.get(feconf.GALLERY_URL)
        self.assertEqual(response.status_int, 200)
        response = self.testapp.get('/create/%s' % EXP_ID)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        # Ban joe.
        config_services.set_property(
            feconf.ADMIN_COMMITTER_ID, 'banned_usernames', ['joe'])
        # Test that Joe is banned. (He can still access the gallery.)
        response = self.testapp.get(feconf.GALLERY_URL, expect_errors=True)
        self.assertEqual(response.status_int, 200)
        # The editor page still loads (200), but is read-only for Joe.
        response = self.testapp.get('/create/%s' % EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 200)
        self.assert_cannot_edit(response.body)
        # Joe logs out.
        self.logout()
        # Sandra logs in and is unaffected.
        self.login('sandra@example.com')
        response = self.testapp.get('/create/%s' % EXP_ID)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        self.logout()
class ExplorationRightsIntegrationTest(BaseEditorControllerTest):
    """Test the handler for managing exploration editing rights."""

    COLLABORATOR_EMAIL = 'collaborator@example.com'
    COLLABORATOR_USERNAME = 'collab'
    COLLABORATOR2_EMAIL = 'collaborator2@example.com'
    COLLABORATOR2_USERNAME = 'collab2'
    COLLABORATOR3_EMAIL = 'collaborator3@example.com'
    COLLABORATOR3_USERNAME = 'collab3'
    VIEWER2_EMAIL = 'viewer2@example.com'

    def test_exploration_rights_handler(self):
        """Test exploration rights handler.

        Covers the full flow: the owner grants viewer/editor roles, then
        each role is checked for what it can and cannot do (view, edit,
        manage rights).
        """

        # Create several users
        self.signup(
            self.COLLABORATOR_EMAIL, username=self.COLLABORATOR_USERNAME)
        self.signup(
            self.COLLABORATOR2_EMAIL, username=self.COLLABORATOR2_USERNAME)
        self.signup(
            self.COLLABORATOR3_EMAIL, username=self.COLLABORATOR3_USERNAME)

        self.collaborator_id = self.get_user_id_from_email(
            self.COLLABORATOR_EMAIL)

        # Owner creates exploration
        self.login(self.OWNER_EMAIL)
        EXP_ID = 'eid'
        self.save_new_valid_exploration(
            EXP_ID, self.owner_id, title='Title for rights handler test!',
            category='My category')

        exploration = exp_services.get_exploration_by_id(EXP_ID)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')

        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, EXP_ID))
        csrf_token = self.get_csrf_token_from_response(response)

        # Owner adds rights for other users
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, EXP_ID)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.VIEWER_USERNAME,
                'new_member_role': rights_manager.ROLE_VIEWER
            }, csrf_token)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR
            }, csrf_token)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR2_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR
            }, csrf_token)

        self.logout()

        # Check that viewer can access editor page but cannot edit.
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.get('/create/%s' % EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 200)
        self.assert_cannot_edit(response.body)
        self.logout()

        # Check that collaborator can access editor page and can edit.
        self.login(self.COLLABORATOR_EMAIL)
        response = self.testapp.get('/create/%s' % EXP_ID)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        csrf_token = self.get_csrf_token_from_response(response)

        # Check that collaborator can add a new state called 'State 4'
        add_url = '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, EXP_ID)
        response_dict = self.put_json(
            add_url,
            {
                'version': exploration.version,
                'commit_message': 'Added State 4',
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': 'State 4'
                }, {
                    'cmd': 'edit_state_property',
                    'state_name': 'State 4',
                    'property_name': 'widget_id',
                    'new_value': 'TextInput',
                }]
            },
            csrf_token=csrf_token,
            expected_status_int=200
        )
        self.assertIn('State 4', response_dict['states'])

        # Check that collaborator cannot add new members
        exploration = exp_services.get_exploration_by_id(EXP_ID)
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, EXP_ID)
        response_dict = self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR3_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR,
            }, csrf_token, expect_errors=True, expected_status_int=401)
        self.assertEqual(response_dict['code'], 401)

        self.logout()

        # Check that collaborator2 can access editor page and can edit.
        self.login(self.COLLABORATOR2_EMAIL)
        response = self.testapp.get('/create/%s' % EXP_ID)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        csrf_token = self.get_csrf_token_from_response(response)

        # Check that collaborator2 can add a new state called 'State 5'
        add_url = '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, EXP_ID)
        response_dict = self.put_json(
            add_url,
            {
                'version': exploration.version,
                'commit_message': 'Added State 5',
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': 'State 5'
                }, {
                    'cmd': 'edit_state_property',
                    'state_name': 'State 5',
                    'property_name': 'widget_id',
                    'new_value': 'TextInput',
                }]
            },
            csrf_token=csrf_token,
            expected_status_int=200
        )
        self.assertIn('State 5', response_dict['states'])

        # Check that collaborator2 cannot add new members
        exploration = exp_services.get_exploration_by_id(EXP_ID)
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, EXP_ID)
        response_dict = self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR3_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR,
            }, csrf_token, expect_errors=True, expected_status_int=401)
        self.assertEqual(response_dict['code'], 401)

        self.logout()
| |
# elf tools
# supports loading elves into the emulator, and saving emulator state as an elf
import struct
import utils
def nt_string_at(a, i):
    """Decode the NUL-terminated ASCII string starting at offset ``i`` of ``a``.

    If no NUL terminator is found, the rest of the buffer is returned.
    """
    # Everything from the offset up to (but excluding) the first NUL;
    # split() naturally handles the no-terminator case.
    return a[i:].split(b'\x00', 1)[0].decode('ascii')
def nt_string_append(a, s):
    """Append ``s`` to string-table blob ``a`` as NUL-terminated ASCII.

    Returns (new_blob, offset) where offset is the position of the newly
    added string within the blob.
    """
    offset = len(a)
    new_blob = b''.join([a, s.encode('ascii'), b'\x00'])
    return new_blob, offset
def unpack_schem(schem, fieldnames, data):
    """Unpack ``data`` with struct format ``schem`` into a dict keyed by
    ``fieldnames`` (one name per unpacked value, in order)."""
    values = struct.unpack(schem, data)
    assert len(fieldnames) == len(values)
    return dict(zip(fieldnames, values))
def pack_schem(schem, fieldnames, fields):
    """Pack ``fields`` (a dict) with struct format ``schem``, taking values
    in ``fieldnames`` order; names missing from the dict default to 0.

    Extra keys in ``fields`` are ignored.
    """
    # dict.get with a default replaces the `x if k in d else 0` pattern:
    # one lookup instead of two, and clearer intent.
    return struct.pack(schem, *(fields.get(name, 0) for name in fieldnames))
# struct format and field names for a 32-bit little-endian ELF file header
# (Elf32_Ehdr, with the leading e_ident bytes flattened into named fields).
# Trailing numbers note each field's size in bytes.
elf_header_schem = '<I5B7x2H5I6H'
elf_header_fields = [
    'ei_mag',         # 4  magic bytes, read as one little-endian uint32
    'ei_class',       # 1
    'ei_data',        # 1
    'ei_version',     # 1
    'ei_osabi',       # 1
    'ei_abiversion',  # 1
    'e_type',         # 2
    'e_machine',      # 2
    'e_version',      # 4
    'e_entry',        # 4
    'e_phoff',        # 4
    'e_shoff',        # 4
    'e_flags',        # 4
    'e_ehsize',       # 2
    'e_phentsize',    # 2
    'e_phnum',        # 2
    'e_shentsize',    # 2
    'e_shnum',        # 2
    'e_shstrndx',     # 2
]
def extract_header(f):
    """Read and decode the ELF header from the start of file object ``f``."""
    f.seek(0)
    raw = f.read(struct.calcsize(elf_header_schem))
    return unpack_schem(elf_header_schem, elf_header_fields, raw)
def blast_header(f, header):
    """Encode ``header`` (a field dict) and write it at offset 0 of ``f``."""
    f.seek(0)
    f.write(pack_schem(elf_header_schem, elf_header_fields, header))
# Expected e_ident / header values for MSP430 ELF images.
elf_magic = 0x464c457f    # b'\x7fELF' read as a little-endian uint32
elf_version = 0x1         # EV_CURRENT
elf_msp_class = 0x1       # ELFCLASS32
elf_msp_data = 0x1        # ELFDATA2LSB (little-endian)
elf_msp_machine = 0x69    # EM_MSP430
def msp_check_header(header):
    """Raise ValueError unless ``header`` describes a little-endian 32-bit
    MSP430 ELF image; return None on success."""
    # Checks run in the same order as before, producing identical messages.
    expectations = (
        ('ei_mag', elf_magic, 'bad magic number in elf'),
        ('ei_version', elf_version, 'bad elf version'),
        ('ei_class', elf_msp_class, 'bad elf class'),
        ('ei_data', elf_msp_data, 'bad elf endianness'),
        ('e_machine', elf_msp_machine, 'bad machine identifier in elf'),
    )
    for field, expected, description in expectations:
        actual = header[field]
        if actual != expected:
            raise ValueError('{:s}: was {:x}, expecting {:x}'.format(
                description, actual, expected))
# struct format and field names for a 32-bit ELF program (segment) header
# (Elf32_Phdr): eight little-endian uint32s.
elf_prog_schem = '<8I'
elf_prog_fields = [
    'p_type',
    'p_offset',
    'p_vaddr',
    'p_paddr',
    'p_filesz',
    'p_memsz',
    'p_flags',
    'p_align',
]
def extract_segments(f, header):
    """Read every program header (and its file data) from ``f``.

    Returns a list of segment dicts, each with the Elf32_Phdr fields plus a
    'data' key holding the p_filesz bytes at p_offset.
    """
    prog_size = struct.calcsize(elf_prog_schem)
    phentsize = header['e_phentsize']
    if prog_size != phentsize:
        raise ValueError('bad phentsize in elf: was {:x}, expecting {:x}'.format(
            phentsize, prog_size))
    base = header['e_phoff']
    segments = []
    for idx in range(header['e_phnum']):
        f.seek(base + idx * prog_size)
        prog = unpack_schem(elf_prog_schem, elf_prog_fields, f.read(prog_size))
        f.seek(prog['p_offset'])
        prog['data'] = f.read(prog['p_filesz'])
        segments.append(prog)
    return segments
def blast_segments(f, segments, phoff, write_data=True):
    """Write the program header table at ``phoff``; when ``write_data`` is
    true, also write each segment's data at its recorded p_offset
    (truncated to p_filesz)."""
    f.seek(phoff)
    for seg in segments:
        f.write(pack_schem(elf_prog_schem, elf_prog_fields, seg))
    if not write_data:
        return
    for seg in segments:
        f.seek(seg['p_offset'])
        f.write(seg['data'][:seg['p_filesz']])
# struct format and field names for a 32-bit ELF section header
# (Elf32_Shdr): ten little-endian uint32s.
elf_section_schem = '<10I'
elf_section_fields = [
    'sh_name',
    'sh_type',
    'sh_flags',
    'sh_addr',
    'sh_offset',
    'sh_size',
    'sh_link',
    'sh_info',
    'sh_addralign',
    'sh_entsize',
]
def extract_sections(f, header):
    """Read every section header (and its data) from ``f``.

    Section names are resolved through the section-header string table at
    index e_shstrndx. Returns a list of section dicts with 'data' and
    'name' keys added.
    """
    sec_size = struct.calcsize(elf_section_schem)
    shentsize = header['e_shentsize']
    if sec_size != shentsize:
        raise ValueError('bad shentsize in elf: was {:x}, expecting {:x}'.format(
            shentsize, sec_size))
    sections = []
    for idx in range(header['e_shnum']):
        f.seek(header['e_shoff'] + idx * sec_size)
        sec = unpack_schem(elf_section_schem, elf_section_fields, f.read(sec_size))
        # SHT_NULL (0) and SHT_NOBITS (8) sections occupy no file space.
        if sec['sh_type'] in [0, 8]:
            sec['data'] = b''
        else:
            f.seek(sec['sh_offset'])
            sec['data'] = f.read(sec['sh_size'])
        sections.append(sec)
    # Second pass: resolve names once the string table section is available.
    strtab = sections[header['e_shstrndx']]
    for sec in sections:
        if sec['sh_type'] == 0:
            sec['name'] = ''
        else:
            sec['name'] = nt_string_at(strtab['data'], sec['sh_name'])
    return sections
def blast_sections(f, sections, shoff, write_data=True):
    """Write the section header table at ``shoff``; when ``write_data`` is
    true, also write each non-empty section's data at its recorded
    sh_offset (truncated to sh_size)."""
    f.seek(shoff)
    for sec in sections:
        f.write(pack_schem(elf_section_schem, elf_section_fields, sec))
    if not write_data:
        return
    for sec in sections:
        payload = sec.get('data', b'')
        if payload:
            f.seek(sec['sh_offset'])
            f.write(payload[:sec['sh_size']])
# struct format and field names for a 32-bit ELF symbol table entry
# (Elf32_Sym).
elf_symbol_schem = '<3I2BH'
elf_symbol_fields = [
    'st_name',
    'st_value',
    'st_size',
    'st_info',
    'st_other',
    'st_shndx',
]
def symbols_of(sections):
    """Decode the symbol table out of a list of section dicts.

    Finds the first SHT_SYMTAB section, validates its entry size, and
    returns a list of symbol dicts with a resolved 'name' key (looked up
    in the string table linked via sh_link). Returns [] when there is no
    symbol table (or it carries no data).
    """
    # next() with a default replaces the manual search-and-break loop.
    symtab = next((s for s in sections if s['sh_type'] == 2), None)  # SHT_SYMTAB
    # 'data' not in ... fixes the unidiomatic `not 'data' in ...` (PEP 8 / E713).
    if symtab is None or 'data' not in symtab:
        return []
    entsize = symtab['sh_entsize']
    sym_size = struct.calcsize(elf_symbol_schem)
    if sym_size != entsize:
        raise ValueError('bad symbol entsize in elf: was {:x}, expecting {:x}'.format(
            entsize, sym_size))
    data = symtab['data']
    if len(data) % sym_size != 0:
        raise ValueError('size of symbol table is not divisible by entsize: {:d} % {:d} != 0'.format(
            len(data), sym_size))
    strtab = sections[symtab['sh_link']]
    symbols = []
    for i in range(0, len(data), sym_size):
        sym = unpack_schem(elf_symbol_schem, elf_symbol_fields, data[i:i+sym_size])
        # st_name == 0 conventionally means "no name".
        if sym['st_name'] == 0:
            sym['name'] = ''
        else:
            sym['name'] = nt_string_at(strtab['data'], sym['st_name'])
        symbols.append(sym)
    return symbols
def symbols_pack(symbols):
    """Pack a list of symbol dicts into raw symtab bytes.

    Uses b''.join instead of repeated ``bytes +=`` concatenation, which is
    quadratic in the number of symbols.
    """
    return b''.join(
        pack_schem(elf_symbol_schem, elf_symbol_fields, symbol)
        for symbol in symbols)
# Vendor-specific sh_type used to stash the register file in state dumps;
# written by save() and recognized by load().
elf_section_registers = 0x8000ff01
def load(state, fname, restore_regs = True, verbosity = 0):
    """Load an ELF image from ``fname`` into emulator ``state``.

    Allocatable sections (any of SHF_WRITE/ALLOC/EXECINSTR set) are written
    into memory, relocated from virtual to physical addresses via the
    program headers. If ``restore_regs`` is true, a special __registers
    section (sh_type == elf_section_registers, produced by save()) restores
    the register file.

    Fixes vs. the previous version:
    - ``restore_regs`` was accepted but never used; it now gates register
      restoration (default True preserves the old behavior).
    - the zero-fill loop for sections whose sh_size exceeds their file data
      started back at ``addr`` and clobbered the bytes just written; it now
      starts at ``addr + len(data)``.
    """
    with open(fname, 'rb') as f:
        header = extract_header(f)
        msp_check_header(header)

        # hack to relocate sections based on virtual to physical mapping in
        # prog headers
        v_to_p = {}
        segments = extract_segments(f, header)
        for segment in segments:
            v_to_p[segment['p_vaddr']] = segment['p_paddr']

        sections = extract_sections(f, header)
        for section in sections:
            # special section for storing registers from dumps
            if section['sh_type'] == elf_section_registers:
                if not restore_regs:
                    continue
                regdata = section['data']
                r = 0
                for i in range(0, len(regdata), 4):
                    regval = struct.unpack('<I', regdata[i:i+4])[0]
                    state.writereg(r, regval)
                    r += 1
            elif section['sh_flags'] & 0x7 != 0 and section['sh_size'] > 0:
                vaddr = section['sh_addr']
                if vaddr in v_to_p:
                    addr = v_to_p[vaddr]
                elif vaddr == 0:
                    print('WARNING: section located at address 0, ignoring')
                    continue
                else:
                    addr = vaddr
                data = section['data']
                size = section['sh_size']
                if verbosity >= 1:
                    if vaddr != addr:
                        vdesc = ' (virtual address {:05x})'.format(vaddr)
                    else:
                        vdesc = ''
                    print('Writing {:5d} bytes at {:05x} [section: {:s}]{:s}...'.format(
                        size, addr, section['name'], vdesc))
                for i in range(min(size, len(data))):
                    state.write8(addr + i, data[i])
                # Zero-fill the tail when sh_size exceeds the file data
                # (e.g. bss-like sections). Starts AFTER the data just
                # written -- the old code restarted at `addr` and
                # overwrote it.
                for i in range(len(data), size):
                    state.write8(addr + i, 0)

    if verbosity >= 3:
        print('loaded state:')
        state.dump()
# Template dicts used by save(); fields left as None are filled in
# per-file before packing.
save_header = {
    'ei_mag'        : elf_magic,
    'ei_class'      : elf_msp_class,
    'ei_data'       : elf_msp_data,
    'ei_version'    : elf_version,
    'ei_osabi'      : 0,
    'ei_abiversion' : 0,
    'e_type'        : 2, # it's kind of a core file though (4)
    'e_machine'     : elf_msp_machine,
    'e_version'     : elf_version,
    'e_entry'       : None,
    'e_phoff'       : None,
    'e_shoff'       : None,
    'e_flags'       : 0x0,
    'e_ehsize'      : 52,
    'e_phentsize'   : 32,
    'e_phnum'       : None,
    'e_shentsize'   : 40,
    'e_shnum'       : None,
    'e_shstrndx'    : None,
}

# One PT_LOAD program header per saved memory region.
save_prog = {
    'p_type'   : 1,
    'p_offset' : None,
    'p_vaddr'  : None,
    'p_paddr'  : None,
    'p_filesz' : None,
    'p_memsz'  : None,
    'p_flags'  : 0x7, # RWE
    'p_align'  : 2,
}

# One PROGBITS section per saved memory region (mirrors the segment).
save_section = {
    'sh_name'      : 0,
    'sh_type'      : 1, # PROGBITS
    'sh_flags'     : 0x7, # WAX
    'sh_addr'      : None,
    'sh_offset'    : None,
    'sh_size'      : None,
    'sh_link'      : 0,
    'sh_info'      : 0,
    'sh_addralign' : 2,
    'sh_entsize'   : 0,
}

# One absolute section-marker symbol per saved memory region.
save_symbol = {
    'st_name'  : None,
    'st_value' : None,
    'st_size'  : 0,
    'st_info'  : 0x3, # STT_SECTION
    'st_other' : 0,
    'st_shndx' : 0xfff1, # SHN_ABS
}
def save(state, fname, verbosity = 0):
    """Serialize emulator ``state`` to an ELF file at ``fname``.

    Each memory region from state.segments() becomes one PT_LOAD program
    header plus one PROGBITS section (sharing the same file data) and a
    start-address symbol. The register file is stored in a vendor-specific
    __registers section so load() can restore it. Layout: ELF header,
    program header table, region data blobs, register data, .shstrtab,
    .symtab, then the section header table.

    NOTE(review): assumes state.segments() yields (addr, data) pairs and
    state.registers() yields ints -- confirm against the model class.
    """
    if verbosity >= 3:
        print('saving state:')
        state.dump()

    regions = state.segments()

    # Program header table sits directly after the ELF header.
    phoff = struct.calcsize(elf_header_schem)
    phnum = len(regions)

    header = save_header.copy()
    header['e_phoff'] = phoff
    header['e_phnum'] = phnum
    header['e_entry'] = state.entry()

    # Section-name string table; index 0 must be the empty string.
    name_strtab = '.shstrtab'
    name_symtab = '.symtab'
    s_data = b'\x00'
    s_data, s_name_strtab = nt_string_append(s_data, name_strtab)
    s_data, s_name_symtab = nt_string_append(s_data, name_symtab)

    segments = []
    sections = []
    symbols = []

    # section 0 and symbol 0 are null
    sections.append({})
    symbols.append({})

    # `offset` tracks where the next data blob will land in the file:
    # file data starts right after the program header table.
    offset = phoff + (phnum * struct.calcsize(elf_prog_schem))
    idx = 0
    for addr, data in regions:
        name = '__segment_{:d}'.format(idx)
        s_data, s_name = nt_string_append(s_data, name)

        if verbosity >= 1:
            print('saving {:5d} bytes at {:05x} [section: {:s}]...'.format(
                len(data), addr, name))

        segment = save_prog.copy()
        segment['p_offset'] = offset
        segment['p_vaddr'] = addr
        segment['p_paddr'] = addr
        segment['p_filesz'] = len(data)
        segment['p_memsz'] = len(data)
        segment['data'] = bytes(data)
        segments.append(segment)

        # Matching section reuses the same file offset/data as the segment,
        # so the bytes are only written once (see blast_* write_data flags).
        section = save_section.copy()
        section['name'] = name
        section['sh_name'] = s_name
        section['sh_addr'] = addr
        section['sh_offset'] = offset
        section['sh_size'] = len(data)
        section['data'] = bytes(data)
        sections.append(section)

        symbol = save_symbol.copy()
        symbol['name'] = name
        symbol['st_name'] = s_name
        symbol['st_value'] = addr
        symbols.append(symbol)

        offset += len(data)
        idx += 1

    # registers section (for internal use mostly)
    registers = state.registers()
    regdata = b''
    for r in registers:
        regdata += struct.pack('<I', r)
    name_registers = '__registers'
    s_data, s_name_registers = nt_string_append(s_data, name_registers)
    regtab = save_section.copy()
    regtab['name'] = name_registers
    regtab['sh_name'] = s_name_registers
    regtab['sh_type'] = elf_section_registers
    regtab['sh_flags'] = 0x0
    regtab['sh_addr'] = 0
    regtab['sh_offset'] = offset
    regtab['sh_size'] = len(regdata)
    regtab['sh_addralign'] = 4
    regtab['sh_entsize'] = 4
    regtab['data'] = regdata
    sections.append(regtab)
    offset += len(regdata)

    # shstrtab section
    strtab = save_section.copy()
    strtab['name'] = name_strtab
    strtab['sh_name'] = s_name_strtab
    strtab['sh_type'] = 3 # SHT_STRTAB
    strtab['sh_flags'] = 0x20 # SHF_STRINGS
    strtab['sh_addr'] = 0
    strtab['sh_offset'] = offset
    strtab['sh_size'] = len(s_data)
    strtab['sh_addralign'] = 1
    strtab['data'] = s_data
    sections.append(strtab)
    # Remember the shstrtab index: the header's e_shstrndx and the
    # symtab's sh_link both point at it.
    strtab_idx = len(sections) - 1
    offset += len(s_data)

    # symtab section
    symdata = symbols_pack(symbols)
    symtab = save_section.copy()
    symtab['name'] = name_symtab
    symtab['sh_name'] = s_name_symtab
    symtab['sh_type'] = 2 # SHT_SYMTAB
    symtab['sh_flags'] = 0x0
    symtab['sh_addr'] = 0
    symtab['sh_offset'] = offset
    symtab['sh_size'] = len(symdata)
    symtab['sh_link'] = strtab_idx
    symtab['sh_info'] = len(symbols)
    symtab['sh_addralign'] = 4
    symtab['sh_entsize'] = struct.calcsize(elf_symbol_schem)
    symtab['data'] = symdata
    sections.append(symtab)
    offset += len(symdata)

    # Section header table goes last, after all data blobs.
    shoff = offset
    shnum = len(sections)
    header['e_shoff'] = shoff
    header['e_shnum'] = shnum
    header['e_shstrndx'] = strtab_idx

    with open(fname, 'wb') as f:
        blast_header(f, header)
        # Segment data is skipped here because the sections carry the
        # same bytes and blast_sections writes them below.
        blast_segments(f, segments, phoff, write_data=False)
        blast_sections(f, sections, shoff, write_data=True)
if __name__ == '__main__':
    # Round-trip utility: load INELF into a fresh MSP430-FR5969 model,
    # then dump the resulting machine state back out as OUTELF.
    import msp_fr5969_model as model
    import sys
    if len(sys.argv) != 3:
        print('usage: {:s} <INELF> <OUTELF>'.format(sys.argv[0]))
        exit(1)
    fname = sys.argv[1]
    outfname = sys.argv[2]
    state = model.Model()
    load(state, fname, verbosity=3)
    save(state, outfname, verbosity=1)
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.common.types import ad_type_infos
from google.ads.googleads.v9.common.types import custom_parameter
from google.ads.googleads.v9.common.types import final_app_url
from google.ads.googleads.v9.common.types import url_collection
from google.ads.googleads.v9.enums.types import ad_type
from google.ads.googleads.v9.enums.types import device
from google.ads.googleads.v9.enums.types import system_managed_entity_source
# Proto-plus module descriptor: registers the message types listed in
# ``manifest`` under the given proto package for (de)serialization.
__protobuf__ = proto.module(
    package="google.ads.googleads.v9.resources",
    marshal="google.ads.googleads.v9",
    manifest={"Ad",},
)
class Ad(proto.Message):
    r"""An ad.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        resource_name (str):
            Immutable. The resource name of the ad. Ad resource names
            have the form:

            ``customers/{customer_id}/ads/{ad_id}``
        id (int):
            Output only. The ID of the ad.

            This field is a member of `oneof`_ ``_id``.
        final_urls (Sequence[str]):
            The list of possible final URLs after all
            cross-domain redirects for the ad.
        final_app_urls (Sequence[google.ads.googleads.v9.common.types.FinalAppUrl]):
            A list of final app URLs that will be used on
            mobile if the user has the specific app
            installed.
        final_mobile_urls (Sequence[str]):
            The list of possible final mobile URLs after
            all cross-domain redirects for the ad.
        tracking_url_template (str):
            The URL template for constructing a tracking
            URL.

            This field is a member of `oneof`_ ``_tracking_url_template``.
        final_url_suffix (str):
            The suffix to use when constructing a final
            URL.

            This field is a member of `oneof`_ ``_final_url_suffix``.
        url_custom_parameters (Sequence[google.ads.googleads.v9.common.types.CustomParameter]):
            The list of mappings that can be used to substitute custom
            parameter tags in a ``tracking_url_template``,
            ``final_urls``, or ``mobile_final_urls``. For mutates,
            please use url custom parameter operations.
        display_url (str):
            The URL that appears in the ad description
            for some ad formats.

            This field is a member of `oneof`_ ``_display_url``.
        type_ (google.ads.googleads.v9.enums.types.AdTypeEnum.AdType):
            Output only. The type of ad.
        added_by_google_ads (bool):
            Output only. Indicates if this ad was
            automatically added by Google Ads and not by a
            user. For example, this could happen when ads
            are automatically created as suggestions for new
            ads based on knowledge of how existing ads are
            performing.

            This field is a member of `oneof`_ ``_added_by_google_ads``.
        device_preference (google.ads.googleads.v9.enums.types.DeviceEnum.Device):
            The device preference for the ad. You can
            only specify a preference for mobile devices.
            When this preference is set the ad will be
            preferred over other ads when being displayed on
            a mobile device. The ad can still be displayed
            on other device types, e.g. if no other ads are
            available. If unspecified (no device
            preference), all devices are targeted. This is
            only supported by some ad types.
        url_collections (Sequence[google.ads.googleads.v9.common.types.UrlCollection]):
            Additional URLs for the ad that are tagged
            with a unique identifier that can be referenced
            from other fields in the ad.
        name (str):
            Immutable. The name of the ad. This is only
            used to be able to identify the ad. It does not
            need to be unique and does not affect the served
            ad. The name field is currently only supported
            for DisplayUploadAd, ImageAd,
            ShoppingComparisonListingAd and VideoAd.

            This field is a member of `oneof`_ ``_name``.
        system_managed_resource_source (google.ads.googleads.v9.enums.types.SystemManagedResourceSourceEnum.SystemManagedResourceSource):
            Output only. If this ad is system managed,
            then this field will indicate the source. This
            field is read-only.
        text_ad (google.ads.googleads.v9.common.types.TextAdInfo):
            Immutable. Details pertaining to a text ad.

            This field is a member of `oneof`_ ``ad_data``.
        expanded_text_ad (google.ads.googleads.v9.common.types.ExpandedTextAdInfo):
            Details pertaining to an expanded text ad.

            This field is a member of `oneof`_ ``ad_data``.
        call_ad (google.ads.googleads.v9.common.types.CallAdInfo):
            Details pertaining to a call ad.

            This field is a member of `oneof`_ ``ad_data``.
        expanded_dynamic_search_ad (google.ads.googleads.v9.common.types.ExpandedDynamicSearchAdInfo):
            Immutable. Details pertaining to an Expanded Dynamic Search
            Ad. This type of ad has its headline, final URLs, and
            display URL auto-generated at serving time according to
            domain name specific information provided by
            ``dynamic_search_ads_setting`` linked at the campaign level.

            This field is a member of `oneof`_ ``ad_data``.
        hotel_ad (google.ads.googleads.v9.common.types.HotelAdInfo):
            Details pertaining to a hotel ad.

            This field is a member of `oneof`_ ``ad_data``.
        shopping_smart_ad (google.ads.googleads.v9.common.types.ShoppingSmartAdInfo):
            Details pertaining to a Smart Shopping ad.

            This field is a member of `oneof`_ ``ad_data``.
        shopping_product_ad (google.ads.googleads.v9.common.types.ShoppingProductAdInfo):
            Details pertaining to a Shopping product ad.

            This field is a member of `oneof`_ ``ad_data``.
        gmail_ad (google.ads.googleads.v9.common.types.GmailAdInfo):
            Immutable. Details pertaining to a Gmail ad.

            This field is a member of `oneof`_ ``ad_data``.
        image_ad (google.ads.googleads.v9.common.types.ImageAdInfo):
            Immutable. Details pertaining to an Image ad.

            This field is a member of `oneof`_ ``ad_data``.
        video_ad (google.ads.googleads.v9.common.types.VideoAdInfo):
            Details pertaining to a Video ad.

            This field is a member of `oneof`_ ``ad_data``.
        video_responsive_ad (google.ads.googleads.v9.common.types.VideoResponsiveAdInfo):
            Details pertaining to a Video responsive ad.

            This field is a member of `oneof`_ ``ad_data``.
        responsive_search_ad (google.ads.googleads.v9.common.types.ResponsiveSearchAdInfo):
            Details pertaining to a responsive search ad.

            This field is a member of `oneof`_ ``ad_data``.
        legacy_responsive_display_ad (google.ads.googleads.v9.common.types.LegacyResponsiveDisplayAdInfo):
            Details pertaining to a legacy responsive
            display ad.

            This field is a member of `oneof`_ ``ad_data``.
        app_ad (google.ads.googleads.v9.common.types.AppAdInfo):
            Details pertaining to an app ad.

            This field is a member of `oneof`_ ``ad_data``.
        legacy_app_install_ad (google.ads.googleads.v9.common.types.LegacyAppInstallAdInfo):
            Immutable. Details pertaining to a legacy app
            install ad.

            This field is a member of `oneof`_ ``ad_data``.
        responsive_display_ad (google.ads.googleads.v9.common.types.ResponsiveDisplayAdInfo):
            Details pertaining to a responsive display
            ad.

            This field is a member of `oneof`_ ``ad_data``.
        local_ad (google.ads.googleads.v9.common.types.LocalAdInfo):
            Details pertaining to a local ad.

            This field is a member of `oneof`_ ``ad_data``.
        display_upload_ad (google.ads.googleads.v9.common.types.DisplayUploadAdInfo):
            Details pertaining to a display upload ad.

            This field is a member of `oneof`_ ``ad_data``.
        app_engagement_ad (google.ads.googleads.v9.common.types.AppEngagementAdInfo):
            Details pertaining to an app engagement ad.

            This field is a member of `oneof`_ ``ad_data``.
        shopping_comparison_listing_ad (google.ads.googleads.v9.common.types.ShoppingComparisonListingAdInfo):
            Details pertaining to a Shopping Comparison
            Listing ad.

            This field is a member of `oneof`_ ``ad_data``.
        smart_campaign_ad (google.ads.googleads.v9.common.types.SmartCampaignAdInfo):
            Details pertaining to a Smart campaign ad.

            This field is a member of `oneof`_ ``ad_data``.
        app_pre_registration_ad (google.ads.googleads.v9.common.types.AppPreRegistrationAdInfo):
            Details pertaining to an app pre-registration
            ad.

            This field is a member of `oneof`_ ``ad_data``.
    """

    # NOTE: generated protobuf-plus field definitions -- do not edit by
    # hand; field numbers and oneof groupings must match the .proto schema.
    resource_name = proto.Field(proto.STRING, number=37,)
    id = proto.Field(proto.INT64, number=40, optional=True,)
    final_urls = proto.RepeatedField(proto.STRING, number=41,)
    final_app_urls = proto.RepeatedField(
        proto.MESSAGE, number=35, message=final_app_url.FinalAppUrl,
    )
    final_mobile_urls = proto.RepeatedField(proto.STRING, number=42,)
    tracking_url_template = proto.Field(proto.STRING, number=43, optional=True,)
    final_url_suffix = proto.Field(proto.STRING, number=44, optional=True,)
    url_custom_parameters = proto.RepeatedField(
        proto.MESSAGE, number=10, message=custom_parameter.CustomParameter,
    )
    display_url = proto.Field(proto.STRING, number=45, optional=True,)
    type_ = proto.Field(proto.ENUM, number=5, enum=ad_type.AdTypeEnum.AdType,)
    added_by_google_ads = proto.Field(proto.BOOL, number=46, optional=True,)
    device_preference = proto.Field(
        proto.ENUM, number=20, enum=device.DeviceEnum.Device,
    )
    url_collections = proto.RepeatedField(
        proto.MESSAGE, number=26, message=url_collection.UrlCollection,
    )
    name = proto.Field(proto.STRING, number=47, optional=True,)
    system_managed_resource_source = proto.Field(
        proto.ENUM,
        number=27,
        enum=system_managed_entity_source.SystemManagedResourceSourceEnum.SystemManagedResourceSource,
    )
    # The fields below are all members of the mutually-exclusive
    # ``ad_data`` oneof: exactly one concrete ad-type payload may be set.
    text_ad = proto.Field(
        proto.MESSAGE,
        number=6,
        oneof="ad_data",
        message=ad_type_infos.TextAdInfo,
    )
    expanded_text_ad = proto.Field(
        proto.MESSAGE,
        number=7,
        oneof="ad_data",
        message=ad_type_infos.ExpandedTextAdInfo,
    )
    call_ad = proto.Field(
        proto.MESSAGE,
        number=49,
        oneof="ad_data",
        message=ad_type_infos.CallAdInfo,
    )
    expanded_dynamic_search_ad = proto.Field(
        proto.MESSAGE,
        number=14,
        oneof="ad_data",
        message=ad_type_infos.ExpandedDynamicSearchAdInfo,
    )
    hotel_ad = proto.Field(
        proto.MESSAGE,
        number=15,
        oneof="ad_data",
        message=ad_type_infos.HotelAdInfo,
    )
    shopping_smart_ad = proto.Field(
        proto.MESSAGE,
        number=17,
        oneof="ad_data",
        message=ad_type_infos.ShoppingSmartAdInfo,
    )
    shopping_product_ad = proto.Field(
        proto.MESSAGE,
        number=18,
        oneof="ad_data",
        message=ad_type_infos.ShoppingProductAdInfo,
    )
    gmail_ad = proto.Field(
        proto.MESSAGE,
        number=21,
        oneof="ad_data",
        message=ad_type_infos.GmailAdInfo,
    )
    image_ad = proto.Field(
        proto.MESSAGE,
        number=22,
        oneof="ad_data",
        message=ad_type_infos.ImageAdInfo,
    )
    video_ad = proto.Field(
        proto.MESSAGE,
        number=24,
        oneof="ad_data",
        message=ad_type_infos.VideoAdInfo,
    )
    video_responsive_ad = proto.Field(
        proto.MESSAGE,
        number=39,
        oneof="ad_data",
        message=ad_type_infos.VideoResponsiveAdInfo,
    )
    responsive_search_ad = proto.Field(
        proto.MESSAGE,
        number=25,
        oneof="ad_data",
        message=ad_type_infos.ResponsiveSearchAdInfo,
    )
    legacy_responsive_display_ad = proto.Field(
        proto.MESSAGE,
        number=28,
        oneof="ad_data",
        message=ad_type_infos.LegacyResponsiveDisplayAdInfo,
    )
    app_ad = proto.Field(
        proto.MESSAGE,
        number=29,
        oneof="ad_data",
        message=ad_type_infos.AppAdInfo,
    )
    legacy_app_install_ad = proto.Field(
        proto.MESSAGE,
        number=30,
        oneof="ad_data",
        message=ad_type_infos.LegacyAppInstallAdInfo,
    )
    responsive_display_ad = proto.Field(
        proto.MESSAGE,
        number=31,
        oneof="ad_data",
        message=ad_type_infos.ResponsiveDisplayAdInfo,
    )
    local_ad = proto.Field(
        proto.MESSAGE,
        number=32,
        oneof="ad_data",
        message=ad_type_infos.LocalAdInfo,
    )
    display_upload_ad = proto.Field(
        proto.MESSAGE,
        number=33,
        oneof="ad_data",
        message=ad_type_infos.DisplayUploadAdInfo,
    )
    app_engagement_ad = proto.Field(
        proto.MESSAGE,
        number=34,
        oneof="ad_data",
        message=ad_type_infos.AppEngagementAdInfo,
    )
    shopping_comparison_listing_ad = proto.Field(
        proto.MESSAGE,
        number=36,
        oneof="ad_data",
        message=ad_type_infos.ShoppingComparisonListingAdInfo,
    )
    smart_campaign_ad = proto.Field(
        proto.MESSAGE,
        number=48,
        oneof="ad_data",
        message=ad_type_infos.SmartCampaignAdInfo,
    )
    app_pre_registration_ad = proto.Field(
        proto.MESSAGE,
        number=50,
        oneof="ad_data",
        message=ad_type_infos.AppPreRegistrationAdInfo,
    )
# Public API of this module, derived from the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| |
"""
Tests for VARMAX models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.datasets import webuse
from statsmodels.tsa.statespace import dynamic_factor
from .results import results_varmax, results_dynamic_factor
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
from statsmodels.iolib.summary import forg
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
# Load the Stata-produced reference output used by every test class below.
current_path = os.path.dirname(os.path.abspath(__file__))
output_path = 'results' + os.sep + 'results_dynamic_factor_stata.csv'
output_results = pd.read_csv(current_path + os.sep + output_path)
class CheckDynamicFactor(object):
def __init__(self, true, k_factors, factor_order, cov_type='oim',
included_vars=['dln_inv', 'dln_inc', 'dln_consump'],
demean=False, filter=True, **kwargs):
self.true = true
# 1960:Q1 - 1982:Q4
dta = pd.DataFrame(
results_varmax.lutkepohl_data, columns=['inv', 'inc', 'consump'],
index=pd.date_range('1960-01-01', '1982-10-01', freq='QS'))
dta['dln_inv'] = np.log(dta['inv']).diff()
dta['dln_inc'] = np.log(dta['inc']).diff()
dta['dln_consump'] = np.log(dta['consump']).diff()
endog = dta.ix['1960-04-01':'1978-10-01', included_vars]
if demean:
endog -= dta.ix[1:, included_vars].mean()
self.model = dynamic_factor.DynamicFactor(endog, k_factors=k_factors,
factor_order=factor_order,
**kwargs)
if filter:
self.results = self.model.smooth(true['params'], cov_type=cov_type)
def test_params(self):
# Smoke test to make sure the start_params are well-defined and
# lead to a well-defined model
self.model.filter(self.model.start_params)
# Similarly a smoke test for param_names
assert_equal(len(self.model.start_params), len(self.model.param_names))
# Finally make sure the transform and untransform do their job
actual = self.model.transform_params(self.model.untransform_params(self.model.start_params))
assert_allclose(actual, self.model.start_params)
# Also in the case of enforce stationarity = False
self.model.enforce_stationarity = False
actual = self.model.transform_params(self.model.untransform_params(self.model.start_params))
self.model.enforce_stationarity = True
assert_allclose(actual, self.model.start_params)
def test_results(self):
# Smoke test for creating the summary
self.results.summary()
# Test cofficient matrix creation (via a different, more direct, method)
if self.model.factor_order > 0:
coefficients = np.array(self.results.params[self.model._params_factor_transition]).reshape(self.model.k_factors, self.model.k_factors * self.model.factor_order)
coefficient_matrices = np.array([
coefficients[:self.model.k_factors, i*self.model.k_factors:(i+1)*self.model.k_factors]
for i in range(self.model.factor_order)
])
assert_equal(self.results.coefficient_matrices_var, coefficient_matrices)
else:
assert_equal(self.results.coefficient_matrices_var, None)
# Smoke test for plot_coefficients_of_determination
if have_matplotlib:
fig = self.results.plot_coefficients_of_determination();
plt.close(fig)
def test_no_enforce(self):
return
# Test that nothing goes wrong when we don't enforce stationarity
params = self.model.untransform_params(self.true['params'])
params[self.model._params_transition] = (
self.true['params'][self.model._params_transition])
self.model.enforce_stationarity = False
results = self.model.filter(params, transformed=False)
self.model.enforce_stationarity = True
assert_allclose(results.llf, self.results.llf, rtol=1e-5)
def test_mle(self):
    """Fit by MLE via a chain of optimizers and compare loglikelihoods."""
    with warnings.catch_warnings(record=True):
        # Powell warm-start, then the default optimizer, then Nelder-Mead
        fit1 = self.model.fit(method='powell', maxiter=100, disp=False)
        fit2 = self.model.fit(fit1.params, maxiter=1000, disp=False)
        fit3 = self.model.fit(fit2.params, method='nm', maxiter=1000,
                              disp=False)
        # A strictly better llf than the reference is fine; otherwise the
        # fitted llf must at least match the reference closely.
        if not fit3.llf > self.results.llf:
            assert_allclose(fit3.llf, self.results.llf, rtol=1e-5)
def test_loglike(self):
    """Loglikelihood at the reference parameters matches to ~6 digits."""
    actual = self.results.llf
    expected = self.true['loglike']
    assert_allclose(actual, expected, rtol=1e-6)
def test_bse_oim(self):
    # Standard errors from the observed information matrix are a known
    # mismatch against the reference values, so this check is skipped.
    raise SkipTest('Known failure: standard errors do not match.')
    # assert_allclose(self.results.bse, self.true['bse_oim'], atol=1e-2)
def test_aic(self):
    """AIC matches the reference value."""
    actual, expected = self.results.aic, self.true['aic']
    # The reference (Stata) only reports ~3 digits, hence the loose atol
    assert_allclose(actual, expected, atol=3)
def test_bic(self):
    """BIC matches the reference value."""
    actual, expected = self.results.bic, self.true['bic']
    # The reference (Stata) only reports ~3 digits, hence the loose atol
    assert_allclose(actual, expected, atol=3)
def test_predict(self, **kwargs):
    """In-sample prediction plus forecasting through 1982Q4."""
    predicted = self.results.predict(end='1982-10-01', **kwargs)
    assert_allclose(predicted, self.true['predict'], atol=1e-6)
def test_dynamic_predict(self, **kwargs):
    """Prediction that switches to dynamic mode in 1961Q1, plus forecast."""
    predicted = self.results.predict(end='1982-10-01', dynamic='1961-01-01',
                                     **kwargs)
    assert_allclose(predicted, self.true['dynamic_predict'], atol=1e-6)
class TestDynamicFactor(CheckDynamicFactor):
    """
    Test for a dynamic factor model with 1 AR(2) factor
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_dfm.copy()
        predict_cols = ['predict_dfm_1', 'predict_dfm_2', 'predict_dfm_3']
        dyn_cols = ['dyn_predict_dfm_1', 'dyn_predict_dfm_2',
                    'dyn_predict_dfm_3']
        true['predict'] = output_results.ix[1:, predict_cols]
        true['dynamic_predict'] = output_results.ix[1:, dyn_cols]
        super(TestDynamicFactor, self).__init__(
            true, k_factors=1, factor_order=2)
class TestDynamicFactor2(CheckDynamicFactor):
    """
    Test for a dynamic factor model with two VAR(1) factors
    """
    def __init__(self):
        # Reference values come from Stata (see results_dynamic_factor)
        true = results_dynamic_factor.lutkepohl_dfm2.copy()
        true['predict'] = output_results.ix[1:, ['predict_dfm2_1', 'predict_dfm2_2', 'predict_dfm2_3']]
        true['dynamic_predict'] = output_results.ix[1:, ['dyn_predict_dfm2_1', 'dyn_predict_dfm2_2', 'dyn_predict_dfm2_3']]
        super(TestDynamicFactor2, self).__init__(true, k_factors=2, factor_order=1)

    def test_mle(self):
        # Stata's MLE on this model doesn't converge, so no reason to check
        pass

    def test_bse(self):
        # Stata's MLE on this model doesn't converge, and four of their
        # params don't even have bse (possibly they are still at starting
        # values?), so no reason to check this
        pass

    def test_aic(self):
        # Stata uses 9 df (i.e. 9 params) here instead of 13, because since the
        # model didn't coverge, 4 of the parameters aren't fully estimated
        # (possibly they are still at starting values?) so the AIC is off
        pass

    def test_bic(self):
        # Stata uses 9 df (i.e. 9 params) here instead of 13, because since the
        # model didn't coverge, 4 of the parameters aren't fully estimated
        # (possibly they are still at starting values?) so the BIC is off
        pass

    def test_summary(self):
        """Check the structure and coefficients of results.summary() tables."""
        summary = self.results.summary()
        tables = [str(table) for table in summary.tables]
        params = self.true['params']
        # Make sure we have the right number of tables:
        # 2 leading tables + one per endog + one per factor + error covariance
        assert_equal(len(tables), 2 + self.model.k_endog + self.model.k_factors + 1)
        # Check the model overview table
        assert_equal(re.search(r'Model:.*DynamicFactor\(factors=2, order=1\)', tables[0]) is None, False)
        # For each endogenous variable, check the output
        for i in range(self.model.k_endog):
            # Loadings for endog i are stored contiguously, k_factors apiece
            offset_loading = self.model.k_factors * i
            # NOTE(review): offset_var is unused in this loop
            offset_var = self.model.k_factors * self.model.k_endog
            table = tables[i + 2]
            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert_equal(re.search('Results for equation %s' % name, table) is None, False)
            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 7)
            # -> Check that we have the right coefficients
            assert_equal(re.search('loading.f1 +' + forg(params[offset_loading + 0], prec=4), table) is None, False)
            assert_equal(re.search('loading.f2 +' + forg(params[offset_loading + 1], prec=4), table) is None, False)
        # For each factor, check the output
        for i in range(self.model.k_factors):
            # Transition params follow all loadings in the params vector
            offset = self.model.k_endog * (self.model.k_factors + 1) + i * self.model.k_factors
            table = tables[self.model.k_endog + i + 2]
            # -> Make sure we have the right table / table name
            # NOTE(review): `name` is unused here; factor tables are f1, f2...
            name = self.model.endog_names[i]
            assert_equal(re.search('Results for factor equation f%d' % (i+1), table) is None, False)
            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 7)
            # -> Check that we have the right coefficients
            assert_equal(re.search('L1.f1 +' + forg(params[offset + 0], prec=4), table) is None, False)
            assert_equal(re.search('L1.f2 +' + forg(params[offset + 1], prec=4), table) is None, False)
        # Check the Error covariance matrix output
        table = tables[2 + self.model.k_endog + self.model.k_factors]
        # -> Make sure we have the right table / table name
        # NOTE(review): `name` is unused here as well
        name = self.model.endog_names[i]
        assert_equal(re.search('Error covariance matrix', table) is None, False)
        # -> Make sure it's the right size
        assert_equal(len(table.split('\n')), 8)
        # -> Check that we have the right coefficients
        offset = self.model.k_endog * self.model.k_factors
        for i in range(self.model.k_endog):
            assert_equal(re.search('sigma2.%s +%s' % (self.model.endog_names[i], forg(params[offset + i], prec=4)), table) is None, False)
class TestDynamicFactor_exog1(CheckDynamicFactor):
    """
    Test for a dynamic factor model with 1 exogenous regressor: a constant
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_dfm_exog1.copy()
        predict_cols = ['predict_dfm_exog1_1', 'predict_dfm_exog1_2',
                        'predict_dfm_exog1_3']
        dyn_cols = ['dyn_predict_dfm_exog1_1', 'dyn_predict_dfm_exog1_2',
                    'dyn_predict_dfm_exog1_3']
        true['predict'] = output_results.ix[1:, predict_cols]
        true['dynamic_predict'] = output_results.ix[1:, dyn_cols]
        constant = np.ones((75, 1))
        super(TestDynamicFactor_exog1, self).__init__(
            true, k_factors=1, factor_order=1, exog=constant)

    def test_predict(self):
        # The forecast period requires 16 more rows of the constant regressor
        constant = np.ones((16, 1))
        super(TestDynamicFactor_exog1, self).test_predict(exog=constant)

    def test_dynamic_predict(self):
        constant = np.ones((16, 1))
        super(TestDynamicFactor_exog1, self).test_dynamic_predict(exog=constant)
class TestDynamicFactor_exog2(CheckDynamicFactor):
    """
    Test for a dynamic factor model with 2 exogenous regressors: a constant
    and a time-trend
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_dfm_exog2.copy()
        true['predict'] = output_results.ix[1:, ['predict_dfm_exog2_1', 'predict_dfm_exog2_2', 'predict_dfm_exog2_3']]
        true['dynamic_predict'] = output_results.ix[1:, ['dyn_predict_dfm_exog2_1', 'dyn_predict_dfm_exog2_2', 'dyn_predict_dfm_exog2_3']]
        # Constant column plus a trend starting at 2 (matches the reference)
        exog = np.c_[np.ones((75,1)), (np.arange(75) + 2)[:, np.newaxis]]
        super(TestDynamicFactor_exog2, self).__init__(true, k_factors=1, factor_order=1, exog=exog)

    def test_predict(self):
        # Out-of-sample regressors for the 16-period forecast
        exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 2)[:, np.newaxis]]
        super(TestDynamicFactor_exog2, self).test_predict(exog=exog)

    def test_dynamic_predict(self):
        exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 2)[:, np.newaxis]]
        super(TestDynamicFactor_exog2, self).test_dynamic_predict(exog=exog)

    def test_summary(self):
        """Check the structure and coefficients of results.summary() tables."""
        summary = self.results.summary()
        tables = [str(table) for table in summary.tables]
        params = self.true['params']
        # Make sure we have the right number of tables:
        # 2 leading tables + one per endog + one per factor + error covariance
        assert_equal(len(tables), 2 + self.model.k_endog + self.model.k_factors + 1)
        # Check the model overview table
        assert_equal(re.search(r'Model:.*DynamicFactor\(factors=1, order=1\)', tables[0]) is None, False)
        assert_equal(re.search(r'.*2 regressors', tables[0]) is None, False)
        # For each endogenous variable, check the output
        for i in range(self.model.k_endog):
            # Loadings first, then the exog (beta) params, 2 per equation
            offset_loading = self.model.k_factors * i
            offset_exog = self.model.k_factors * self.model.k_endog
            table = tables[i + 2]
            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert_equal(re.search('Results for equation %s' % name, table) is None, False)
            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 8)
            # -> Check that we have the right coefficients
            assert_equal(re.search('loading.f1 +' + forg(params[offset_loading + 0], prec=4), table) is None, False)
            assert_equal(re.search('beta.const +' + forg(params[offset_exog + i*2 + 0], prec=4), table) is None, False)
            assert_equal(re.search('beta.x1 +' + forg(params[offset_exog + i*2 + 1], prec=4), table) is None, False)
        # For each factor, check the output
        for i in range(self.model.k_factors):
            # Transition params follow loadings + exog betas in the vector
            offset = self.model.k_endog * (self.model.k_factors + 3) + i * self.model.k_factors
            table = tables[self.model.k_endog + i + 2]
            # -> Make sure we have the right table / table name
            # NOTE(review): `name` is unused here; factor tables are f1, f2...
            name = self.model.endog_names[i]
            assert_equal(re.search('Results for factor equation f%d' % (i+1), table) is None, False)
            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 6)
            # -> Check that we have the right coefficients
            assert_equal(re.search('L1.f1 +' + forg(params[offset + 0], prec=4), table) is None, False)
        # Check the Error covariance matrix output
        table = tables[2 + self.model.k_endog + self.model.k_factors]
        # -> Make sure we have the right table / table name
        # NOTE(review): `name` is unused here as well
        name = self.model.endog_names[i]
        assert_equal(re.search('Error covariance matrix', table) is None, False)
        # -> Make sure it's the right size
        assert_equal(len(table.split('\n')), 8)
        # -> Check that we have the right coefficients
        offset = self.model.k_endog * (self.model.k_factors + 2)
        for i in range(self.model.k_endog):
            assert_equal(re.search('sigma2.%s +%s' % (self.model.endog_names[i], forg(params[offset + i], prec=4)), table) is None, False)
class TestDynamicFactor_general_errors(CheckDynamicFactor):
    """
    Test for a dynamic factor model where errors are as general as possible,
    meaning:

    - Errors are vector autocorrelated, VAR(1)
    - Innovations are correlated
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_dfm_gen.copy()
        true['predict'] = output_results.ix[1:, ['predict_dfm_gen_1', 'predict_dfm_gen_2', 'predict_dfm_gen_3']]
        true['dynamic_predict'] = output_results.ix[1:, ['dyn_predict_dfm_gen_1', 'dyn_predict_dfm_gen_2', 'dyn_predict_dfm_gen_3']]
        super(TestDynamicFactor_general_errors, self).__init__(true, k_factors=1, factor_order=1, error_var=True, error_order=1, error_cov_type='unstructured')

    def test_mle(self):
        # This model's likelihood surface has proven too hard for the
        # available optimizers; the trace below documents the best attempt.
        raise SkipTest("Known failure, no sequence of optimizers has been"
                       " found which can achieve the maximum.")
        # The following gets us to llf=546.53, which is still not good enough
        # llf = 300.842477412
        # res = mod.fit(method='lbfgs', maxiter=10000)
        # llf = 460.26576722
        # res = mod.fit(res.params, method='nm', maxiter=10000, maxfev=10000)
        # llf = 542.245718508
        # res = mod.fit(res.params, method='lbfgs', maxiter=10000)
        # llf = 544.035160955
        # res = mod.fit(res.params, method='nm', maxiter=10000, maxfev=10000)
        # llf = 557.442240083
        # res = mod.fit(res.params, method='lbfgs', maxiter=10000)
        # llf = 558.199513262
        # res = mod.fit(res.params, method='nm', maxiter=10000, maxfev=10000)
        # llf = 559.049076604
        # res = mod.fit(res.params, method='nm', maxiter=10000, maxfev=10000)
        # llf = 559.049076604
        # ...

    def test_summary(self):
        """Check the structure and coefficients of results.summary() tables,
        including the per-error-equation tables specific to VAR errors."""
        summary = self.results.summary()
        tables = [str(table) for table in summary.tables]
        params = self.true['params']
        # Make sure we have the right number of tables: 2 leading tables +
        # one per endog + one per factor + one per error equation + error cov
        assert_equal(len(tables), 2 + self.model.k_endog + self.model.k_factors + self.model.k_endog + 1)
        # Check the model overview table
        assert_equal(re.search(r'Model:.*DynamicFactor\(factors=1, order=1\)', tables[0]) is None, False)
        assert_equal(re.search(r'.*VAR\(1\) errors', tables[0]) is None, False)
        # For each endogenous variable, check the output
        for i in range(self.model.k_endog):
            offset_loading = self.model.k_factors * i
            table = tables[i + 2]
            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert_equal(re.search('Results for equation %s' % name, table) is None, False)
            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 6)
            # -> Check that we have the right coefficients
            assert_equal(re.search('loading.f1 +' + forg(params[offset_loading + 0], prec=4), table) is None, False)
        # For each factor, check the output
        for i in range(self.model.k_factors):
            # The 6 skipped params are the unstructured covariance params
            offset = self.model.k_endog * self.model.k_factors + 6 + i * self.model.k_factors
            table = tables[2 + self.model.k_endog + i]
            # -> Make sure we have the right table / table name
            # NOTE(review): `name` is unused here; factor tables are f1, f2...
            name = self.model.endog_names[i]
            assert_equal(re.search('Results for factor equation f%d' % (i+1), table) is None, False)
            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 6)
            # -> Check that we have the right coefficients
            assert_equal(re.search('L1.f1 +' + forg(params[offset + 0], prec=4), table) is None, False)
        # For each error equation, check the output
        for i in range(self.model.k_endog):
            offset = self.model.k_endog * (self.model.k_factors + i) + 6 + self.model.k_factors
            table = tables[2 + self.model.k_endog + self.model.k_factors + i]
            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert_equal(re.search('Results for error equation e\(%s\)' % name, table) is None, False)
            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 8)
            # -> Check that we have the right coefficients
            for j in range(self.model.k_endog):
                name = self.model.endog_names[j]
                assert_equal(re.search('L1.e\(%s\) +%s' % (name, forg(params[offset + j], prec=4)), table) is None, False)
        # Check the Error covariance matrix output
        table = tables[2 + self.model.k_endog + self.model.k_factors + self.model.k_endog]
        # -> Make sure we have the right table / table name
        # NOTE(review): `name` is unused here as well
        name = self.model.endog_names[i]
        assert_equal(re.search('Error covariance matrix', table) is None, False)
        # -> Make sure it's the right size
        assert_equal(len(table.split('\n')), 11)
        # -> Check that we have the right coefficients (Cholesky-style
        # "sqrt" parameterization of the unstructured covariance)
        offset = self.model.k_endog * self.model.k_factors
        assert_equal(re.search('sqrt.var.dln_inv +' + forg(params[offset + 0], prec=4), table) is None, False)
        assert_equal(re.search('sqrt.cov.dln_inv.dln_inc +' + forg(params[offset + 1], prec=4), table) is None, False)
        assert_equal(re.search('sqrt.var.dln_inc +' + forg(params[offset + 2], prec=4), table) is None, False)
        assert_equal(re.search('sqrt.cov.dln_inv.dln_consump +' + forg(params[offset + 3], prec=4), table) is None, False)
        assert_equal(re.search('sqrt.cov.dln_inc.dln_consump +' + forg(params[offset + 4], prec=4), table) is None, False)
        assert_equal(re.search('sqrt.var.dln_consump +' + forg(params[offset + 5], prec=4), table) is None, False)
class TestDynamicFactor_ar2_errors(CheckDynamicFactor):
    """
    Test for a dynamic factor model with AR(2) errors (error_order=2,
    uncorrelated innovations).
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_dfm_ar2.copy()
        predict_cols = ['predict_dfm_ar2_1', 'predict_dfm_ar2_2',
                        'predict_dfm_ar2_3']
        dyn_cols = ['dyn_predict_dfm_ar2_1', 'dyn_predict_dfm_ar2_2',
                    'dyn_predict_dfm_ar2_3']
        true['predict'] = output_results.ix[1:, predict_cols]
        true['dynamic_predict'] = output_results.ix[1:, dyn_cols]
        super(TestDynamicFactor_ar2_errors, self).__init__(
            true, k_factors=1, factor_order=1, error_order=2)

    def test_mle(self):
        """Fit via L-BFGS then Nelder-Mead; llf should match the reference."""
        with warnings.catch_warnings(record=True):
            mod = self.model
            res1 = mod.fit(method='lbfgs', maxiter=10000, disp=-1)
            res = mod.fit(res1.params, method='nm', maxiter=10000,
                          maxfev=10000, disp=False)
            assert_allclose(res.llf, self.results.llf, atol=1e-3)
class TestDynamicFactor_scalar_error(CheckDynamicFactor):
    """
    Test for a dynamic factor model where innovations are uncorrelated and
    are forced to have the same variance.
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_dfm_scalar.copy()
        predict_cols = ['predict_dfm_scalar_1', 'predict_dfm_scalar_2',
                        'predict_dfm_scalar_3']
        dyn_cols = ['dyn_predict_dfm_scalar_1', 'dyn_predict_dfm_scalar_2',
                    'dyn_predict_dfm_scalar_3']
        true['predict'] = output_results.ix[1:, predict_cols]
        true['dynamic_predict'] = output_results.ix[1:, dyn_cols]
        constant = np.ones((75, 1))
        super(TestDynamicFactor_scalar_error, self).__init__(
            true, k_factors=1, factor_order=1, exog=constant,
            error_cov_type='scalar')

    def test_predict(self):
        # The forecast period requires 16 more rows of the constant regressor
        constant = np.ones((16, 1))
        super(TestDynamicFactor_scalar_error, self).test_predict(exog=constant)

    def test_dynamic_predict(self):
        constant = np.ones((16, 1))
        super(TestDynamicFactor_scalar_error, self).test_dynamic_predict(exog=constant)
class TestStaticFactor(CheckDynamicFactor):
    """
    Test for a static factor model (i.e. factors are not autocorrelated).
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_sfm.copy()
        predict_cols = ['predict_sfm_1', 'predict_sfm_2', 'predict_sfm_3']
        dyn_cols = ['dyn_predict_sfm_1', 'dyn_predict_sfm_2',
                    'dyn_predict_sfm_3']
        true['predict'] = output_results.ix[1:, predict_cols]
        true['dynamic_predict'] = output_results.ix[1:, dyn_cols]
        super(TestStaticFactor, self).__init__(
            true, k_factors=1, factor_order=0)

    def test_bic(self):
        # Stata uses 5 df (i.e. 5 params) here instead of 6, because one param
        # is basically zero.
        pass
class TestSUR(CheckDynamicFactor):
    """
    Test for a seemingly unrelated regression model (i.e. no factors) with
    errors cross-sectionally, but not auto-, correlated
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_sur.copy()
        predict_cols = ['predict_sur_1', 'predict_sur_2', 'predict_sur_3']
        dyn_cols = ['dyn_predict_sur_1', 'dyn_predict_sur_2',
                    'dyn_predict_sur_3']
        true['predict'] = output_results.ix[1:, predict_cols]
        true['dynamic_predict'] = output_results.ix[1:, dyn_cols]
        # Constant plus time trend starting at 2
        exog = np.c_[np.ones((75, 1)), (np.arange(75) + 2)[:, np.newaxis]]
        super(TestSUR, self).__init__(
            true, k_factors=0, factor_order=0, exog=exog,
            error_cov_type='unstructured')

    def test_predict(self):
        # Out-of-sample regressors for the 16-period forecast
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75 + 16) + 2)[:, np.newaxis]]
        super(TestSUR, self).test_predict(exog=exog)

    def test_dynamic_predict(self):
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75 + 16) + 2)[:, np.newaxis]]
        super(TestSUR, self).test_dynamic_predict(exog=exog)
class TestSUR_autocorrelated_errors(CheckDynamicFactor):
    """
    Test for a seemingly unrelated regression model (i.e. no factors) where
    the errors are vector autocorrelated, but innovations are uncorrelated.
    """
    def __init__(self):
        true = results_dynamic_factor.lutkepohl_sur_auto.copy()
        predict_cols = ['predict_sur_auto_1', 'predict_sur_auto_2']
        dyn_cols = ['dyn_predict_sur_auto_1', 'dyn_predict_sur_auto_2']
        true['predict'] = output_results.ix[1:, predict_cols]
        true['dynamic_predict'] = output_results.ix[1:, dyn_cols]
        # Constant plus time trend starting at 2
        exog = np.c_[np.ones((75, 1)), (np.arange(75) + 2)[:, np.newaxis]]
        super(TestSUR_autocorrelated_errors, self).__init__(
            true, k_factors=0, factor_order=0, exog=exog, error_order=1,
            error_var=True, error_cov_type='diagonal',
            included_vars=['dln_inv', 'dln_inc'])

    def test_predict(self):
        # Out-of-sample regressors for the 16-period forecast
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75 + 16) + 2)[:, np.newaxis]]
        super(TestSUR_autocorrelated_errors, self).test_predict(exog=exog)

    def test_dynamic_predict(self):
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75 + 16) + 2)[:, np.newaxis]]
        super(TestSUR_autocorrelated_errors, self).test_dynamic_predict(exog=exog)
def test_misspecification():
    # Tests for model specification and misspecification exceptions
    endog = np.arange(20).reshape(10,2)
    # Too few endog (a single series cannot support a factor model)
    assert_raises(ValueError, dynamic_factor.DynamicFactor, endog[:,0], k_factors=0, factor_order=0)
    # Too many factors (k_factors must be < k_endog)
    assert_raises(ValueError, dynamic_factor.DynamicFactor, endog, k_factors=2, factor_order=1)
    # Bad error_cov_type specification
    # NOTE(review): the ``order=(1,0)`` keyword looks out of place for a
    # DynamicFactor constructor -- confirm it is intentionally forwarded.
    assert_raises(ValueError, dynamic_factor.DynamicFactor, endog, k_factors=1, factor_order=1, order=(1,0), error_cov_type='')
def test_miscellaneous():
    """Smoke-test initialization with 1-dim ndarray and pandas Series exog."""
    # Initialization with a 1-dimensional exog array
    trend = np.arange(75)
    CheckDynamicFactor(true=None, k_factors=1, factor_order=1,
                       exog=trend, filter=False)
    # Initialization with a pandas Series exog on a quarterly date index
    dates = pd.date_range(start='1960-04-01', end='1978-10-01', freq='QS')
    CheckDynamicFactor(true=None, k_factors=1, factor_order=1,
                       exog=pd.Series(np.arange(75), index=dates),
                       filter=False)
# ---------------------------------------------------------------------------
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__doc__='testscript for reportlab.pdfgen'
__version__=''' $Id$ '''
#tests and documents new low-level canvas
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os
import unittest
from reportlab.pdfgen import canvas # gmcm 2000/10/13, pdfgen now a package
from reportlab.lib.units import inch, cm
from reportlab.lib import colors
from reportlab.lib.utils import haveImages, fileName2FSEnc
#################################################################
#
# first some drawing utilities
#
#
################################################################
BASEFONT = ('Times-Roman', 10)
def framePageForm(c):
    """Define the reusable page decoration as a PDF form named "frame".

    Draws the blue sidebar and the rotated ReportLab logo into the form;
    the form is defined once here and replayed on every page via
    ``canvas.doForm("frame")`` (see framePage).
    """
    c.beginForm("frame")
    c.saveState()
    # forms can't do non-constant operations (e.g. the page number),
    # which is why the per-page text stays in framePage instead
    #canvas.setFont('Times-BoldItalic',20)
    #canvas.drawString(inch, 10.5 * inch, title)
    #c.setFont('Times-Roman',10)
    #c.drawCentredString(4.135 * inch, 0.75 * inch,
    #   'Page %d' % c.getPageNumber())
    #draw a border
    c.setFillColor(colors.ReportLabBlue)
    c.rect(0.3*inch, inch, 0.5*inch, 10*inch, fill=1)
    # Deferred import: corp is only needed for the logo below
    from reportlab.lib import corp
    # Rotate the coordinate system so the logo runs up the sidebar
    c.translate(0.8*inch, 9.6*inch)
    c.rotate(90)
    logo = corp.ReportLabLogo(width=1.3*inch, height=0.5*inch, powered_by=1)
    c.setFillColorRGB(1,1,1)
    c.setStrokeColorRGB(1,1,1)
    logo.draw(c)
    #c.setStrokeColorRGB(1,0,0)
    #c.setLineWidth(5)
    #c.line(0.8 * inch, inch, 0.8 * inch, 10.75 * inch)
    #reset carefully afterwards
    #canvas.setLineWidth(1)
    #canvas.setStrokeColorRGB(0,0,0)\
    c.restoreState()
    c.endForm()
def framePage(canvas, title):
    """Draw the per-page chrome: title, bookmark, outline entry, page number,
    and the pre-defined "frame" form.

    Side effects on module globals: appends *title* to ``titlelist`` and
    toggles ``closeit`` so every other outline section starts closed.
    """
    global closeit
    titlelist.append(title)
    #canvas._inPage0() # do we need this at all? would be good to eliminate it
    canvas.saveState()
    canvas.setFont('Times-BoldItalic',20)
    canvas.drawString(inch, 10.5 * inch, title)
    # Bookmark slightly above the title so links land at the top of the page
    canvas.bookmarkHorizontalAbsolute(title, 10.8*inch)
    #newsection(title)
    canvas.addOutlineEntry(title+" section", title, level=0, closed=closeit)
    closeit = not closeit # close every other one
    canvas.setFont('Times-Roman',10)
    canvas.drawCentredString(4.135 * inch, 0.75 * inch,
        'Page %d' % canvas.getPageNumber())
    canvas.restoreState()
    canvas.doForm("frame")
def makesubsection(canvas, title, horizontal):
    """Add a level-1 outline entry for *title* plus a bookmark at the given
    vertical position (*horizontal* is the absolute y coordinate)."""
    canvas.bookmarkHorizontalAbsolute(title, horizontal)
    #newsubsection(title)
    canvas.addOutlineEntry(title+" subsection", title, level=1)
# outline helpers
#outlinenametree = []
#def newsection(name):
# outlinenametree.append(name)
#def newsubsection(name):
# thissection = outlinenametree[-1]
# if not isinstance(thissection,tuple):
# subsectionlist = []
# thissection = outlinenametree[-1] = (thissection, subsectionlist)
# else:
# (sectionname, subsectionlist) = thissection
# subsectionlist.append(name)
class DocBlock:
    """A DocBlock has a chunk of commentary and a chunk of code.

    It prints the code and commentary, then executes the code,
    which is presumed to draw in a region reserved for it.
    """
    def __init__(self):
        # Default content; callers overwrite these attributes directly.
        self.comment1 = "A doc block"
        self.code = "canvas.setTextOrigin(cm, cm)\ncanvas.textOut('Hello World')"
        self.comment2 = "That was a doc block"
        # Minimum height (in points) reserved for the drawing region.
        self.drawHeight = 0

    def _getHeight(self):
        """Return the block height: total text line count plus padding, or
        drawHeight, whichever is larger."""
        # splits into lines
        self.comment1lines = self.comment1.split('\n')
        self.codelines = self.code.split('\n')
        self.comment2lines = self.comment2.split('\n')
        # BUGFIX: previously summed len(self.code) -- the *character* count
        # of the code string -- instead of len(self.codelines), even though
        # codelines was computed above for exactly this purpose.
        textheight = (len(self.comment1lines) +
                      len(self.codelines) +
                      len(self.comment2lines) +
                      18)
        return max(textheight, self.drawHeight)

    def draw(self, canvas, x, y):
        """Render commentary, code and commentary at (x, y) = top-left
        corner, then execute the code snippet inside a translated,
        bordered sub-region."""
        canvas.saveState()
        height = self._getHeight()
        canvas.rect(x, y - height, 6 * inch, height)
        # first draw the text
        canvas.setTextOrigin(x + 3 * inch, y - 12)
        canvas.setFont('Times-Roman', 10)
        canvas.textLines(self.comment1)
        drawCode(canvas, self.code)
        canvas.textLines(self.comment2)
        # now a box for the drawing, slightly within the outer rect
        canvas.rect(x + 9, y - height + 9, 198, height - 18)
        # Execute the snippet with the origin moved to the box corner.
        # NOTE: exec of a locally-authored string -- fine for a test
        # script, never for untrusted input.
        self.namespace = {'canvas': canvas, 'cm': cm, 'inch': inch}
        canvas.translate(x + 9, y - height + 9)
        codeObj = compile(self.code, '<sample>', 'exec')
        exec(codeObj, self.namespace)
        canvas.restoreState()
def drawAxes(canvas, label):
    """Draw two one-inch rulers (y then x) from the current origin.

    Units are points, so each ruler reads as an imperial inch with ticks
    every tenth; both axes end in a small arrowhead, and *label* is drawn
    between them.
    """
    # y axis with tick marks every tenth of an inch
    canvas.line(0, 0, 0, 72)
    for i in range(9):
        tick = (i + 1) * 7.2
        canvas.line(-6, tick, 0, tick)
    canvas.line(-6, 66, 0, 72)  # arrowhead, left stroke
    canvas.line(6, 66, 0, 72)   # arrowhead, right stroke
    # x axis with matching ticks
    canvas.line(0, 0, 72, 0)
    for i in range(9):
        tick = (i + 1) * 7.2
        canvas.line(tick, -6, tick, 0)
    canvas.line(66, -6, 72, 0)  # arrowhead, lower stroke
    canvas.line(66, +6, 72, 0)  # arrowhead, upper stroke
    canvas.drawString(18, 30, label)
def drawCrossHairs(canvas, x, y):
    """Draw a small cross centred on (x, y) -- a marker for eyeballing
    text-metric positions.

    NB: the stroke color is RGB(0, 1, 0), i.e. green (the original comment
    said "blue for fun", but the value drawn is green).
    """
    canvas.saveState()
    canvas.setStrokeColorRGB(0, 1, 0)
    canvas.line(x - 6, y, x + 6, y)  # horizontal stroke
    canvas.line(x, y - 6, x, y + 6)  # vertical stroke
    canvas.restoreState()
def drawCode(canvas, code):
    """Draws a block of text at the current point, indented and in Courier.

    The raw ``Td`` literals shift the text cursor right by 36 points before
    the code and back again afterwards; fill color and font are restored to
    the body defaults (black Times-Roman 10) on exit.
    """
    canvas.addLiteral('36 0 Td')
    canvas.setFillColor(colors.blue)
    canvas.setFont('Courier', 10)
    t = canvas.beginText()
    t.textLines(code)
    # BUGFIX: this previously read ``c.drawText(t)``, but no name ``c``
    # exists in this scope (``c`` is local to makeDocument), so calling
    # drawCode raised NameError.  The parameter is ``canvas``.
    canvas.drawText(t)
    canvas.setFillColor(colors.black)
    canvas.addLiteral('-36 0 Td')
    canvas.setFont('Times-Roman', 10)
def makeDocument(filename, pageCallBack=None):
#the extra arg is a hack added later, so other
#tests can get hold of the canvas just before it is
#saved
global titlelist, closeit
titlelist = []
closeit = 0
c = canvas.Canvas(filename)
c.setPageCompression(0)
c.setPageCallBack(pageCallBack)
framePageForm(c) # define the frame form
c.showOutline()
framePage(c, 'PDFgen graphics API test script')
makesubsection(c, "PDFgen", 10*inch)
#quickie encoding test: when canvas encoding not set,
#the following should do (tm), (r) and (c)
msg_uni = 'copyright\u00A9 trademark\u2122 registered\u00AE scissors\u2702: ReportLab in unicode!'
msg_utf8 = msg_uni.replace('unicode','utf8').encode('utf8')
c.drawString(100, 100, msg_uni)
c.drawString(100, 80, msg_utf8)
t = c.beginText(inch, 10*inch)
t.setFont('Times-Roman', 10)
drawCrossHairs(c, t.getX(),t.getY())
t.textLines("""
The ReportLab library permits you to create PDF documents directly from
your Python code. The "pdfgen" subpackage is the lowest level exposed
to the user and lets you directly position test and graphics on the
page, with access to almost the full range of PDF features.
The API is intended to closely mirror the PDF / Postscript imaging
model. There is an almost one to one correspondence between commands
and PDF operators. However, where PDF provides several ways to do a job,
we have generally only picked one.
The test script attempts to use all of the methods exposed by the Canvas
class, defined in reportlab/pdfgen/canvas.py
First, let's look at text output. There are some basic commands
to draw strings:
- canvas.setFont(fontname, fontsize [, leading])
- canvas.drawString(x, y, text)
- canvas.drawRightString(x, y, text)
- canvas.drawCentredString(x, y, text)
The coordinates are in points starting at the bottom left corner of the
page. When setting a font, the leading (i.e. inter-line spacing)
defaults to 1.2 * fontsize if the fontsize is not provided.
For more sophisticated operations, you can create a Text Object, defined
in reportlab/pdfgen/testobject.py. Text objects produce tighter PDF, run
faster and have many methods for precise control of spacing and position.
Basic usage goes as follows:
- tx = canvas.beginText(x, y)
- tx.textOut('Hello') # this moves the cursor to the right
- tx.textLine('Hello again') # prints a line and moves down
- y = tx.getY() # getX, getY and getCursor track position
- canvas.drawText(tx) # all gets drawn at the end
The green crosshairs below test whether the text cursor is working
properly. They should appear at the bottom left of each relevant
substring.
""")
t.setFillColorRGB(1,0,0)
t.setTextOrigin(inch, 4*inch)
drawCrossHairs(c, t.getX(),t.getY())
t.textOut('textOut moves across:')
drawCrossHairs(c, t.getX(),t.getY())
t.textOut('textOut moves across:')
drawCrossHairs(c, t.getX(),t.getY())
t.textOut('textOut moves across:')
drawCrossHairs(c, t.getX(),t.getY())
t.textLine('')
drawCrossHairs(c, t.getX(),t.getY())
t.textLine('textLine moves down')
drawCrossHairs(c, t.getX(),t.getY())
t.textLine('textLine moves down')
drawCrossHairs(c, t.getX(),t.getY())
t.textLine('textLine moves down')
drawCrossHairs(c, t.getX(),t.getY())
t.setTextOrigin(4*inch,3.25*inch)
drawCrossHairs(c, t.getX(),t.getY())
t.textLines('This is a multi-line\nstring with embedded newlines\ndrawn with textLines().\n')
drawCrossHairs(c, t.getX(),t.getY())
t.textLines(['This is a list of strings',
'drawn with textLines().'])
c.drawText(t)
t = c.beginText(2*inch,2*inch)
t.setFont('Times-Roman',10)
drawCrossHairs(c, t.getX(),t.getY())
t.textOut('Small text.')
drawCrossHairs(c, t.getX(),t.getY())
t.setFont('Courier',14)
t.textOut('Bigger fixed width text.')
drawCrossHairs(c, t.getX(),t.getY())
t.setFont('Times-Roman',10)
t.textOut('Small text again.')
drawCrossHairs(c, t.getX(),t.getY())
c.drawText(t)
#try out the decimal tabs high on the right.
c.setStrokeColor(colors.silver)
c.line(7*inch, 6*inch, 7*inch, 4.5*inch)
c.setFillColor(colors.black)
c.setFont('Times-Roman',10)
c.drawString(6*inch, 6.2*inch, "Testing decimal alignment")
c.drawString(6*inch, 6.05*inch, "- aim for silver line")
c.line(7*inch, 6*inch, 7*inch, 4.5*inch)
c.drawAlignedString(7*inch, 5.8*inch, "1,234,567.89")
c.drawAlignedString(7*inch, 5.6*inch, "3,456.789")
c.drawAlignedString(7*inch, 5.4*inch, "123")
c.setFillColor(colors.red)
c.drawAlignedString(7*inch, 5.2*inch, "(7,192,302.30)")
#mark the cursor where it stopped
c.showPage()
##############################################################
#
# page 2 - line styles
#
###############################################################
#page 2 - lines and styles
framePage(c, 'Line Drawing Styles')
# three line ends, lines drawn the hard way
#firt make some vertical end markers
c.setDash(4,4)
c.setLineWidth(0)
c.line(inch,9.2*inch,inch, 7.8*inch)
c.line(3*inch,9.2*inch,3*inch, 7.8*inch)
c.setDash() #clears it
c.setLineWidth(5)
c.setLineCap(0)
p = c.beginPath()
p.moveTo(inch, 9*inch)
p.lineTo(3*inch, 9*inch)
c.drawPath(p)
c.drawString(4*inch, 9*inch, 'the default - butt caps project half a width')
makesubsection(c, "caps and joins", 8.5*inch)
c.setLineCap(1)
p = c.beginPath()
p.moveTo(inch, 8.5*inch)
p.lineTo(3*inch, 8.5*inch)
c.drawPath(p)
c.drawString(4*inch, 8.5*inch, 'round caps')
c.setLineCap(2)
p = c.beginPath()
p.moveTo(inch, 8*inch)
p.lineTo(3*inch, 8*inch)
c.drawPath(p)
c.drawString(4*inch, 8*inch, 'square caps')
c.setLineCap(0)
# three line joins
c.setLineJoin(0)
p = c.beginPath()
p.moveTo(inch, 7*inch)
p.lineTo(2*inch, 7*inch)
p.lineTo(inch, 6.7*inch)
c.drawPath(p)
c.drawString(4*inch, 6.8*inch, 'Default - mitered join')
c.setLineJoin(1)
p = c.beginPath()
p.moveTo(inch, 6.5*inch)
p.lineTo(2*inch, 6.5*inch)
p.lineTo(inch, 6.2*inch)
c.drawPath(p)
c.drawString(4*inch, 6.3*inch, 'round join')
c.setLineJoin(2)
p = c.beginPath()
p.moveTo(inch, 6*inch)
p.lineTo(2*inch, 6*inch)
p.lineTo(inch, 5.7*inch)
c.drawPath(p)
c.drawString(4*inch, 5.8*inch, 'bevel join')
c.setDash(6,6)
p = c.beginPath()
p.moveTo(inch, 5*inch)
p.lineTo(3*inch, 5*inch)
c.drawPath(p)
c.drawString(4*inch, 5*inch, 'dash 6 points on, 6 off- setDash(6,6) setLineCap(0)')
makesubsection(c, "dash patterns", 5*inch)
c.setLineCap(1)
p = c.beginPath()
p.moveTo(inch, 4.5*inch)
p.lineTo(3*inch, 4.5*inch)
c.drawPath(p)
c.drawString(4*inch, 4.5*inch, 'dash 6 points on, 6 off- setDash(6,6) setLineCap(1)')
c.setLineCap(0)
c.setDash([1,2,3,4,5,6],0)
p = c.beginPath()
p.moveTo(inch, 4.0*inch)
p.lineTo(3*inch, 4.0*inch)
c.drawPath(p)
c.drawString(4*inch, 4*inch, 'dash growing - setDash([1,2,3,4,5,6],0) setLineCap(0)')
c.setLineCap(1)
c.setLineJoin(1)
c.setDash(32,12)
p = c.beginPath()
p.moveTo(inch, 3.0*inch)
p.lineTo(2.5*inch, 3.0*inch)
p.lineTo(inch, 2*inch)
c.drawPath(p)
c.drawString(4*inch, 3*inch, 'dash pattern, join and cap style interacting - ')
c.drawString(4*inch, 3*inch - 12, 'round join & miter results in sausages')
c.textAnnotation('Annotation',Rect=(4*inch, 3*inch-72, inch,inch-12))
c.showPage()
##############################################################
#
# higher level shapes
#
###############################################################
framePage(c, 'Shape Drawing Routines')
t = c.beginText(inch, 10*inch)
t.textLines("""
Rather than making your own paths, you have access to a range of shape routines.
These are built in pdfgen out of lines and bezier curves, but use the most compact
set of operators possible. We can add any new ones that are of general use at no
cost to performance.""")
t.textLine()
#line demo
makesubsection(c, "lines", 9*inch)
c.line(inch, 9*inch, 3*inch, 9*inch)
t.setTextOrigin(4*inch, 9*inch)
t.textLine('canvas.line(x1, y1, x2, y2)')
#bezier demo - show control points
makesubsection(c, "bezier curves", 8.5*inch)
(x1, y1, x2, y2, x3, y3, x4, y4) = (
inch, 7.8*inch,
1.2*inch, 8.8 * inch,
3*inch, 8.8 * inch,
3.5*inch, 8.05 * inch
)
c.bezier(x1, y1, x2, y2, x3, y3, x4, y4)
c.setDash(3,3)
c.line(x1,y1,x2,y2)
c.line(x3,y3,x4,y4)
c.setDash()
t.setTextOrigin(4*inch, 8.3 * inch)
t.textLine('canvas.bezier(x1, y1, x2, y2, x3, y3, x4, y4)')
#rectangle
makesubsection(c, "rectangles", 8*inch)
c.rect(inch, 7 * inch, 2 * inch, 0.75 * inch)
t.setTextOrigin(4*inch, 7.375 * inch)
t.textLine('canvas.rect(x, y, width, height) - x,y is lower left')
c.roundRect(inch,6.25*inch,2*inch,0.6*inch,0.1*inch)
t.setTextOrigin(4*inch, 6.55*inch)
t.textLine('canvas.roundRect(x,y,width,height,radius)')
makesubsection(c, "arcs", 8*inch)
c.arc(inch,5*inch,3*inch,6*inch,0,90)
t.setTextOrigin(4*inch, 5.5*inch)
t.textLine('canvas.arc(x1, y1, x2, y2, startDeg, extentDeg)')
t.textLine('Note that this is an elliptical arc, not just circular!')
#wedge
makesubsection(c, "wedges", 5*inch)
c.wedge(inch, 4.5*inch, 3*inch, 3.5*inch, 0, 315)
t.setTextOrigin(4*inch, 4*inch)
t.textLine('canvas.wedge(x1, y1, x2, y2, startDeg, extentDeg)')
t.textLine('Note that this is an elliptical arc, not just circular!')
#wedge the other way
c.wedge(inch, 3.75*inch, 3*inch, 2.75*inch, 0, -45)
t.setTextOrigin(4*inch, 3*inch)
t.textLine('Use a negative extent to go clockwise')
#circle
makesubsection(c, "circles", 3.5*inch)
c.circle(1.5*inch, 2*inch, 0.5 * inch)
c.circle(3*inch, 2*inch, 0.5 * inch)
t.setTextOrigin(4*inch, 2 * inch)
t.textLine('canvas.circle(x, y, radius)')
c.drawText(t)
c.showPage()
##############################################################
#
# Page 4 - fonts
#
###############################################################
framePage(c, "Font Control")
c.drawString(inch, 10*inch, 'Listing available fonts...')
y = 9.5*inch
for fontname in c.getAvailableFonts():
c.setFont(fontname,24)
c.drawString(inch, y, 'This should be %s' % fontname)
y = y - 28
makesubsection(c, "fonts and colors", 4*inch)
c.setFont('Times-Roman', 12)
t = c.beginText(inch, 4*inch)
t.textLines("""Now we'll look at the color functions and how they interact
with the text. In theory, a word is just a shape; so setFillColorRGB()
determines most of what you see. If you specify other text rendering
modes, an outline color could be defined by setStrokeColorRGB() too""")
c.drawText(t)
t = c.beginText(inch, 2.75 * inch)
t.setFont('Times-Bold',36)
t.setFillColor(colors.green) #green
t.textLine('Green fill, no stroke')
#t.setStrokeColorRGB(1,0,0) #ou can do this in a text object, or the canvas.
t.setStrokeColor(colors.red) #ou can do this in a text object, or the canvas.
t.setTextRenderMode(2) # fill and stroke
t.textLine('Green fill, red stroke - yuk!')
t.setTextRenderMode(0) # back to default - fill only
t.setFillColorRGB(0,0,0) #back to default
t.setStrokeColorRGB(0,0,0) #ditto
c.drawText(t)
c.showPage()
#########################################################################
#
# Page 5 - coord transforms
#
#########################################################################
framePage(c, "Coordinate Transforms")
c.setFont('Times-Roman', 12)
t = c.beginText(inch, 10 * inch)
t.textLines("""This shows coordinate transformations. We draw a set of axes,
moving down the page and transforming space before each one.
You can use saveState() and restoreState() to unroll transformations.
Note that functions which track the text cursor give the cursor position
in the current coordinate system; so if you set up a 6 inch high frame
2 inches down the page to draw text in, and move the origin to its top
left, you should stop writing text after six inches and not eight.""")
c.drawText(t)
drawAxes(c, "0. at origin")
c.addLiteral('%about to translate space')
c.translate(2*inch, 7 * inch)
drawAxes(c, '1. translate near top of page')
c.saveState()
c.translate(1*inch, -2 * inch)
drawAxes(c, '2. down 2 inches, across 1')
c.restoreState()
c.saveState()
c.translate(0, -3 * inch)
c.scale(2, -1)
drawAxes(c, '3. down 3 from top, scale (2, -1)')
c.restoreState()
c.saveState()
c.translate(0, -5 * inch)
c.rotate(-30)
drawAxes(c, "4. down 5, rotate 30' anticlockwise")
c.restoreState()
c.saveState()
c.translate(3 * inch, -5 * inch)
c.skew(0,30)
drawAxes(c, "5. down 5, 3 across, skew beta 30")
c.restoreState()
c.showPage()
#########################################################################
#
# Page 6 - clipping
#
#########################################################################
framePage(c, "Clipping")
c.setFont('Times-Roman', 12)
t = c.beginText(inch, 10 * inch)
t.textLines("""This shows clipping at work. We draw a chequerboard of rectangles
into a path object, and clip it. This then forms a mask which limits the region of
the page on which one can draw. This paragraph was drawn after setting the clipping
path, and so you should only see part of the text.""")
c.drawText(t)
c.saveState()
#c.setFillColorRGB(0,0,1)
p = c.beginPath()
#make a chesboard effect, 1 cm squares
for i in range(14):
x0 = (3 + i) * cm
for j in range(7):
y0 = (16 + j) * cm
p.rect(x0, y0, 0.85*cm, 0.85*cm)
c.addLiteral('%Begin clip path')
c.clipPath(p)
c.addLiteral('%End clip path')
t = c.beginText(3 * cm, 22.5 * cm)
t.textLines("""This shows clipping at work. We draw a chequerboard of rectangles
into a path object, and clip it. This then forms a mask which limits the region of
the page on which one can draw. This paragraph was drawn after setting the clipping
path, and so you should only see part of the text.
This shows clipping at work. We draw a chequerboard of rectangles
into a path object, and clip it. This then forms a mask which limits the region of
the page on which one can draw. This paragraph was drawn after setting the clipping
path, and so you should only see part of the text.
This shows clipping at work. We draw a chequerboard of rectangles
into a path object, and clip it. This then forms a mask which limits the region of
the page on which one can draw. This paragraph was drawn after setting the clipping
path, and so you should only see part of the text.""")
c.drawText(t)
c.restoreState()
t = c.beginText(inch, 5 * inch)
t.textLines("""You can also use text as an outline for clipping with the text render mode.
The API is not particularly clean on this and one has to follow the right sequence;
this can be optimized shortly.""")
c.drawText(t)
#first the outline
c.saveState()
t = c.beginText(inch, 3.0 * inch)
t.setFont('Helvetica-BoldOblique',108)
t.setTextRenderMode(5) #stroke and add to path
t.textLine('Python!')
t.setTextRenderMode(0)
c.drawText(t) #this will make a clipping mask
#now some small stuff which wil be drawn into the current clip mask
t = c.beginText(inch, 4 * inch)
t.setFont('Times-Roman',6)
t.textLines((('spam ' * 40) + '\n') * 15)
c.drawText(t)
#now reset canvas to get rid of the clipping mask
c.restoreState()
c.showPage()
#########################################################################
#
# Page 7 - images
#
#########################################################################
framePage(c, "Images")
c.setFont('Times-Roman', 12)
t = c.beginText(inch, 10 * inch)
if not haveImages:
c.drawString(inch, 11*inch,
"Python or Java Imaging Library not found! Below you see rectangles instead of images.")
t.textLines("""PDFgen uses the Python Imaging Library (or, under Jython, java.awt.image and javax.imageio)
to process a very wide variety of image formats.
This page shows image capabilities. If I've done things right, the bitmap should have
its bottom left corner aligned with the crosshairs.
There are two methods for drawing images. The recommended use is to call drawImage.
This produces the smallest PDFs and the fastest generation times as each image's binary data is
only embedded once in the file. Also you can use advanced features like transparency masks.
You can also use drawInlineImage, which puts images in the page stream directly.
This is slightly faster for Acrobat to render or for very small images, but wastes
space if you use images more than once.""")
c.drawText(t)
if haveImages:
from reportlab.lib.testutils import testsFolder
gif = os.path.join(testsFolder,'pythonpowered.gif')
c.drawInlineImage(gif,2*inch, 7*inch)
c.drawInlineImage(os.path.join(testsFolder,'pythonpowered-gs.gif'),4*inch, 7.5*inch)
tif = os.path.join(testsFolder,'test-cross.tiff') #example of a mode '1' image
c.drawInlineImage(tif,1*inch, 1*inch)
from reportlab.lib.utils import Image as PilImage
if PilImage:
c.drawInlineImage(PilImage.open(tif),1.25*inch, 1*inch)
else:
c.rect(2*inch, 7*inch, 110, 44)
c.rect(4*inch, 7*inch, 110, 44)
c.line(1.5*inch, 7*inch, 4*inch, 7*inch)
c.line(2*inch, 6.5*inch, 2*inch, 8*inch)
c.drawString(4.5 * inch, 7.25*inch, 'inline image drawn at natural size')
if haveImages:
c.drawInlineImage(gif,2*inch, 5*inch, inch, inch)
else:
c.rect(2*inch, 5*inch, inch, inch)
c.line(1.5*inch, 5*inch, 4*inch, 5*inch)
c.line(2*inch, 4.5*inch, 2*inch, 6*inch)
c.drawString(4.5 * inch, 5.25*inch, 'inline image distorted to fit box')
c.drawString(1.5 * inch, 4*inch, 'Image XObjects can be defined once in the file and drawn many times.')
c.drawString(1.5 * inch, 3.75*inch, 'This results in faster generation and much smaller files.')
for i in range(5):
if haveImages:
(w, h) = c.drawImage(gif, (1.5 + i)*inch, 3*inch)
else:
(w, h) = (144, 10)
c.rect((1.5 + i)*inch, 3*inch, 110, 44)
myMask = [254,255,222,223,0,1]
c.drawString(1.5 * inch, 2.5*inch, "The optional 'mask' parameter lets you define transparent colors. We used a color picker")
c.drawString(1.5 * inch, 2.3*inch, "to determine that the yellow in the image above is RGB=(225,223,0). We then define a mask")
c.drawString(1.5 * inch, 2.1*inch, "spanning these RGB values: %s. The background vanishes!!" % myMask)
c.drawString(2.5*inch, 1.2*inch, 'This would normally be obscured')
if haveImages:
c.drawImage(gif, 1*inch, 1.2*inch, w, h, mask=myMask)
c.drawImage(gif, 3*inch, 1.2*inch, w, h, mask='auto')
c.drawImage(os.path.join(testsFolder,'test-rgba.png'),5*inch,1.2*inch,width=10,height=10,mask='auto')
c.drawImage(os.path.join(testsFolder,'test-indexed.png'),5.5*inch,1.2*inch,width=10,height=10,mask='auto')
else:
c.rect(1*inch, 1.2*inch, w, h)
c.rect(3*inch, 1.2*inch, w, h)
c.showPage()
c.drawString(1*inch, 10.25*inch, "For rgba type images we can use the alpha channel if we set mask='auto'.")
c.drawString(1*inch, 10.25*inch-14.4, "The first image is solid red with variable alpha.")
c.drawString(1*inch, 10.25*inch-2*14.4, "The second image is white alpha=0% to purple=100%")
for i in range(8):
c.drawString(1*inch,8*inch+i*14.4,"mask=None Line %d"%i)
c.drawString(3*inch,8*inch+i*14.4,"mask='auto' Line %d"%i)
c.drawString(1*inch,6*inch+i*14.4,"mask=None Line %d"%i)
c.drawString(3*inch,6*inch+i*14.4,"mask='auto' Line %d"%i)
w = 100
h = 75
c.rect(1*inch, 8+14.4*inch, w, h)
c.rect(3*inch, 8+14.4*inch, w, h)
c.rect(1*inch, 6+14.4*inch, w, h)
c.rect(3*inch, 6+14.4*inch, w, h)
if haveImages:
from reportlab.lib.testutils import testsFolder
png = os.path.join(testsFolder,'solid_red_alpha.png')
c.drawImage(png, 1*inch, 8*inch+14.4, w, h, mask=None)
c.drawImage(png, 3*inch, 8*inch+14.4, w, h, mask='auto')
png = os.path.join(testsFolder,'alpha_test.png')
c.drawImage(png, 1*inch, 6*inch+14.4, w, h, mask=None)
c.drawImage(png, 3*inch, 6*inch+14.4, w, h, mask='auto')
c.showPage()
if haveImages:
import shutil
c.drawString(1*inch, 10.25*inch, 'This jpeg is actually a gif')
jpg = outputfile('_i_am_actually_a_gif.jpg')
shutil.copyfile(gif,jpg)
c.drawImage(jpg, 1*inch, 9.25*inch, w, h, mask='auto')
tjpg = os.path.join(os.path.dirname(os.path.dirname(gif)),'docs','images','lj8100.jpg')
if os.path.isfile(tjpg):
c.drawString(4*inch, 10.25*inch, 'This gif is actually a jpeg')
tgif = outputfile(os.path.basename('_i_am_actually_a_jpeg.gif'))
shutil.copyfile(tjpg,tgif)
c.drawImage(tgif, 4*inch, 9.25*inch, w, h, mask='auto')
c.drawString(inch, 9.0*inch, 'Image positioning tests with preserveAspectRatio')
#preserveAspectRatio test
c.drawString(inch, 8.8*inch, 'Both of these should appear within the boxes, vertically centered')
x, y, w, h = inch, 6.75* inch, 2*inch, 2*inch
c.rect(x, y, w, h)
(w2, h2) = c.drawImage(gif, #anchor southwest, drawImage
x, y, width=w, height=h,
preserveAspectRatio=True,
anchor='c'
)
#now test drawInlineImage across the page
x = 5 * inch
c.rect(x, y, w, h)
(w2, h2) = c.drawInlineImage(gif, #anchor southwest, drawInlineImage
x, y, width=w, height=h,
preserveAspectRatio=True,
anchor='c'
)
c.drawString(inch, 5.75*inch,
'anchored by respective corners - use both a wide and a tall one as tests')
x = 0.25 * inch
for anchor in ['nw','n','ne','w','c','e','sw','s','se']:
x += 0.75*inch
c.rect(x, 5*inch, 0.6*inch, 0.6*inch)
c.drawImage(
gif, x, 5*inch,
width=0.6*inch, height=0.6*inch,
preserveAspectRatio=True,
anchor=anchor
)
c.drawString(x, 4.9*inch, anchor)
x = 0.25 * inch
tall_red = os.path.join(testsFolder,'tall_red.png')
for anchor in ['nw','n','ne','w','c','e','sw','s','se']:
x += 0.75*inch
c.rect(x, 4*inch, 0.6*inch, 0.6*inch)
c.drawImage(
tall_red, x, 4*inch,
width=0.6*inch, height=0.6*inch,
preserveAspectRatio=True,
anchor=anchor
)
c.drawString(x, 3.9*inch, anchor)
c.showPage()
#########################################################################
#
# Page 8 - Forms and simple links
#
#########################################################################
framePage(c, "Forms and Links")
c.setFont('Times-Roman', 12)
t = c.beginText(inch, 10 * inch)
t.textLines("""Forms are sequences of text or graphics operations
which are stored only once in a PDF file and used as many times
as desired. The blue logo bar to the left is an example of a form
in this document. See the function framePageForm in this demo script
for an example of how to use canvas.beginForm(name, ...) ... canvas.endForm().
Documents can also contain cross references where (for example) a rectangle
on a page may be bound to a position on another page. If the user clicks
on the rectangle the PDF viewer moves to the bound position on the other
page. There are many other types of annotations and links supported by PDF.
For example, there is a bookmark to each page in this document and below
is a browsable index that jumps to those pages. In addition we show two
URL hyperlinks; for these, you specify a rectangle but must draw the contents
or any surrounding rectangle yourself.
""")
c.drawText(t)
nentries = len(titlelist)
xmargin = 3*inch
xmax = 7*inch
ystart = 6.54*inch
ydelta = 0.4*inch
for i in range(nentries):
yposition = ystart - i*ydelta
title = titlelist[i]
c.drawString(xmargin, yposition, title)
c.linkAbsolute(title, title, (xmargin-ydelta/4, yposition-ydelta/4, xmax, yposition+ydelta/2))
# test URLs
r1 = (inch, 3*inch, 5*inch, 3.25*inch) # this is x1,y1,x2,y2
c.linkURL('http://www.reportlab.com/', r1, thickness=1, color=colors.green)
c.drawString(inch+3, 3*inch+6, 'Hyperlink to www.reportlab.com, with green border')
r1 = (inch, 2.5*inch, 5*inch, 2.75*inch) # this is x1,y1,x2,y2
c.linkURL('mailto:reportlab-users@egroups.com', r1) #, border=0)
c.drawString(inch+3, 2.5*inch+6, 'mailto: hyperlink, without border')
r1 = (inch, 2*inch, 5*inch, 2.25*inch) # this is x1,y1,x2,y2
c.linkURL('http://www.reportlab.com/', r1,
thickness=2,
dashArray=[2,4],
color=colors.magenta)
c.drawString(inch+3, 2*inch+6, 'Hyperlink with custom border style')
xpdf = fileName2FSEnc(outputfile('test_hello.pdf').replace('\\','/'))
link = 'Hard link to %s, with red border' % xpdf
r1 = (inch, 1.5*inch, inch+2*3+c.stringWidth(link,c._fontname, c._fontsize), 1.75*inch) # this is x1,y1,x2,y2
c.linkURL(xpdf, r1, thickness=1, color=colors.red, kind='GoToR')
c.drawString(inch+3, 1.5*inch+6, link )
c.showPage()
############# colour gradients
title = 'Gradients code contributed by Peter Johnson <johnson.peter@gmail.com>'
c.drawString(1*inch,10.8*inch,title)
c.addOutlineEntry(title+" section", title, level=0, closed=True)
c.bookmarkHorizontalAbsolute(title, 10.8*inch)
from reportlab.lib.colors import red, green, blue
c.saveState()
p = c.beginPath()
p.moveTo(1*inch,2*inch)
p.lineTo(1.5*inch,2.5*inch)
p.curveTo(2*inch,3*inch,3.0*inch,3*inch,4*inch,2.9*inch)
p.lineTo(5.5*inch,2.1*inch)
p.close()
c.clipPath(p)
# Draw a linear gradient from (0, 2*inch) to (5*inch, 3*inch), from orange to white.
# The gradient will extend past the endpoints (so you probably want a clip path in place)
c.linearGradient(1*inch, 2*inch, 6*inch, 3*inch, (red, blue))
c.restoreState()
# Draw a radial gradient with a radius of 3 inches.
# The color starts orange and stays orange until 20% of the radius,
# then fades to white at 80%, and ends up green at 3 inches from the center.
# Since extend is false, the gradient stops drawing at the edge of the circle.
c.radialGradient(4*inch, 6*inch, 3*inch, (red, green, blue), (0.2, 0.8, 1.0), extend=False)
c.showPage()
### now do stuff for the outline
#for x in outlinenametree: print x
#stop
#c.setOutlineNames0(*outlinenametree)
return c
def run(filename):
    """Build the demo document twice.

    The first pass writes *filename* via Canvas.save(); the second pass
    builds an identical document and writes the bytes obtained through
    getpdfdata() to a sibling file with an 'm' appended to the stem,
    exercising the in-memory API.
    """
    c = makeDocument(filename)
    # Document metadata; author/title deliberately contain non-ASCII bytes.
    c.setAuthor('R\xfcp\xe9rt B\xe8\xe4r')
    c.setTitle('R\xc3\xbcp\xc3\xa9rt B\xc3\xa8\xc3\xa4r\'s Book')
    c.setCreator('Some Creator')
    c.setSubject('Some Subject')
    c.save()
    # Second pass: same content, fetched in memory and written manually.
    c = makeDocument(filename)
    import os
    root, ext = os.path.splitext(filename)
    # Context manager guarantees the handle is closed even if write() fails
    # (the original open/close pair leaked the handle on error).
    with open('%sm%s' % (root, ext), 'wb') as f:
        f.write(c.getpdfdata())
def pageShapes(c):
    """Demonstrates the basic lines and shapes"""
    # NOTE(review): this helper is not called by the test cases below; it
    # invokes setTextOrigin/textLines directly on the canvas, while the rest
    # of this file only uses those on text objects -- confirm it still runs
    # before reusing it.
    c.showPage()
    framePage(c, "Basic line and shape routines""")
    c.setTextOrigin(inch, 10 * inch)
    c.setFont('Times-Roman', 12)
    c.textLines("""pdfgen provides some basic routines for drawing straight and curved lines,
and also for solid shapes.""")
    y = 9 * inch
    # DocBlock renders a comment/code/comment lesson panel at the given origin.
    d = DocBlock()
    d.comment1 = 'Lesson one'
    d.code = "canvas.textOut('hello, world')"
    print(d.code)
    d.comment2 = 'Lesson two'
    d.draw(c, inch, 9 * inch)
class PdfgenTestCase(unittest.TestCase):
    "Make documents with lots of Pdfgen features"

    def test0(self):
        "Make a PDFgen document with most graphics features"
        run(outputfile('test_pdfgen_general.pdf'))

    def test1(self):
        # Exercise viewer preferences and page-label sequences.
        c=canvas.Canvas(outputfile('test_pdfgen_obscure.pdf'))
        c.setViewerPreference('PrintScaling','None')
        c.setViewerPreference('HideToolbar','true')
        c.setViewerPreference('HideMenubar','true')
        c.addPageLabel(0, prefix="Front")
        c.addPageLabel(1, style='ROMAN_LOWER', start=2)
        c.addPageLabel(8, style='ARABIC')
        # (These are fixes for missing pages)
        c.addPageLabel(11, style='ARABIC',start=6)
        c.addPageLabel(17, style='ARABIC', start=14)
        c.addPageLabel(21, style='ARABIC', start=22)
        c.addPageLabel(99, style='LETTERS_UPPER')
        c.addPageLabel(102, prefix="Back",start=1)
        # Make some (mostly) empty pages
        for i in range(113):
            c.drawString(100, 100, 'Tis is page '+str(i))
            c.showPage()
        # Output the PDF
        c.save()

    def test2(self):
        # cropMarks=True makes the canvas enlarge the media box and draw
        # crop marks around the original page rectangle on every page.
        c=canvas.Canvas('test_pdfgen_autocropmarks.pdf',cropMarks=True)
        c.saveState()
        c.setStrokeColor((1,0,0))
        # Red outline of the nominal page area so the crop marks are visible.
        c.rect(0,0,c._pagesize[0],c._pagesize[1],stroke=1)
        c.restoreState()
        c.drawString(72,c._pagesize[1]-72,'Auto Crop Marks')
        c.showPage()
        c.saveState()
        c.setStrokeColor((1,0,0))
        c.rect(0,0,c._pagesize[0],c._pagesize[1],stroke=1)
        c.restoreState()
        c.drawString(72,c._pagesize[1]-72,'Auto Crop Marks Another Page')
        c.showPage()
        c.save()

    def test3(self):
        '''some special properties'''
        # Two spot-colour separations; the loop draws one swatch panel per
        # separation, testing density, fill alpha and overprint.
        palette = [
            colors.CMYKColorSep(0.6,0.34,0,0.1,spotName='625C',density=1),
            colors.CMYKColorSep(0.13,0.51,0.87,0.48,spotName='464c',density=1),
            ]
        canv = canvas.Canvas( 'test_pdfgen_general_spots.pdf',
                pagesize=(346,102),
                )
        canv.setLineWidth(1)
        canv.setStrokeColor(colors.CMYKColor(0,0,0,1))
        x=10
        y=10
        for c in palette:
            c.density = 1.0
            canv.setFillColor(c)
            canv.setFont('Helvetica',20)
            canv.drawString(x,80,'This is %s' % c.spotName)
            canv.setFont('Helvetica',6)
            # Full-density and half-density swatches of the separation.
            canv.rect(x,y,50,50,fill=1)
            canv.setFillColor(c.clone(density=0.5))
            canv.rect(x+55,y,20,20,fill=1)
            # Yellow background squares for the alpha/overprint overlays.
            canv.setFillColor(colors.CMYKColor(0,0,1,0))
            canv.rect(x+80,y,30,30,fill=1)
            canv.rect(x+120,y,30,30,fill=1)
            # First palette entry: alpha 1 / overprint on; second: 0.5 / off.
            alpha = c is palette[0] and 1 or 0.5
            op = c is palette[0] and True or False
            canv.setFillAlpha(alpha)
            canv.setFillColor(colors.CMYKColor(1,0,0,0))
            canv.drawString(x+80+1,y+3,'OP=%d' % int(False))
            canv.drawString(x+80+1,y+23,'Alpha=%.1f' % alpha)
            canv.rect(x+90,y+10,10,10,fill=1)
            canv.setFillOverprint(op)
            canv.drawString(x+120+1,y+3,'OP=%d' % int(op))
            canv.drawString(x+120+1,y+23,'Alpha=%.1f' % alpha)
            canv.rect(x+130,y+10,10,10,fill=1)
            # Reset graphics state before the next panel.
            canv.setFillAlpha(1)
            canv.setFillOverprint(False)
            x += canv._pagesize[0]*0.5
        canv.showPage()
        canv.save()

    def test4(self):
        # enforceColorSpace should reject colours outside the declared
        # space, except blacks, which are convertible between spaces.
        sc = colors.CMYKColorSep
        rgb = ['red','green','blue', 'black']
        cmykb = [(0,0,0,1)]
        cmyk = [(1,0,0,0),(0,1,0,0),(0,0,1,0)]+cmykb
        seps = [sc(1,1,0,0,spotName='sep0'),sc(0,1,1,0,spotName='sep1')]
        sepb = [sc(0,0,0,1,spotName='sepb')]
        #these should all work
        trySomeColors(rgb+cmyk+seps)
        trySomeColors(rgb,'rgb')
        trySomeColors(cmyk,'cmyk')
        trySomeColors(seps+cmyk,'sep_cmyk')
        trySomeColors(seps+sepb,'sep') #we need a fake black for now
        trySomeColors(seps+['black']+cmykb,'sep_black')
        self.assertRaises(ValueError,trySomeColors,rgb+cmyk+seps,'rgb')
        self.assertRaises(ValueError,trySomeColors,rgb+cmyk,'rgb')
        self.assertRaises(ValueError,trySomeColors,rgb+seps,'rgb')
        trySomeColors(rgb+sepb,'rgb') #should work because blacks are convertible
        trySomeColors(rgb+cmykb,'rgb')
        self.assertRaises(ValueError,trySomeColors,cmyk+rgb+seps,'cmyk')
        trySomeColors(cmyk+['black']+seps,'cmyk') #OK because black & seps are convertible

    def test5(self,uopw=None):
        # Mid-document page-size and rotation switching; optionally
        # encrypted when a (user, owner) password pair is given (test6).
        from reportlab.lib.pagesizes import A4,LETTER
        if uopw:
            from reportlab.lib import pdfencrypt
            encrypt = pdfencrypt.StandardEncryption(uopw[0], uopw[1])
            encrypt.setAllPermissions(0)
            encrypt.canPrint = 1
            canv = canvas.Canvas(outputfile('test_pdfgen_general_page_sizes_encrypted.pdf'),pagesize=A4)
            canv._doc.encrypt = encrypt
        else:
            canv = canvas.Canvas(outputfile('test_pdfgen_general_page_sizes.pdf'),pagesize=A4)
        canv.setFont('Helvetica',10)
        # Page 1: A4 portrait.
        S = A4
        canv.drawString(0,S[1]-10,'Top Left=(%s,%s) Page Size=%s x %s' % (0,S[1],S[0],S[1]))
        canv.drawCentredString(0.5*S[0],0.5*S[1],'Center =(%s,%s) Page Size=%s x %s' % (0.5*S[0],0.5*S[1],S[0],S[1]))
        canv.drawRightString(S[0],2,'Bottom Right=(%s,%s) Page Size=%s x %s' % (S[0],0,S[0],S[1]))
        canv.showPage()
        # Page 2: switch to LETTER.
        S = LETTER
        canv.setPageSize(S)
        canv.drawString(0,S[1]-10,'Top Left=(%s,%s) Page Size=%s x %s' % (0,S[1],S[0],S[1]))
        canv.drawCentredString(0.5*S[0],0.5*S[1],'Center =(%s,%s) Page Size=%s x %s' % (0.5*S[0],0.5*S[1],S[0],S[1]))
        canv.drawRightString(S[0],2,'Bottom Right=(%s,%s) Page Size=%s x %s' % (S[0],0,S[0],S[1]))
        canv.showPage()
        # Page 3: back to A4, rotated 180 degrees by the viewer.
        S = A4
        canv.setPageSize(S)
        canv.setPageRotation(180)
        canv.drawString(0,S[1]-10,'Top Left=(%s,%s) Page Size=%s x %s' % (0,S[1],S[0],S[1]))
        canv.drawCentredString(0.5*S[0],0.5*S[1],'Center =(%s,%s) Page Size=%s x %s' % (0.5*S[0],0.5*S[1],S[0],S[1]))
        canv.drawRightString(S[0],2,'Bottom Right=(%s,%s) Page Size=%s x %s' % (S[0],0,S[0],S[1]))
        canv.showPage()
        # Page 4: A4 landscape (swapped dimensions), rotation reset.
        S = A4[1],A4[0]
        canv.setPageSize(S)
        canv.setPageRotation(0)
        canv.drawString(0,S[1]-30,'Top Left=(%s,%s) Page Size=%s x %s' % (0,S[1],S[0],S[1]))
        canv.drawCentredString(0.5*S[0],0.5*S[1],'Center =(%s,%s) Page Size=%s x %s' % (0.5*S[0],0.5*S[1],S[0],S[1]))
        canv.drawRightString(S[0],32,'Bottom Right=(%s,%s) Page Size=%s x %s' % (S[0],0,S[0],S[1]))
        canv.showPage()
        canv.save()

    def test6(self):
        # Same as test5, but with standard encryption enabled.
        self.test5(('User','Password'))

    def testMultipleSavesOk(self):
        c=canvas.Canvas(outputfile('test_pdfgen_savetwice.pdf'))
        c.drawString(100, 700, 'Hello. This was saved twice')
        c.showPage()
        # Output the PDF
        stuff = c.getpdfdata()
        #multiple calls to save / getpdfdata used to cause errors
        stuff = c.getpdfdata()
def trySomeColors(C, enforceColorSpace=None):
    """Render a 10x10 swatch for every colour in *C* on an in-memory canvas.

    Swatches fill a column bottom-to-top, starting a new column to the
    right whenever the page height is exceeded.  Any colour incompatible
    with *enforceColorSpace* makes the canvas raise ValueError.
    """
    from reportlab.lib.utils import getBytesIO
    buf = getBytesIO()
    canv = canvas.Canvas(buf, enforceColorSpace=enforceColorSpace)
    canv.setFont('Helvetica', 10)
    col = 0
    row = 0
    page_w, page_h = canv._pagesize
    for colour in C:
        if row + 10 > page_h:
            # Column is full -- wrap to the bottom of the next column.
            row = 0
            col += 10
        canv.setFillColor(colour)
        canv.rect(col, row, 10, 10, fill=1, stroke=0)
        row += 10
    canv.showPage()
    canv.save()
def makeSuite():
    # Standard reportlab test-harness hook: wrap this module's TestCase.
    return makeSuiteForClasses(PdfgenTestCase)
#noruntests
if __name__ == "__main__":
    # Run the suite directly and report where the output files were written.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| |
"""Component to interface with cameras."""
import asyncio
import base64
import collections
from contextlib import suppress
from datetime import timedelta
import logging
import hashlib
from random import SystemRandom
import attr
from aiohttp import web
import async_timeout
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
CONF_FILENAME,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.components.http import HomeAssistantView, KEY_AUTHENTICATED
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
SERVICE_PLAY_MEDIA,
DOMAIN as DOMAIN_MP,
)
from homeassistant.components.stream import request_stream
from homeassistant.components.stream.const import (
OUTPUT_FORMATS,
FORMAT_CONTENT_TYPE,
CONF_STREAM_SOURCE,
CONF_LOOKBACK,
CONF_DURATION,
SERVICE_RECORD,
DOMAIN as DOMAIN_STREAM,
)
from homeassistant.components import websocket_api
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_when_setup
from .const import DOMAIN, DATA_CAMERA_PREFS
from .prefs import CameraPreferences
# mypy: allow-untyped-calls, allow-untyped-defs

_LOGGER = logging.getLogger(__name__)

# Entity service names registered in async_setup().
SERVICE_ENABLE_MOTION = "enable_motion_detection"
SERVICE_DISABLE_MOTION = "disable_motion_detection"
SERVICE_SNAPSHOT = "snapshot"
SERVICE_PLAY_STREAM = "play_stream"

SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"

# Service-call attribute names.
ATTR_FILENAME = "filename"
ATTR_MEDIA_PLAYER = "media_player"
ATTR_FORMAT = "format"

# Camera entity states.
STATE_RECORDING = "recording"
STATE_STREAMING = "streaming"
STATE_IDLE = "idle"

# Bitfield of features supported by the camera entity
SUPPORT_ON_OFF = 1
SUPPORT_STREAM = 2

DEFAULT_CONTENT_TYPE = "image/jpeg"
# {0} = entity_id, {1} = access token (rotated by update_tokens()).
ENTITY_IMAGE_URL = "/api/camera_proxy/{0}?token={1}"

# How often every camera's access token is rotated.
TOKEN_CHANGE_INTERVAL = timedelta(minutes=5)
_RND = SystemRandom()

MIN_STREAM_INTERVAL = 0.5  # seconds

# Base schema shared by every camera entity service.
CAMERA_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids})

CAMERA_SERVICE_SNAPSHOT = CAMERA_SERVICE_SCHEMA.extend(
    {vol.Required(ATTR_FILENAME): cv.template}
)

CAMERA_SERVICE_PLAY_STREAM = CAMERA_SERVICE_SCHEMA.extend(
    {
        vol.Required(ATTR_MEDIA_PLAYER): cv.entities_domain(DOMAIN_MP),
        vol.Optional(ATTR_FORMAT, default="hls"): vol.In(OUTPUT_FORMATS),
    }
)

CAMERA_SERVICE_RECORD = CAMERA_SERVICE_SCHEMA.extend(
    {
        vol.Required(CONF_FILENAME): cv.template,
        vol.Optional(CONF_DURATION, default=30): vol.Coerce(int),
        vol.Optional(CONF_LOOKBACK, default=0): vol.Coerce(int),
    }
)

# Websocket command schema for fetching a camera thumbnail.
WS_TYPE_CAMERA_THUMBNAIL = "camera_thumbnail"
SCHEMA_WS_CAMERA_THUMBNAIL = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {
        vol.Required("type"): WS_TYPE_CAMERA_THUMBNAIL,
        vol.Required("entity_id"): cv.entity_id,
    }
)
@attr.s
class Image:
    """Represent an image."""

    # MIME type of the payload, e.g. "image/jpeg".
    content_type = attr.ib(type=str)
    # Raw image bytes.
    content = attr.ib(type=bytes)
@bind_hass
async def async_request_stream(hass, entity_id, fmt):
    """Request a stream for a camera entity.

    Raises HomeAssistantError when the camera exposes no stream source.
    """
    camera = _get_camera_from_entity_id(hass, entity_id)
    camera_prefs = hass.data[DATA_CAMERA_PREFS].get(entity_id)

    # Bound the (possibly network-backed) stream_source lookup to 10 seconds.
    async with async_timeout.timeout(10):
        source = await camera.stream_source()

    if not source:
        raise HomeAssistantError(
            f"{camera.entity_id} does not support play stream service"
        )

    # keepalive mirrors the per-camera "preload stream" preference.
    return request_stream(hass, source, fmt=fmt, keepalive=camera_prefs.preload_stream)
@bind_hass
async def async_get_image(hass, entity_id, timeout=10):
    """Fetch an image from a camera entity.

    Raises HomeAssistantError when no image is produced within *timeout*
    seconds (or the fetch is cancelled).
    """
    camera = _get_camera_from_entity_id(hass, entity_id)

    # NOTE(review): suppressing CancelledError here also swallows task
    # cancellation and converts it into "Unable to get image" -- confirm
    # this is intended.
    with suppress(asyncio.CancelledError, asyncio.TimeoutError):
        async with async_timeout.timeout(timeout):
            image = await camera.async_camera_image()

            if image:
                return Image(camera.content_type, image)

    raise HomeAssistantError("Unable to get image")
@bind_hass
async def async_get_mjpeg_stream(hass, request, entity_id):
    """Fetch an mjpeg stream from a camera entity."""
    camera = _get_camera_from_entity_id(hass, entity_id)

    # Delegate to the entity; the default implementation re-serves still
    # images as an MJPEG multipart response.
    return await camera.handle_async_mjpeg_stream(request)
async def async_get_still_stream(request, image_cb, content_type, interval):
    """Generate an HTTP MJPEG stream from camera images.

    Polls *image_cb* every *interval* seconds and writes each new frame as
    a multipart part; unchanged frames are not rewritten.  Stops when
    *image_cb* returns a falsy value.

    This method must be run in the event loop.
    """
    response = web.StreamResponse()
    response.content_type = "multipart/x-mixed-replace; " "boundary=--frameboundary"
    await response.prepare(request)

    async def write_to_mjpeg_stream(img_bytes):
        """Write image to stream."""
        # Multipart headers must be bytes; the frame payload follows them.
        await response.write(
            bytes(
                "--frameboundary\r\n"
                "Content-Type: {}\r\n"
                "Content-Length: {}\r\n\r\n".format(content_type, len(img_bytes)),
                "utf-8",
            )
            + img_bytes
            + b"\r\n"
        )

    last_image = None

    while True:
        img_bytes = await image_cb()
        if not img_bytes:
            break

        if img_bytes != last_image:
            await write_to_mjpeg_stream(img_bytes)

            # Chrome seems to always ignore first picture,
            # print it twice.
            if last_image is None:
                await write_to_mjpeg_stream(img_bytes)

            last_image = img_bytes

        await asyncio.sleep(interval)

    return response
def _get_camera_from_entity_id(hass, entity_id):
    """Resolve *entity_id* to a usable camera entity.

    Raises HomeAssistantError when the camera integration is not loaded,
    the entity is unknown, or the camera is switched off.
    """
    component = hass.data.get(DOMAIN)
    if component is None:
        raise HomeAssistantError("Camera integration not set up")

    entity = component.get_entity(entity_id)
    if entity is None:
        raise HomeAssistantError("Camera not found")
    if not entity.is_on:
        raise HomeAssistantError("Camera is off")

    return entity
async def async_setup(hass, config):
    """Set up the camera component."""
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL
    )

    # Load stored per-camera preferences (e.g. preload_stream).
    prefs = CameraPreferences(hass)
    await prefs.async_initialize()
    hass.data[DATA_CAMERA_PREFS] = prefs

    # HTTP views for still images and MJPEG streams.
    hass.http.register_view(CameraImageView(component))
    hass.http.register_view(CameraMjpegStream(component))

    # Websocket commands for thumbnails, streams and preferences.
    hass.components.websocket_api.async_register_command(
        WS_TYPE_CAMERA_THUMBNAIL, websocket_camera_thumbnail, SCHEMA_WS_CAMERA_THUMBNAIL
    )
    hass.components.websocket_api.async_register_command(ws_camera_stream)
    hass.components.websocket_api.async_register_command(websocket_get_prefs)
    hass.components.websocket_api.async_register_command(websocket_update_prefs)

    await component.async_setup(config)

    async def preload_stream(hass, _):
        # Once the stream integration is ready, keep streams alive for
        # cameras whose preferences request preloading.
        for camera in component.entities:
            camera_prefs = prefs.get(camera.entity_id)
            if not camera_prefs.preload_stream:
                continue

            async with async_timeout.timeout(10):
                source = await camera.stream_source()

            if not source:
                continue

            request_stream(hass, source, keepalive=True)

    async_when_setup(hass, DOMAIN_STREAM, preload_stream)

    @callback
    def update_tokens(time):
        """Update tokens of the entities."""
        for entity in component.entities:
            entity.async_update_token()
            hass.async_create_task(entity.async_update_ha_state())

    # Rotate camera access tokens on a fixed schedule.
    hass.helpers.event.async_track_time_interval(update_tokens, TOKEN_CHANGE_INTERVAL)

    # Register the camera entity services and their handlers.
    component.async_register_entity_service(
        SERVICE_ENABLE_MOTION, CAMERA_SERVICE_SCHEMA, "async_enable_motion_detection"
    )
    component.async_register_entity_service(
        SERVICE_DISABLE_MOTION, CAMERA_SERVICE_SCHEMA, "async_disable_motion_detection"
    )
    component.async_register_entity_service(
        SERVICE_TURN_OFF, CAMERA_SERVICE_SCHEMA, "async_turn_off"
    )
    component.async_register_entity_service(
        SERVICE_TURN_ON, CAMERA_SERVICE_SCHEMA, "async_turn_on"
    )
    component.async_register_entity_service(
        SERVICE_SNAPSHOT, CAMERA_SERVICE_SNAPSHOT, async_handle_snapshot_service
    )
    component.async_register_entity_service(
        SERVICE_PLAY_STREAM,
        CAMERA_SERVICE_PLAY_STREAM,
        async_handle_play_stream_service,
    )
    component.async_register_entity_service(
        SERVICE_RECORD, CAMERA_SERVICE_RECORD, async_handle_record_service
    )

    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry by forwarding it to the camera component."""
    component = hass.data[DOMAIN]
    return await component.async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry by forwarding it to the camera component."""
    component = hass.data[DOMAIN]
    return await component.async_unload_entry(entry)
class Camera(Entity):
    """The base class for camera entities.

    Platform integrations subclass this and override ``camera_image``
    (and optionally ``stream_source``, ``turn_on``/``turn_off``, and the
    motion-detection hooks).  Access to the image/stream HTTP views is
    authorized either by normal authentication or by one of the rotating
    tokens kept in ``access_tokens``.
    """

    def __init__(self):
        """Initialize a camera."""
        self.is_streaming = False
        # MIME type served by the image/MJPEG HTTP views.
        self.content_type = DEFAULT_CONTENT_TYPE
        # Two-slot deque: keeps the current and the previous token so
        # requests signed with the old token keep working across a rotation.
        self.access_tokens: collections.deque = collections.deque([], 2)
        self.async_update_token()

    @property
    def should_poll(self):
        """No need to poll cameras."""
        return False

    @property
    def entity_picture(self):
        """Return a link to the camera feed as entity picture."""
        # The newest access token is embedded so the frontend URL is valid.
        return ENTITY_IMAGE_URL.format(self.entity_id, self.access_tokens[-1])

    @property
    def supported_features(self):
        """Flag supported features."""
        return 0

    @property
    def is_recording(self):
        """Return true if the device is recording."""
        return False

    @property
    def brand(self):
        """Return the camera brand."""
        return None

    @property
    def motion_detection_enabled(self):
        """Return the camera motion detection status."""
        return None

    @property
    def model(self):
        """Return the camera model."""
        return None

    @property
    def frame_interval(self):
        """Return the interval between frames of the mjpeg stream."""
        return 0.5

    async def stream_source(self):
        """Return the source of the stream."""
        return None

    def camera_image(self):
        """Return bytes of camera image."""
        raise NotImplementedError()

    @callback
    def async_camera_image(self):
        """Return bytes of camera image.

        This method must be run in the event loop and returns a coroutine.
        """
        # Runs the (potentially blocking) camera_image in an executor.
        return self.hass.async_add_job(self.camera_image)

    async def handle_async_still_stream(self, request, interval):
        """Generate an HTTP MJPEG stream from camera images.

        This method must be run in the event loop.
        """
        return await async_get_still_stream(
            request, self.async_camera_image, self.content_type, interval
        )

    async def handle_async_mjpeg_stream(self, request):
        """Serve an HTTP MJPEG stream from the camera.

        This method can be overridden by camera platforms to proxy
        a direct stream from the camera.

        This method must be run in the event loop.
        """
        # Default implementation: compose the stream from still images.
        return await self.handle_async_still_stream(request, self.frame_interval)

    @property
    def state(self):
        """Return the camera state."""
        # Recording takes precedence over streaming when both are active.
        if self.is_recording:
            return STATE_RECORDING
        if self.is_streaming:
            return STATE_STREAMING
        return STATE_IDLE

    @property
    def is_on(self):
        """Return true if on."""
        return True

    def turn_off(self):
        """Turn off camera."""
        raise NotImplementedError()

    @callback
    def async_turn_off(self):
        """Turn off camera."""
        return self.hass.async_add_job(self.turn_off)

    def turn_on(self):
        """Turn on camera."""
        raise NotImplementedError()

    @callback
    def async_turn_on(self):
        """Turn on camera."""
        return self.hass.async_add_job(self.turn_on)

    def enable_motion_detection(self):
        """Enable motion detection in the camera."""
        raise NotImplementedError()

    @callback
    def async_enable_motion_detection(self):
        """Call the job and enable motion detection."""
        return self.hass.async_add_job(self.enable_motion_detection)

    def disable_motion_detection(self):
        """Disable motion detection in camera."""
        raise NotImplementedError()

    @callback
    def async_disable_motion_detection(self):
        """Call the job and disable motion detection."""
        return self.hass.async_add_job(self.disable_motion_detection)

    @property
    def state_attributes(self):
        """Return the camera state attributes."""
        attrs = {"access_token": self.access_tokens[-1]}

        if self.model:
            attrs["model_name"] = self.model

        if self.brand:
            attrs["brand"] = self.brand

        if self.motion_detection_enabled:
            attrs["motion_detection"] = self.motion_detection_enabled

        return attrs

    @callback
    def async_update_token(self):
        """Update the used token."""
        # 256 random bits hashed to a hex digest; appending to the
        # two-slot deque drops the oldest token automatically.
        self.access_tokens.append(
            hashlib.sha256(_RND.getrandbits(256).to_bytes(32, "little")).hexdigest()
        )
class CameraView(HomeAssistantView):
    """Base CameraView."""

    # Auth is checked manually in get() so that token-based access works.
    requires_auth = False

    def __init__(self, component):
        """Initialize a basic camera view."""
        self.component = component

    async def get(self, request, entity_id):
        """Start a GET request."""
        camera = self.component.get_entity(entity_id)

        if camera is None:
            raise web.HTTPNotFound()

        # Allow either normally-authenticated requests or requests carrying
        # one of the camera's rotating access tokens.
        token_ok = request.query.get("token") in camera.access_tokens
        if not request[KEY_AUTHENTICATED] and not token_ok:
            raise web.HTTPUnauthorized()

        if not camera.is_on:
            _LOGGER.debug("Camera is off.")
            raise web.HTTPServiceUnavailable()

        return await self.handle(request, camera)

    async def handle(self, request, camera):
        """Handle the camera request."""
        raise NotImplementedError()
class CameraImageView(CameraView):
    """Camera view to serve an image."""

    url = "/api/camera_proxy/{entity_id}"
    name = "api:camera:image"

    async def handle(self, request, camera):
        """Serve a single still image from the camera."""
        image = None
        try:
            async with async_timeout.timeout(10):
                image = await camera.async_camera_image()
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # A slow or cancelled grab is treated the same as "no image".
            image = None

        if image:
            return web.Response(body=image, content_type=camera.content_type)

        raise web.HTTPInternalServerError()
class CameraMjpegStream(CameraView):
    """Camera View to serve an MJPEG stream."""

    url = "/api/camera_proxy_stream/{entity_id}"
    name = "api:camera:stream"

    async def handle(self, request, camera):
        """Serve camera stream, possibly with interval.

        Without an ``interval`` query parameter the camera's native MJPEG
        handler is used; otherwise a stream is composed from still images
        captured every ``interval`` seconds.

        Raises HTTPBadRequest when ``interval`` is not a float or is
        below MIN_STREAM_INTERVAL.
        """
        interval_str = request.query.get("interval")
        if interval_str is None:
            return await camera.handle_async_mjpeg_stream(request)

        try:
            # Compose camera stream from stills.
            # Parse the value fetched above instead of re-reading the query.
            interval = float(interval_str)
            if interval < MIN_STREAM_INTERVAL:
                # Fixed doubled word in the original message ("be be").
                raise ValueError(f"Stream interval must be > {MIN_STREAM_INTERVAL}")
            return await camera.handle_async_still_stream(request, interval)
        except ValueError:
            raise web.HTTPBadRequest()
@websocket_api.async_response
async def websocket_camera_thumbnail(hass, connection, msg):
    """Handle get camera thumbnail websocket command.

    Async friendly.
    """
    try:
        image = await async_get_image(hass, msg["entity_id"])
        payload = {
            "content_type": image.content_type,
            "content": base64.b64encode(image.content).decode("utf-8"),
        }
        await connection.send_big_result(msg["id"], payload)
    except HomeAssistantError:
        connection.send_message(
            websocket_api.error_message(
                msg["id"], "image_fetch_failed", "Unable to fetch image"
            )
        )
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "camera/stream",
        vol.Required("entity_id"): cv.entity_id,
        vol.Optional("format", default="hls"): vol.In(OUTPUT_FORMATS),
    }
)
async def ws_camera_stream(hass, connection, msg):
    """Handle get camera stream websocket command.

    Async friendly.
    """
    try:
        entity_id = msg["entity_id"]
        camera = _get_camera_from_entity_id(hass, entity_id)
        prefs = hass.data[DATA_CAMERA_PREFS].get(entity_id)

        async with async_timeout.timeout(10):
            source = await camera.stream_source()

        if not source:
            raise HomeAssistantError(
                f"{camera.entity_id} does not support play stream service"
            )

        stream_url = request_stream(
            hass, source, fmt=msg["format"], keepalive=prefs.preload_stream
        )
        connection.send_result(msg["id"], {"url": stream_url})
    except HomeAssistantError as ex:
        _LOGGER.error("Error requesting stream: %s", ex)
        connection.send_error(msg["id"], "start_stream_failed", str(ex))
    except asyncio.TimeoutError:
        _LOGGER.error("Timeout getting stream source")
        connection.send_error(
            msg["id"], "start_stream_failed", "Timeout getting stream source"
        )
@websocket_api.async_response
@websocket_api.websocket_command(
    {vol.Required("type"): "camera/get_prefs", vol.Required("entity_id"): cv.entity_id}
)
async def websocket_get_prefs(hass, connection, msg):
    """Handle request for account info."""
    entity_prefs = hass.data[DATA_CAMERA_PREFS].get(msg["entity_id"])
    connection.send_result(msg["id"], entity_prefs.as_dict())
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "camera/update_prefs",
        vol.Required("entity_id"): cv.entity_id,
        vol.Optional("preload_stream"): bool,
    }
)
async def websocket_update_prefs(hass, connection, msg):
    """Handle request for account info."""
    prefs = hass.data[DATA_CAMERA_PREFS]

    # Everything in the message besides the envelope fields is a pref change.
    changes = dict(msg)
    for envelope_key in ("id", "type"):
        changes.pop(envelope_key)
    entity_id = changes.pop("entity_id")

    await prefs.async_update(entity_id, **changes)
    connection.send_result(msg["id"], prefs.get(entity_id).as_dict())
async def async_handle_snapshot_service(camera, service):
    """Handle snapshot services calls: write one frame to disk."""
    hass = camera.hass
    template = service.data[ATTR_FILENAME]
    template.hass = hass
    snapshot_file = template.async_render(variables={ATTR_ENTITY_ID: camera})

    # Refuse paths outside the directories Home Assistant is allowed to use.
    if not hass.config.is_allowed_path(snapshot_file):
        _LOGGER.error("Can't write %s, no access to path!", snapshot_file)
        return

    image = await camera.async_camera_image()

    def _save(target_path, payload):
        """Executor helper to write image."""
        with open(target_path, "wb") as out:
            out.write(payload)

    try:
        await hass.async_add_executor_job(_save, snapshot_file, image)
    except OSError as err:
        _LOGGER.error("Can't write image to file: %s", err)
async def async_handle_play_stream_service(camera, service_call):
    """Handle play stream services calls: cast the stream to media players."""
    async with async_timeout.timeout(10):
        source = await camera.stream_source()

    if not source:
        raise HomeAssistantError(
            f"{camera.entity_id} does not support play stream service"
        )

    hass = camera.hass
    camera_prefs = hass.data[DATA_CAMERA_PREFS].get(camera.entity_id)
    fmt = service_call.data[ATTR_FORMAT]
    url = request_stream(hass, source, fmt=fmt, keepalive=camera_prefs.preload_stream)

    payload = {
        ATTR_ENTITY_ID: service_call.data[ATTR_MEDIA_PLAYER],
        ATTR_MEDIA_CONTENT_ID: f"{hass.config.api.base_url}{url}",
        ATTR_MEDIA_CONTENT_TYPE: FORMAT_CONTENT_TYPE[fmt],
    }
    await hass.services.async_call(
        DOMAIN_MP,
        SERVICE_PLAY_MEDIA,
        payload,
        blocking=True,
        context=service_call.context,
    )
async def async_handle_record_service(camera, call):
    """Handle stream recording service calls."""
    async with async_timeout.timeout(10):
        source = await camera.stream_source()

    if not source:
        raise HomeAssistantError(f"{camera.entity_id} does not support record service")

    hass = camera.hass
    template = call.data[CONF_FILENAME]
    template.hass = hass
    video_path = template.async_render(variables={ATTR_ENTITY_ID: camera})

    await hass.services.async_call(
        DOMAIN_STREAM,
        SERVICE_RECORD,
        {
            CONF_STREAM_SOURCE: source,
            CONF_FILENAME: video_path,
            CONF_DURATION: call.data[CONF_DURATION],
            CONF_LOOKBACK: call.data[CONF_LOOKBACK],
        },
        blocking=True,
        context=call.context,
    )
| |
"""
users must be at least 22 to use the form
users past their full retirement age will get only their current benefit amount
We'll need to ask the user for DOB and current annual earnings
returns estimated benefit at 62, 67, and 70 based on the value of the dollar today
inputs needed:
Date of birth: 8/14/1956
Current earnings: 50000
optional inputs (not currently used):
Last year with earnings
Last earnings
Retirement month/year: 8/2026
Benefit in inflated dollars; we're using default of current-year dollars
"""
import re
import requests
import json
import datetime
import math
import lxml
import time
import signal
from bs4 import BeautifulSoup as bs
from .ss_utilities import get_retirement_age, get_current_age, past_fra_test
# Seconds to wait for SSA's Quick Calculator before giving up.
timeout_seconds = 10

# User-facing HTML notes returned when an estimate can't be produced.
down_note = """<span class="h4">Sorry, the Social Security website \
is not responding, so we can't estimate your benefits.</span> \
Please try again in a few minutes."""
no_earnings_note = """<span class="h4">Sorry, we cannot provide an estimate \
because your entered annual income is less than \
the minimum needed to make the estimate.</span>"""

base_url = "http://www.ssa.gov"
quick_url = "%s/OACT/quickcalc/" % base_url  # where users go; not needed here
result_url = "%s/cgi-bin/benefit6.cgi" % base_url  # CGI endpoint we POST to

# Claiming ages shown in the output chart: 62 through 70 inclusive.
chart_ages = range(62, 71)

# NOTE: shadows the builtin-free name used by clean_comment()'s parameter.
comment = re.compile(r"<!--[\s\S]*?-->")  # regex for parsing indexing data
def clean_comment(comment):
    """Strip HTML comment delimiters and surrounding whitespace."""
    stripped = comment.replace('<!--', '')
    stripped = stripped.replace('-->', '')
    return stripped.strip()
def num_test(value=''):
    """Return True if ``value`` can be read as a number, else False.

    Accepts ints, floats, and numeric strings ('50000', '50000.5').
    Returns False for anything int()/float() can't convert (None, '',
    'abc', lists, ...) and for non-finite strings like 'inf'.
    """
    # First try a direct integer conversion.
    try:
        int(value)
        return True
    except (TypeError, ValueError):
        pass
    # Fall back to parsing as a float (e.g. '50000.5').
    # OverflowError covers int(float('inf')); the original bare except
    # also swallowed it (along with everything else, including
    # KeyboardInterrupt -- which is why the bare excepts were removed).
    try:
        int(float(value))
        return True
    except (TypeError, ValueError, OverflowError):
        return False
# unused for now
def parse_details(rows):
datad = {}
if len(rows) == 3:
titlerow = rows[0].split(':')
datad[titlerow[0].strip().upper()] = {'Bend points':
titlerow[1].strip()}
outer = datad[titlerow[0].strip().upper()]
outer['AIME'] = rows[1]
outer['COLA'] = rows[2]
return datad
def interpolate_benefits(benefits, fra_tuple, current_age, born_on_2nd=False):
    """
    estimates benefits for years above and below the full-retirement age (FRA);
    calculations are slightly different for people born on the 2nd of the month

    benefits: dict keyed 'age 62' .. 'age 70'; the FRA entry is already
        filled in and the other ages are interpolated here in place.
    fra_tuple: (years, months) full retirement age, e.g. (66, 4) or (67, 0).
    current_age: visitor's age in whole years.
    born_on_2nd: people born on the 2nd get one extra countable month
        of reduction at age 62.

    NOTE(review): 0.00555555 approximates 5/9 of 1% (monthly reduction for
    the 36 months closest to FRA) and 0.004166666 approximates 5/12 of 1%
    (months beyond 36); delayed credits accrue at 8% per year -- TODO
    confirm against SSA's published reduction/credit schedule.
    """
    fra = fra_tuple[0]  # could be 66 + x number of months, or 67
    # fill out the missing years, working backward and forward from the FRA
    if fra == 67:
        base = benefits['age 67']
        if born_on_2nd:
            # 36 months at 5/9% plus 24 months at 5/12% below FRA
            benefits['age 62'] = int(round(base - base*(3*12*(0.00555555)) -
                                     base*(2*12*0.004166666)))
        else:
            # non-2nd birthdays count one less month (2*11) in the tail
            benefits['age 62'] = int(round(base - base*(3*12*(0.00555555)) -
                                     base*(2*11*0.004166666)))
        benefits['age 63'] = int(round(base - base*(3*12*(0.00555555)) -
                                 base*(1*12*0.004166666)))
        benefits['age 64'] = int(round(base - base*(3*12*(0.00555555))))
        benefits['age 65'] = int(round(base - base*(2*12*(0.00555555))))
        benefits['age 66'] = int(round(base - base*(1*12*(0.00555555))))
        # delayed retirement credits: 8% of base per year past FRA
        benefits['age 68'] = int(round(base + (base * 0.08)))
        benefits['age 69'] = int(round(base + (2 * (base * 0.08))))
        benefits['age 70'] = int(round(base + (3 * (base * 0.08))))
    elif fra == 66 and current_age < 66:
        base = benefits['age 66']
        month_increment = (base * 0.08)/12  # monthly delayed credit
        diff_forward = 12 - fra_tuple[1]    # months from FRA up to age 67
        diff_back = 12 + fra_tuple[1]       # months from age 65 back to FRA
        benefits['age 67'] = int(round(base +
                                 (month_increment*diff_forward)))
        benefits['age 68'] = int(round(base +
                                 (month_increment*(12 + diff_forward))))
        benefits['age 69'] = int(round(base +
                                 (month_increment*(24 + diff_forward))))
        benefits['age 70'] = int(round(base +
                                 (month_increment*(36 + diff_forward))))
        if current_age == 65:
            # FRA is 66; need to fill in 65
            benefits['age 62'] = 0
            benefits['age 63'] = 0
            benefits['age 64'] = 0
            benefits['age 65'] = int(round(base -
                                     base*(diff_back*(0.00555555))))
        elif current_age == 64:
            # FRA is 66; need to fill in 64 and 65
            benefits['age 62'] = 0
            benefits['age 63'] = 0
            benefits['age 64'] = int(round(base -
                                     base*((diff_back + 12)*(0.00555555))))
            benefits['age 65'] = int(round(base -
                                     base*(diff_back*(0.00555555))))
        elif current_age in range(55, 64):
            # ages 55 to 63: FRA is 66; need to fill in 62, 63, 64 and 65
            if born_on_2nd:
                benefits['age 62'] = int(round(base -
                                         base*((diff_back + 24)*(0.00555555)) -
                                         base*(1*12*0.004166666)))
            else:
                benefits['age 62'] = int(round(base -
                                         base*((diff_back + 24)*(0.00555555)) -
                                         base*(1*11*0.004166666)))
            benefits['age 63'] = int(round(base -
                                     base*((diff_back + 24)*(0.00555555))))
            benefits['age 64'] = int(round(base -
                                     base*((diff_back + 12)*(0.00555555))))
            benefits['age 65'] = int(round(base -
                                     base*(diff_back*(0.00555555))))
    return benefits
# sample params for a POST to SSA's Quick Calculator (result_url)
params = {
    'dobmon': 8,
    'dobday': 14,
    'yob': 1970,
    'earnings': 70000,
    'lastYearEarn': '',  # possible use for unemployed or already retired
    'lastEarn': '',  # possible use for unemployed or already retired
    'retiremonth': '',  # leave blank to get triple calculation -- 62, 67 and 70
    'retireyear': '',  # leave blank to get triple calculation -- 62, 67 and 70
    'dollars': 1,  # benefits to be calculated in current-year dollars
    'prgf': 2  # form parameter required by the CGI -- meaning not documented here
}
def get_retire_data(params):
    """POST ``params`` to SSA's Quick Calculator and return a JSON string.

    Builds a ``results`` dict (benefit estimates for ages 62-70, disability
    and survivor figures scraped from the response HTML, plus ``error`` /
    ``note`` fields when no estimate is possible) and returns
    ``json.dumps(results)`` on every code path.

    NOTE: this module is Python 2 (print statements below).
    """
    born_on_2nd = False
    if params['dobday']:
        try:
            born_on_test = int(params['dobday'])
        except:
            pass
        else:
            if born_on_test == 2:
                # interpolate_benefits applies a slightly different
                # reduction for people born on the 2nd of a month
                born_on_2nd = True
    starter = datetime.datetime.now()  # for the timing printout at the end
    collector = {}
    benefits = {}
    for age in chart_ages:
        benefits["age %s" % age] = 0
    dobstring = "%s-%s-%s" % (params['yob'],
                              params['dobmon'],
                              params['dobday'])
    # Skeleton of the payload every return path serializes.
    results = {'data': {
                   'early retirement age': '',
                   'full retirement age': '',
                   'benefits': benefits,
                   'params': params,
                   'disability': '',
                   'survivor benefits': {
                       'child': '',
                       'spouse caring for child': '',
                       'spouse at full retirement age': '',
                       'family maximum': ''
                       }
                   },
               'current_age': 0,
               'error': '',
               'note': '',
               'past_fra': False,
               }
    BENS = results['data']['benefits']  # shorthand; aliases the dict above
    current_age = get_current_age(dobstring)
    results['current_age'] = current_age
    # past_fra_test returns True/False, or an error string for bad input
    past_fra = past_fra_test(dobstring)
    if past_fra is False:
        pass
    elif past_fra is True:
        results['past_fra'] = True
        results['note'] = "Age %s is past your full benefit claiming age." % current_age
        # results['note'] = "You are past Social Security's full retirement age."
    else:  # if neither False nor True, there's an error and we need to bail
        if current_age > 70:
            results['past_fra'] = True
            results['note'] = past_fra
            results['error'] = "visitor too old for tool"
            return json.dumps(results)
        elif current_age < 22:
            results['note'] = past_fra
            results['error'] = "visitor too young for tool"
            return json.dumps(results)
        elif 'invalid' in past_fra:
            results['note'] = "An invalid date was entered."
            results['error'] = past_fra
            return json.dumps(results)
    try:
        req = requests.post(result_url, data=params, timeout=timeout_seconds)
    except requests.exceptions.ConnectionError as e:
        results['error'] = "connection error at SSA's website: %s" % e
        results['note'] = down_note
        return json.dumps(results)
    except requests.exceptions.Timeout:
        results['error'] = "SSA's website timed out"
        results['note'] = down_note
        return json.dumps(results)
    except requests.exceptions.RequestException as e:
        results['error'] = "request error at SSA's website: %s" % e
        results['note'] = down_note
        return json.dumps(results)
    except:
        # NOTE(review): if requests.post itself raised something else,
        # ``req`` was never bound, so this line raises NameError -- verify
        results['error'] = "%s error at SSA's website" % req.reason
        results['note'] = down_note
        return json.dumps(results)
    if not req.ok:
        results['error'] = "SSA's website is not responding.\
Status code: %s (%s)" % (req.status_code,
                         req.reason)
        results['note'] = down_note
        return json.dumps(results)
    if int(params['dobmon']) == 1 and int(params['dobday']) == 1:
        # SSA has a special rule for people born on Jan. 1:
        # http://www.socialsecurity.gov/OACT/ProgData/nra.html
        yob = int(params['yob']) - 1
        yobstring = "%s" % yob
    else:
        yobstring = params['yob']
    fra_tuple = get_retirement_age(yobstring)
    soup = bs(req.text, 'lxml')
    # SSA's results page marks its data tables with this bordercolor
    tables = soup.findAll('table', {'bordercolor': '#6699ff'})
    results_table, disability_table, survivors_table = (None, None, None)
    for each in tables:
        if each.find('th') and 'Retirement age' in each.find('th').text:
            results_table = each
        elif each.find('th') and 'Disability' in each.find('th').text:
            disability_table = each
        elif each.find('th') and "Survivors" in each.find('th').text:
            survivors_table = each
    if past_fra is True:
        # Past FRA the page shows one current benefit (span#ret_amount)
        # instead of the three-age table; project ages forward at 8%/yr.
        results['data']['disability'] = "You have reached full retirement age \
and are not eligible for disability benefits."
        ret_amount_raw = soup.find('span', {'id': 'ret_amount'})
        if not ret_amount_raw:
            results['error'] = "benefit is zero"
            results['note'] = no_earnings_note
            return json.dumps(results)
        else:
            ret_amount = ret_amount_raw.text.split('.')[0]  # drop cents
            base = int(ret_amount.replace(',', ''))
            increment = base * 0.08  # delayed credits: 8% per year, to 70
            if current_age == 66:
                BENS['age 66'] = round(base)
                BENS['age 67'] = round(base + increment)
                BENS['age 68'] = round(base + 2*increment)
                BENS['age 69'] = round(base + 3*increment)
                BENS['age 70'] = round(base + 4*increment)
            elif current_age == 67:
                BENS['age 67'] = round(base)
                BENS['age 68'] = round(base + increment)
                BENS['age 69'] = round(base + 2*increment)
                BENS['age 70'] = round(base + 3*increment)
            elif current_age == 68:
                BENS['age 68'] = round(base)
                BENS['age 69'] = round(base + increment)
                BENS['age 70'] = round(base + 2*increment)
            elif current_age == 69:
                BENS['age 69'] = round(base)
                BENS['age 70'] = round(base + increment)
            elif current_age == 70:
                BENS['age 70'] = round(base)
            else:  # older than 70
                BENS['age 70'] = round(base)
            results['note'] = "Your monthly benefit \
at %s is $%s" % (current_age, ret_amount)
    else:
        if results_table:
            result_rows = results_table.findAll('tr')
            for row in result_rows:
                cells = row.findAll('td')
                if cells:
                    collector[cells[0].text] = cells[1].text
            """
            collector:
                70 in 2047: "$2,719.00",
                67 in 2044: "$2,180.00",
                62 and 1 month in 2039: "$1,515.00"
            """
            for key in collector:
                bits = key.split(' in ')
                benefit_age_raw = bits[0]
                benefit_age_year = bits[0].split()[0]
                # benefit_in_year = bits[1]# not using
                benefit_raw = collector[key]
                benefit = int(benefit_raw.split('.')[0].replace(',', '').replace('$', ''))
                # only the FRA row is taken from the page; the other ages
                # are interpolated below
                if benefit_age_year == str(fra_tuple[0]):
                    results['data']['full retirement age'] = benefit_age_raw
                    BENS['age %s' % benefit_age_year] = benefit
                # if benefit_age_year == '62':
                #     results['data']['early retirement age'] = benefit_age_raw
                #     BENS['age %s' % benefit_age_year] = benefit
                # if benefit_age_year == '70':
                #     BENS['age %s' % benefit_age_year] = benefit
            additions = interpolate_benefits(BENS,
                                             fra_tuple,
                                             current_age,
                                             born_on_2nd=born_on_2nd)
            # fill gaps only; never overwrite a value scraped from SSA
            for key in BENS:
                if additions[key] and not BENS[key]:
                    BENS[key] = additions[key]
        else:
            if soup.find('p') and 'insufficient' in soup.find('p').text:
                results['error'] = "benefit is zero"
                results['note'] = no_earnings_note
                return json.dumps(results)
            else:
                results['error'] = "SSA is not returning good data"
                results['note'] = down_note
                return json.dumps(results)
    if disability_table:
        results['data']['disability'] = disability_table.findAll('td')[1].text.split('.')[0]
    # SURVIVORS KEYS
    # 'Your child'
    # 'Family maximum'
    # 'Your spouse at normal retirement age'
    # 'Your spouse caring for your child'
    #
    # RESULTS['DATA']['SURVIVOR BENEFITS'] KEYS
    # 'spouse at full retirement age'
    # 'family maximum'
    # 'spouse caring for child'
    # 'child'
    if survivors_table:
        SURV = results['data']['survivor benefits']
        survivors = {}
        survivor_rows = survivors_table.findAll('tr')
        for row in survivor_rows:
            cells = row.findAll('td')
            if cells:
                survivors[cells[0].text] = cells[1].text.split('.')[0]
        if 'Your child' in survivors:
            SURV['child'] = survivors['Your child']
        if 'Family maximum' in survivors:
            SURV['family maximum'] = survivors['Family maximum']
        if 'Your spouse at normal retirement age' in survivors:
            SURV['spouse at full retirement age'] = survivors['Your spouse at normal retirement age']
        if 'Your spouse caring for your child' in survivors:
            SURV['spouse caring for child'] = survivors['Your spouse caring for your child']
    if not results['data']['full retirement age']:
        # fall back to the computed FRA when the page didn't provide one
        if fra_tuple[1]:
            FRA = "%s and %s months" % (fra_tuple[0], fra_tuple[1])
        else:
            FRA = "%s" % fra_tuple[0]
        results['data']['full retirement age'] = FRA
    print "script took %s to run" % (datetime.datetime.now() - starter)
    # # to dump json for testing:
    # with open('/tmp/ssa.json', 'w') as f:
    #     f.write(json.dumps(results))
    return json.dumps(results)
| |
# S.D. Peckham
# October 14, 2009
import os
import os.path
import sys
import numpy
import bov_files
import file_utils
import rti_files
#-------------------------------------------------------------------
#
# unit_test()
#
# class rts_file():
#
# open_file()
# open_new_file()
# add_grid()
# get_grid()
# close_file()
# close()
# --------------------
# byte_swap_needed()
# number_of_grids()
#
#-------------------------------------------------------------------
def unit_test(nx=4, ny=5, n_grids=6, VERBOSE=False,
              file_name="TEST_FILE.rts"):
    """Exercise rts_file(): write, read back, update, and re-read a file.

    Creates an RTS file with n_grids (ny x nx) float grids, prints each
    grid after reading it back, overwrites the first grid with ones, and
    prints the contents again.  Python 2 only (print statements, xrange).
    """
    print 'Running unit_test()...'

    #------------------------------------
    # Make instance of rts_file() class
    #------------------------------------
    rts = rts_file()
    dx = 100
    dy = 100

    #---------------------------------
    # These are unused for RTS files
    #---------------------------------
    ## grid_name  = "depth"
    ## long_name  = "depth of water"
    ## units_name = "meters"

    info = rti_files.make_info( file_name, nx, ny, dx, dy )
    OK = rts.open_new_file( file_name, info )
    if not(OK):
        print 'ERROR during open_new_file().'
        return

    # Sequential values 0..nx*ny-1 reshaped into an (ny, nx) grid.
    grid = numpy.arange(nx * ny, dtype='Float32')
    grid = grid.reshape( (ny, nx) )

    #----------------------------------
    # Add some test grids to the file
    #----------------------------------
    # Each successive grid is the previous one plus 1.
    for time_index in xrange(n_grids):
        rts.add_grid( grid )
        grid = (grid + 1)

    rts.close_file()
    print 'Finished writing file: ' + file_name
    print ' '

    #---------------------------------------------
    # Re-open the file and read grids one-by-one
    #---------------------------------------------
    OK = rts.open_file( file_name )
    if not(OK): return
    n_grids = rts.number_of_grids()
    print 'Reading grids from RTS file: '
    print 'rts.number_of_grids()  =', n_grids
    print 'rts.byte_swap_needed() =', rts.byte_swap_needed()
    print ' '
    for time_index in xrange(n_grids):
        grid = rts.get_grid( time_index )
        print 'grid[' + str(time_index) + '] = '
        print grid
        print '-----------------------------------------------'

    #----------------------------
    # Go back and read 2nd grid
    #----------------------------
    # Verifies random access by time_index after sequential reads.
    grid = rts.get_grid( 1 )
    print ' '
    print 'Reading second grid again...'
    print 'Second grid ='
    print grid
    print '-----------------------------------------------'
    rts.close_file()
    print 'Finished reading file: ' + file_name
    print ' '

    #---------------------------------------
    # Re-open the file and change one grid
    #---------------------------------------
    print 'Updating RTS file:', file_name
    grid = numpy.ones( (ny, nx), dtype='Float32' )
    OK = rts.open_file( file_name, UPDATE=True )
    if not(OK): return
    # Overwrite the grid at time_index 0 in place.
    rts.add_grid( grid, time_index=0 )
    rts.close_file()
    print 'Finished updating RTS file.'
    print ' '

    #---------------------------------------------
    # Re-open the file and read grids one-by-one
    #---------------------------------------------
    OK = rts.open_file( file_name )
    if not(OK): return
    n_grids = rts.number_of_grids()
    print 'Reading grids from RTS file: '
    print 'rts.number_of_grids()  =', n_grids
    print 'rts.byte_swap_needed() =', rts.byte_swap_needed()
    print ' '
    for time_index in xrange(n_grids):
        grid = rts.get_grid( time_index )
        print 'grid[' + str(time_index) + '] = '
        print grid
        print '-----------------------------------------------'
    rts.close_file()
    print 'Finished reading file: ' + file_name
    print ' '

#   unit_test()
#-------------------------------------------------------------------
class rts_file():
#----------------------------------------------------------
    def open_file(self, file_name, UPDATE=False):
        """Open an existing RTS file for reading (or read/write update).

        Loads grid geometry from the companion RTI header via
        rti_files.read_info() and caches it in state.  Returns True on
        success, False if the binary file can't be opened, and None
        (implicit) when the RTI header can't be read.
        """
        info = rti_files.read_info( file_name )
        if (info == -1): return  # read_info signals failure with -1

        #----------------------
        # Store info in state
        #----------------------
        self.info = info
        self.nx   = info.ncols
        self.ny   = info.nrows
        self.dx   = info.xres
        self.dy   = info.yres

        # One grid occupies nx * ny * bytes-per-element in the file;
        # used by add_grid()/get_grid() to seek by time_index.
        BPE = rti_files.get_bpe( info.data_type )
        self.grid_size   = (self.nx * self.ny * BPE)
        self.SWAP_ENDIAN = self.byte_swap_needed()
        self.file_name   = file_name
        self.time_index  = 0

        #-----------------------------------
        # Open file to read only or update
        #-----------------------------------
        try:
            if (UPDATE):
                rts_unit = open(file_name, 'rb+')
                self.rts_unit = rts_unit
            else:
                rts_unit = open(file_name, 'rb')
                self.rts_unit = rts_unit
            ### return rts_unit
            return True
        except:
            print 'ERROR during rts.open_file().'
            return False

    #   open_file()
#----------------------------------------------------------
    def check_and_store_info(self, file_name, info=None,
                             var_name='UNKNOWN',
                             dtype='float32',
                             MAKE_RTI=True, MAKE_BOV=False):
        """Record file metadata in state and optionally write RTI/BOV headers.

        ``info`` may be passed in, reused from a previous open on this
        object, or recovered from an existing RTI file on disk (in that
        priority order).  Returns None early when no header files are
        requested or when no ``info`` can be found.
        """
        #-----------------------------------------------------
        # Note: This object (self) may be new or it may have
        #       been used previously.  In the latter case,
        #       "info" should still be available in "self".
        #       We only need info if MAKE_RTI or MAKE_BOV.
        #-----------------------------------------------------
        self.format     = 'RTS'
        self.file_name  = file_name
        self.dtype      = dtype
        self.time_index = 0  # (need here for RTS files)
        if not(MAKE_RTI or MAKE_BOV): return

        #---------------------------------
        # Was "info" argument provided ?
        #---------------------------------
        NEW_INFO = True
        if (info is None):
            try:
                info = self.info
                NEW_INFO = False
                ## print 'Found info in state.'
            except:
                #------------------------------------------
                # Try to find RTI file to copy info from.
                # Don't create a new RTI file.
                #------------------------------------------
                RTI_file = rti_files.try_to_find_rti_file( file_name )
                if (RTI_file != 'none'):
                    info = rti_files.read_info( RTI_file )
                    ## print 'Reading info from: ' + RTI_file
                else:
                    print 'ERROR during open_new_file():'
                    print '   Could not find RTI file and "info"'
                    print '   argument was not provided.'
                    print ' '
                    return

        #-----------------------------
        # Update "info" as necessary
        #-----------------------------
        info.grid_file   = file_name
        info.data_type   = rti_files.get_rti_data_type( dtype )
        info.data_source = 'TopoFlow 3.0'
        # -9999.0 is the sentinel for "min/max not yet computed".
        info.gmin        = -9999.0
        info.gmax        = -9999.0

        #---------------------------------------
        # If new "info" was provided, store it
        #---------------------------------------
        if (NEW_INFO):
            self.info = info
            self.nx   = info.ncols
            self.ny   = info.nrows
            ## print 'Stored new info in state.'

##        #---------------------------------
##        # Was "info" argument provided ?
##        #---------------------------------
##        if (info is not None):
##            #------------------------------
##            # Save info to a new RTI file
##            #------------------------------
##            prefix   = rti_files.get_file_prefix( file_name )
##            RTI_file = (prefix + '.rti')
##            rti_files.write_info( RTI_file, info )
##
##        else:
##            #------------------------------------------
##            # Try to find RTI file to copy info from.
##            # Don't create a new RTI file.
##            #------------------------------------------
##            RTI_file = rti_files.try_to_find_rti_file( file_name )
##            if (RTI_file != 'none'):
##                info = rti_files.read_info( RTI_file )
##                info.file_name = file_name
##                info.data_type = rti_files.get_rti_data_type( dtype )
##            else:
##                print 'ERROR during open_new_file():'
##                print '   Could not find RTI file and "info"'
##                print '   argument was not provided.'
##                print ' '
##                return

        #-------------------
        # Write RTI file ?
        #-------------------
        if (MAKE_RTI):
            prefix   = rti_files.get_file_prefix( file_name )
            RTI_file = (prefix + '.rti')
            rti_files.write_info( RTI_file, info )
            # print 'Wrote grid info to: ' + RTI_file   ######

        #-------------------
        # Write BOV file ?
        #-------------------
        if (MAKE_BOV):
            bov_files.write_info_as_bov( file_name, info, var_name)
                                         ###  time )

    #   check_and_store_info()
#----------------------------------------------------------
    def open_new_file(self, file_name, info=None,
                      var_name='UNKNOWN',
                      dtype='float32',
                      VERBOSE=False,
                      MAKE_RTI=True, MAKE_BOV=False):
        """Create a new RTS file for writing; returns True on success.

        Delegates metadata handling (and optional RTI/BOV header
        creation) to check_and_store_info(), then opens the binary
        file in 'wb' mode.  Returns False if the file can't be opened.
        """
        #----------------------------
        # Does file already exist ?
        #----------------------------
        # check_overwrite() may return an adjusted name to avoid clobbering.
        file_name = file_utils.check_overwrite( file_name )
        self.file_name = file_name

        #---------------------------------------
        # Check and store the grid information
        #---------------------------------------
        self.check_and_store_info( file_name, info, var_name,
                                   dtype, MAKE_RTI, MAKE_BOV )

        #------------------------------------
        # Try to open new RTS file to write
        #------------------------------------
        try:
            if (VERBOSE):
                print 'Preparing to write new RTS file:'
                print '   ' + file_name
            self.rts_unit = open(file_name, 'wb')
            return True
        except:
            return False

    #   open_new_file()
#----------------------------------------------------------
def add_grid(self, grid, time_index=-1):
#------------------------------------------------------
# Notes: If the "grid" argument is actually a scalar
# then we still make sure to write a grid.
#
# In order to maintain a consistent byte
# order within a set of RTG and RTS files,
# the grid may be byte-swapped before it is
# written to the file. However, this must
# be done carefully, without setting the
# "inplace" argument to the byteswap method
# to True on the original grid. Otherwise,
# the byte order of the original grid will
# "flip-flop" every time this function is
# called.
#------------------------------------------------------
dtype = self.dtype # (set in open_new_file())
#---------------------------------------------
# Can use time_index to move file pointer
# and overwrite an existing grid vs. append.
#---------------------------------------------
if (time_index >= 0):
offset = (time_index * self.grid_size)
self.rts_unit.seek( offset )
#-------------------------------------------------
# Convert grid to Float32 (usually from Float64)
#-------------------------------------------------
out_grid = grid.copy().astype(dtype)
#-------------------------------------------------
# Convert byteorder, if needed to match the byte
# order of other RTG and RTS files in data set.
#-------------------------------------------------
if (self.info.SWAP_ENDIAN):
inplace = True
out_grid.byteswap(inplace)
#---------------------------------------
# Convert "grid" from scalar to grid ?
#---------------------------------------
if (numpy.ndim(out_grid) == 0):
out_grid += numpy.zeros([self.ny, self.nx], dtype=dtype)
#--------------------------------------------
# Write grid as binary to existing RTS file
#--------------------------------------------
out_grid.tofile( self.rts_unit )
#-------------------------
# Advance the time_index
#-------------------------
self.time_index += 1
## #--------------------------------------------
## # Write grid as binary to existing RTS file
## #--------------------------------------------
## if (numpy.ndim(grid) == 0):
## #-----------------------------------------------
## # "grid" is actually a scalar (dynamic typing)
## # so convert it to a grid before saving
## #-----------------------------------------------
## grid2 = grid + numpy.zeros([self.ny, self.nx], dtype='Float32')
## if (self.info.SWAP_ENDIAN):
## grid2.byteswap().tofile( self.rts_unit )
## else:
## grid2.tofile( self.rts_unit )
## else:
## if (self.info.SWAP_ENDIAN):
## grid.byteswap().tofile( self.rts_unit )
## else:
## grid.tofile( self.rts_unit )
## self.time_index += 1
#-------------------------------
# Write grid as binary to file
#-------------------------------
## grid.tofile( self.rts_unit )
## self.time_index += 1
# add_grid()
#----------------------------------------------------------
def get_grid(self, time_index, dtype='float32'):
#-----------------------------------------------
# Compute offset from time_index and grid_size
#-----------------------------------------------
n_values = self.nx * self.ny
offset = (time_index * self.grid_size)
self.rts_unit.seek( offset )
grid = numpy.fromfile( self.rts_unit, count=n_values,
dtype=dtype )
grid = grid.reshape( self.ny, self.nx )
#--------------------------------
# Swap byte order, if necessary
#--------------------------------
if (self.info.SWAP_ENDIAN):
inplace = True
grid.byteswap(inplace)
return grid
# get_grid()
#-------------------------------------------------------------------
    def close_file(self):
        # Close the underlying RTS file (same effect as close()).
        self.rts_unit.close()
# close_file()
#-------------------------------------------------------------------
    def close(self):
        # Close the underlying RTS file.
        self.rts_unit.close()
# close()
#-------------------------------------------------------------------
def byte_swap_needed(self):
machine_byte_order = rti_files.get_rti_byte_order()
SWAP = (machine_byte_order != self.info.byte_order)
return SWAP
# byte_swap_needed()
#-------------------------------------------------------------------
def number_of_grids(self):
file_size = os.path.getsize( self.file_name )
n_grids = (file_size / self.grid_size)
# self.file_size = file_size
# self.n_grids = n_grids
return n_grids
# number_of_grids()
#-------------------------------------------------------------------
| |
from collections import OrderedDict, Counter, namedtuple
import random
import functools
import json
import math
import uuid
import io
import csv
import datetime
from flask import Flask, render_template, current_app, Markup, abort, url_for
from flask import make_response, request
from flask.json import jsonify
from sqlalchemy import func, and_, or_, create_engine
from sqlalchemy.orm import subqueryload, eagerload, sessionmaker, joinedload
from jinja2 import StrictUndefined
import markdown
from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE
from . import tables
from . import queries
# Full turn in radians; used by the pie-chart SVG template.
tau = 2 * math.pi

# Package statuses that count as "done" for porting purposes.
DONE_STATUSES = {'released', 'dropped', 'py3-only'}
def hello():
    """Front page: render index.html with per-collection status counts,
    per-status package lists, group summaries and naming-policy progress.
    """
    db = current_app.config['DB']()
    # Collections, with their statuses eagerly loaded in one subquery.
    query = queries.collections(db)
    query = query.options(subqueryload('collection_statuses'))
    collections = list(query)
    # Per-collection package counts, broken down by status in status order.
    coll_info = {}
    for i, collection in enumerate(collections):
        query = db.query(tables.CollectionPackage.status,
                         func.count(tables.CollectionPackage.id))
        query = query.filter(tables.CollectionPackage.collection == collection)
        query = query.join(tables.CollectionPackage.status_obj)
        query = query.group_by(tables.CollectionPackage.status)
        query = query.order_by(tables.Status.order)
        data = OrderedDict(query)
        total = sum(v for k, v in data.items())
        coll_info[collection] = {
            'total': total,
            'data': data,
        }
    # Main package query
    query = queries.packages(db)
    total_pkg_count = query.count()
    active = query.filter(tables.Package.status == 'in-progress')
    active = queries.order_by_weight(db, active)
    active = queries.order_by_name(db, active)
    active = active.options(subqueryload('collection_packages'))
    active = active.options(subqueryload('collection_packages.links'))
    py3_only = query.filter(tables.Package.status == 'py3-only')
    py3_only = queries.order_by_name(db, py3_only)
    released = query.filter(tables.Package.status == 'released')
    released = queries.order_by_name(db, released)
    dropped = query.filter(tables.Package.status == 'dropped')
    dropped = queries.order_by_name(db, dropped)
    mispackaged = query.filter(tables.Package.status == 'mispackaged')
    mispackaged = mispackaged.options(subqueryload('collection_packages'))
    mispackaged = mispackaged.options(subqueryload('collection_packages.tracking_bugs'))
    mispackaged = mispackaged.join(tables.CollectionPackage)
    # Outer join restricted to 'bug' links so other link types
    # do not affect the ordering below.
    mispackaged = mispackaged.outerjoin(
        tables.Link,
        and_(tables.Link.type == 'bug',
             tables.Link.collection_package_id == tables.CollectionPackage.id))
    # NULL last_update sorts last via the '9999' substitute
    # (sqlite has no NULLS LAST; same trick as in mispackaged()).
    mispackaged = mispackaged.order_by(func.ifnull(tables.Link.last_update, '9999'))
    mispackaged = queries.order_by_name(db, mispackaged)
    blocked = query.filter(tables.Package.status == 'blocked')
    blocked = blocked.options(subqueryload('requirements'))
    blocked = queries.order_by_name(db, blocked)
    ready = query.filter(tables.Package.status == 'idle')
    ready = ready.options(subqueryload('requirers'))
    ready = queries.order_by_name(db, ready)
    # Naming policy tracking.
    naming_progress, _ = get_naming_policy_progress(db)
    # Materialize all the per-status lists.
    active = list(active)
    py3_only = list(py3_only)
    released = list(released)
    ready = list(ready)
    blocked = list(blocked)
    mispackaged = list(mispackaged)
    dropped = list(dropped)
    # NOTE(review): raises IndexError if no package is mispackaged at
    # all -- confirm that cannot happen in practice.
    random_mispackaged = random.choice(mispackaged)
    # Check we account for all the packages
    sum_by_status = sum(len(x) for x in (active, released, py3_only, ready,
                                         blocked, mispackaged, dropped))
    assert sum_by_status == total_pkg_count
    # Fraction of packages that are done (ported or dropped).
    the_score = (len(py3_only) + len(released) + len(dropped)) / total_pkg_count
    # Nonblocking set query
    query = db.query(tables.Package)
    query = query.outerjoin(tables.Package.collection_packages)
    query = query.filter(tables.CollectionPackage.nonblocking)
    nonblocking = set(query)
    # Group query: per-group counts by status, visible/hidden separately.
    query = db.query(tables.Group)
    query = query.join(tables.Group.packages)
    query = query.join(tables.Package.status_obj)
    query = query.group_by(tables.Group.ident)
    query = query.group_by(tables.Package.status)
    query = query.order_by(tables.Status.order)
    query = query.order_by(tables.Group.name)
    query = query.add_columns(tables.Package.status,
                              func.count(tables.Package.name))
    groups = get_groups(db, query.filter(~tables.Group.hidden))
    hidden_groups = get_groups(db, query.filter(tables.Group.hidden))
    # Statuses with no. of packages
    statuses = OrderedDict(
        db.query(tables.Status, func.count(tables.Package.name))
        .outerjoin(tables.Status.packages)
        .group_by(tables.Status.ident)
        .order_by(tables.Status.order))
    return render_template(
        'index.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
        ),
        collections=collections,
        coll_info=coll_info,
        statuses=statuses,
        priorities=list(db.query(tables.Priority).order_by(tables.Priority.order)),
        total_pkg_count=total_pkg_count,
        status_summary=get_status_summary(db),
        active_packages=active,
        ready_packages=ready,
        blocked_packages=blocked,
        py3_only_packages=py3_only,
        released_packages=released,
        dropped_packages=dropped,
        mispackaged_packages=mispackaged,
        random_mispackaged=random_mispackaged,
        groups=groups,
        hidden_groups=hidden_groups,
        nonblocking=nonblocking,
        the_score=the_score,
        naming_progress=naming_progress,
    )
def get_groups(db, query):
    """Fold (group, status_ident, count) rows into an ordered mapping
    {group: OrderedDict{Status: package count}}, preserving row order."""
    result = OrderedDict()
    for grp, status_ident, num in query:
        status_obj = db.query(tables.Status).get(status_ident)
        counts = result.setdefault(grp, OrderedDict())
        counts[status_obj] = counts.get(status_obj, 0) + num
    return result
def jsonstats():
    """JSON endpoint: number of packages in each status."""
    db = current_app.config['DB']()
    query = queries.packages(db)
    stats = {
        status: query.filter(tables.Package.status == status).count()
        for status in ('in-progress', 'released', 'py3-only', 'dropped',
                       'mispackaged', 'blocked', 'idle')
    }
    return jsonify(**stats)
def get_status_summary(db, filter=None):
    """Return [(Status, package count)] pairs in status order.

    ``filter`` is an optional callable that narrows the query before
    grouping.  (The parameter deliberately keeps its caller-visible
    name even though it shadows the builtin.)
    """
    query = (db.query(tables.Status)
             .join(tables.Package, tables.Status.packages)
             .add_column(func.count(tables.Package.name)))
    if filter:
        query = filter(query)
    query = query.group_by(tables.Package.status)
    query = query.order_by(tables.Status.order)
    return list(query)
def get_status_counts(pkgs):
    """Count packages per status object, ordered by the status' ``order``."""
    tally = Counter(pkg.status_obj for pkg in pkgs)
    by_order = sorted(tally.items(), key=lambda pair: pair[0].order)
    return OrderedDict(by_order)
def package(pkg):
    """Detail page for one package, with its dependency tree; 404 if
    the package name is unknown."""
    db = current_app.config['DB']()
    collections = list(queries.collections(db))
    # Load the package with everything the template touches pre-fetched,
    # to avoid per-row queries while rendering.
    query = db.query(tables.Package)
    query = query.options(eagerload('status_obj'))
    query = query.options(subqueryload('collection_packages'))
    query = query.options(subqueryload('collection_packages.links'))
    query = query.options(eagerload('collection_packages.status_obj'))
    query = query.options(subqueryload('collection_packages.rpms'))
    query = query.options(eagerload('collection_packages.rpms.py_dependencies'))
    package = query.get(pkg)
    if package is None:
        abort(404)
    # Dependencies, with the same eager loading.
    query = queries.dependencies(db, package)
    query = query.options(eagerload('status_obj'))
    query = query.options(subqueryload('collection_packages'))
    query = query.options(subqueryload('collection_packages.links'))
    query = query.options(eagerload('collection_packages.status_obj'))
    dependencies = list(query)
    dependents = list(queries.dependents(db, package))
    in_progress_deps = [p for p in dependencies if p.status == 'in-progress']
    return render_template(
        'package.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('package', pkg=pkg), pkg),
        ),
        collections=collections,
        pkg=package,
        dependencies=dependencies,
        dependents=dependents,
        deptree=[(package, gen_deptree(dependencies))],
        in_progress_deps=in_progress_deps,
        len_dependencies=len(dependencies),
        dependencies_status_counts=get_status_counts(dependencies),
    )
def group(grp):
    """Detail page for a package group; 404 for an unknown ident."""
    db = current_app.config['DB']()
    collections = list(queries.collections(db))
    group = db.query(tables.Group).get(grp)
    if group is None:
        abort(404)
    # The group's packages, heaviest statuses (most work needed) first.
    query = db.query(tables.Package)
    query = query.join(tables.Package.group_packages)
    query = query.join(tables.GroupPackage.group)
    query = query.join(tables.Package.status_obj)
    query = query.filter(tables.Group.ident == grp)
    query = query.order_by(-tables.Status.weight)
    query = queries.order_by_name(db, query)
    query = query.options(subqueryload('collection_packages'))
    query = query.options(subqueryload('collection_packages.links'))
    packages = list(query)
    # Seed packages only; the dependency tree starts from these.
    query = query.filter(tables.GroupPackage.is_seed)
    seed_groups = query
    return render_template(
        'group.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('group', grp=grp), group.name),
        ),
        collections=collections,
        grp=group,
        packages=packages,
        len_packages=len(packages),
        deptree=list(gen_deptree(seed_groups)),
        status_counts=get_status_counts(packages),
    )
def gen_deptree(base, *, seen=None):
    """Yield (package, subtree) pairs forming a lazy dependency tree.

    Terminal nodes -- packages already seen, idle, or done -- get an
    empty subtree.  Every other package yields a nested generator over
    its requirements, sorted by descending status weight then name.
    ``seen`` guards against dependency cycles.
    """
    seen = seen or set()
    for pkg in tuple(base):
        if pkg in seen or pkg.status in {'idle'} | DONE_STATUSES:
            yield pkg, []
            continue
        ordered = sorted(pkg.requirements,
                         key=lambda req: (-req.status_obj.weight, req.name))
        yield pkg, gen_deptree(ordered, seen=seen | {pkg})
        # Mark as expanded only after handing out the subtree, so later
        # siblings repeat this package as a leaf rather than recursing.
        seen.add(pkg)
def markdown_filter(text):
    """Jinja filter: render Markdown *text* to HTML, marked safe."""
    return Markup(markdown.markdown(text))
def format_rpm_name(text):
    """Jinja filter: wrap the name part of an RPM "name-version-release"
    string in a highlighting span.

    Splits from the right so dashes inside the package name survive.
    """
    name, version, release = text.rsplit('-', 2)
    template = '<span class="rpm-name">{}</span>-{}-{}'
    return Markup(template.format(name, version, release))
def format_time_ago(date):
    """Displays roughly how long ago the date was in a human readable format"""
    days = (datetime.datetime.utcnow() - date).days
    # (threshold to enter the unit, unit length in days, unit name).
    # The month bucket starts at 31 days but divides by 30.
    for threshold, length, unit in ((365, 365, 'year'),
                                    (31, 30, 'month'),
                                    (7, 7, 'week')):
        if days >= threshold:
            if days >= 2 * length:
                return "{} {}s ago".format(math.floor(days / length), unit)
            return "a {} ago".format(unit)
    if days >= 2:
        return "{} days ago".format(days)
    if days == 1:
        return "yesterday"
    return "today"
def graph(grp=None, pkg=None):
    """Dependency-graph page (site-wide, one group, or one package)."""
    return render_template(
        'graph.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('graph'), 'Graph'),
        ),
        grp=grp,
        pkg=pkg,
    )
def graph_grp(grp):
    """Graph page restricted to one group."""
    return graph(grp=grp)
def graph_pkg(pkg):
    """Graph page centered on one package."""
    return graph(pkg=pkg)
def graph_json(grp=None, pkg=None):
    """JSON data (nodes + links) backing the dependency-graph view.

    With ``pkg`` set, the graph covers that package's transitive
    requirements and requirers (404 for an unknown package); otherwise
    all not-yet-done packages, optionally restricted to group ``grp``.
    Nodes with no edges are dropped from the output.
    """
    db = current_app.config['DB']()
    if pkg is None:
        query = queries.packages(db)
        query = query.filter(~tables.Package.status.in_(DONE_STATUSES))
        if grp:
            query = query.join(tables.GroupPackage)
            query = query.filter(tables.GroupPackage.group_ident == grp)
        query = query.options(joinedload(tables.Package.requirers))
        packages = list(query)
    else:
        query = db.query(tables.Package)
        root_package = query.get(pkg)
        if root_package is None:
            abort(404)
        # Transitive closure of not-yet-done requirements.
        todo = {root_package}
        requirements = set()
        while todo:
            package = todo.pop()
            if package not in requirements:
                requirements.add(package)
                todo.update(p for p in package.requirements
                            if p.status not in DONE_STATUSES)
        # Transitive closure of not-yet-done requirers.
        todo = {root_package}
        requirers = set()
        while todo:
            package = todo.pop()
            if package not in requirers:
                requirers.add(package)
                todo.update(p for p in package.requirers
                            if p.status not in DONE_STATUSES)
        packages = list(requirements | requirers | {root_package})
    package_names = {p.name for p in packages}
    # Dependency edges between the selected packages; nonblocking
    # requirements are left out.
    query = db.query(tables.Dependency)
    linked_pairs = {(d.requirer_name, d.requirement_name)
                    for d in query
                    if d.requirer_name in package_names
                    and d.requirement_name in package_names
                    and not d.requirement.nonblocking}
    linked_names = (set(p[0] for p in linked_pairs) |
                    set(p[1] for p in linked_pairs))
    if pkg:
        linked_names.add(pkg)
    nodes = [{'name': p.name,
              'status': p.status,
              'color': graph_color(p),
              'status_color': '#' + p.status_obj.color,
              'size': 3.5+math.log((p.loc_python or 1)+(p.loc_capi or 1), 50),
              'num_requirers': len(p.pending_requirers),
              'num_requirements': len(p.pending_requirements),
              }
             for p in packages
             if p.name in linked_names and p.name in package_names]
    names = [n['name'] for n in nodes]
    links = [{"source": names.index(d.requirer_name),
              "target": names.index(d.requirement_name),
              }
             for d in query
             if d.requirer_name in names and d.requirement_name in names
             and not d.requirement.nonblocking]
    # Drop nodes that ended up with no edges at all.
    nodes_in_links = (set(l['source'] for l in links) |
                      set(l['target'] for l in links))
    nodes = [n for i, n in enumerate(nodes) if i in nodes_in_links]
    return jsonify(nodes=nodes, links=links)
def graph_json_grp(grp):
    """Graph JSON restricted to one group."""
    return graph_json(grp=grp)
def graph_json_pkg(pkg):
    """Graph JSON centered on one package."""
    return graph_json(pkg=pkg)
def graph_color(package):
    """Return the package's graph-node color as '#rrggbb'.

    Each channel of the status color is remapped through a power curve;
    the exponent grows with the number of pending requirers, so
    heavily-depended-on packages get a less washed-out color.
    """
    exponent = 1.1 ** len(package.pending_requirers)

    def remap(channel):
        frac = (channel / 255) / 2
        frac = (frac ** 0.2) ** exponent
        return '{0:02x}'.format(int(frac * 255))

    status_color = package.status_obj.color
    channels = (int(status_color[i:i + 2], 16) for i in (0, 2, 4))
    return '#' + ''.join(remap(c) for c in channels)
def _piechart(status_summary, bg=None):
    """Render piechart.svg for [(status, count)] pairs as an SVG response.

    ``bg`` optionally supplies a background status object (used for the
    single-package chart).  The total is clamped to at least 1 to avoid
    division by zero in the template.
    """
    total_pkg_count = sum(c for s, c in status_summary)
    resp = make_response(render_template(
        'piechart.svg',
        status_summary=status_summary,
        total_pkg_count=total_pkg_count or 1,
        sin=math.sin, cos=math.cos, tau=tau,
        bg=bg,
    ))
    resp.headers['Content-type'] = 'image/svg+xml'
    return resp
def piechart_svg():
    """Site-wide status pie chart as SVG."""
    db = current_app.config['DB']()
    return _piechart(get_status_summary(db))
def piechart_grp(grp):
    """SVG pie chart for one group's status breakdown; 404 on bad ident."""
    db = current_app.config['DB']()
    group = db.query(tables.Group).get(grp)
    if group is None:
        abort(404)

    def filter(query):
        # Narrow the summary to this group's packages.  (Shadows the
        # builtin to match get_status_summary's parameter name.)
        query = query.join(tables.Package.group_packages)
        query = query.join(tables.GroupPackage.group)
        query = query.filter(tables.Group.ident == grp)
        return query
    return _piechart(get_status_summary(db, filter=filter))
def piechart_pkg(pkg):
    """Pie chart for one package: an empty summary with the package's
    status passed as the chart background; 404 on bad name."""
    db = current_app.config['DB']()
    package = db.query(tables.Package).get(pkg)
    if package is None:
        abort(404)
    return _piechart([], package.status_obj)
def howto():
    """Contribution guide: status counts plus randomly suggested idle
    and mispackaged packages to work on."""
    db = current_app.config['DB']()
    query = queries.packages(db)

    # Count the blocked packages
    blocked_query = query.filter(tables.Package.status == 'blocked')
    blocked_len = blocked_query.count()

    # Get all the idle packages
    idle_query = query.filter(tables.Package.status == 'idle')
    idle = list(idle_query)
    idle_len = len(idle)

    # Get all the mispackaged packages
    mispackaged_query = query.filter(tables.Package.status == 'mispackaged')
    mispackaged = list(mispackaged_query)

    # Pick random suggestions; guard against empty lists so the page
    # cannot 500 with IndexError once a category runs empty (the
    # template receives None in that case).
    random_idle = random.choice(idle) if idle else None
    random_mispackaged = random.choice(mispackaged) if mispackaged else None

    # Status objects
    query = db.query(tables.Status)
    mispackaged_status = query.get('mispackaged')
    released_status = query.get('released')

    return render_template(
        'howto.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('howto'), 'So you want to contribute?'),
        ),
        idle_len=idle_len,
        blocked_len=blocked_len,
        mispackaged=mispackaged,
        random_idle=random_idle,
        random_mispackaged=random_mispackaged,
        mispackaged_status=mispackaged_status,
        released_status=released_status,
    )
def history():
    """History chart page; optional ?expand=1 shows the expanded view."""
    expand = request.args.get('expand', None)
    if expand not in ('1', None):
        abort(400)  # Bad request
    crumbs = (
        (url_for('hello'), 'Python 3 Porting Database'),
        (url_for('history'), 'History'),
    )
    return render_template('history.html',
                           breadcrumbs=crumbs,
                           expand=bool(expand))
def history_csv():
    """Return the status-history table as CSV text, ordered by date."""
    db = current_app.config['DB']()
    entries = (db.query(tables.HistoryEntry)
               .order_by(tables.HistoryEntry.date))
    columns = ['commit', 'date', 'status', 'num_packages']
    out = io.StringIO()
    writer = csv.DictWriter(out, columns)
    writer.writeheader()
    for entry in entries:
        writer.writerow({col: getattr(entry, col) for col in columns})
    return out.getvalue()
def group_by_loc(grp):
    """LOC-statistics listing restricted to one group; 404 on bad ident."""
    db = current_app.config['DB']()
    group = db.query(tables.Group).get(grp)
    if group is None:
        abort(404)
    # Narrow the package query to this group, then delegate to by_loc.
    query = queries.packages(db)
    query = query.join(tables.Package.group_packages)
    query = query.filter(tables.GroupPackage.group_ident == grp)
    extra_breadcrumbs = (
        (url_for('group_by_loc', grp=grp), group.name),
    )
    return by_loc(query=query, extra_breadcrumbs=extra_breadcrumbs,
                  extra_args={'grp': group})
def by_loc(query=None, extra_breadcrumbs=(), extra_args=None):
    """Packages listed by lines-of-code statistics.

    GET parameters: ``sort`` selects one of the supported orderings
    (name, loc, python, capi, py-percent, capi-percent, py-small,
    capi-small, py-big, capi-big, no-py); ``reverse=1`` flips the
    direction.  Unknown values abort with 400.  ``query`` lets callers
    (group_by_loc) pre-narrow the package set.
    """
    db = current_app.config['DB']()
    sort_key = request.args.get('sort', None)
    sort_reverse = request.args.get('reverse', None)
    # 'reverse' swaps what the ascending/descending helpers emit.
    # NOTE(review): in the default (no-reverse) case descending() is
    # the identity and ascending() emits .desc(), which looks inverted
    # -- confirm this matches the intended default ordering.
    if sort_reverse is None:
        def descending(p):
            return p

        def ascending(p):
            return p.desc()
    elif sort_reverse == '1':
        def descending(p):
            return p.desc()

        def ascending(p):
            return p
    else:
        abort(400)  # Bad request
    if query is None:
        query = queries.packages(db)
        query = query.filter(tables.Package.status.in_(('idle', 'in-progress', 'blocked')))
    saved = query
    # Only packages with known, non-zero LOC take part in the ordering;
    # the rest are listed separately below.
    query = query.filter(tables.Package.loc_total)
    if sort_key == 'name':
        query = query.order_by(ascending(func.lower(tables.Package.name)))
    elif sort_key == 'loc':
        query = query.order_by(descending(tables.Package.loc_total))
    elif sort_key == 'python':
        query = query.order_by(descending(tables.Package.loc_python))
    elif sort_key == 'capi':
        query = query.order_by(descending(tables.Package.loc_capi))
    elif sort_key == 'py-percent':
        query = query.order_by(descending((0.1+tables.Package.loc_python)/tables.Package.loc_total))
    elif sort_key == 'capi-percent':
        query = query.order_by(descending((0.1+tables.Package.loc_capi)/tables.Package.loc_total))
    elif sort_key == 'py-small':
        query = query.order_by(ascending(
            tables.Package.loc_total - tables.Package.loc_python/1.5))
    elif sort_key == 'capi-small':
        query = query.order_by(descending(tables.Package.loc_capi>0))
        query = query.order_by(ascending(
            tables.Package.loc_total -
            tables.Package.loc_capi/1.5 +
            tables.Package.loc_python/9.9))
    elif sort_key == 'py-big':
        query = query.order_by(descending(
            tables.Package.loc_python * tables.Package.loc_python /
            (1.0+tables.Package.loc_total-tables.Package.loc_python)))
    elif sort_key == 'capi-big':
        query = query.order_by(descending(
            tables.Package.loc_capi * tables.Package.loc_capi /
            (1.0+tables.Package.loc_total-tables.Package.loc_capi)))
    elif sort_key == 'no-py':
        query = query.order_by(ascending(
            (tables.Package.loc_python + tables.Package.loc_capi + 0.0) /
            tables.Package.loc_total))
    elif sort_key is None:
        query = query.order_by(descending(tables.Package.loc_python +
                                          tables.Package.loc_capi))
    else:
        abort(400)  # Bad request
    # Stable tie-breakers.
    query = query.order_by(tables.Package.loc_total)
    query = query.order_by(func.lower(tables.Package.name))
    packages = list(query)
    # Packages excluded above: LOC unknown (NULL) or exactly zero.
    by_name = saved.order_by(func.lower(tables.Package.name))
    query = by_name.filter(tables.Package.loc_total.is_(None))
    missing_packages = list(query)
    query = by_name.filter(tables.Package.loc_total == 0)
    no_code_packages = list(query)
    if extra_args is None:
        extra_args = {}
    return render_template(
        'by_loc.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('by_loc'), 'Packages by Code Stats'),
        ) + extra_breadcrumbs,
        packages=packages,
        sort_key=sort_key,
        sort_reverse=sort_reverse,
        missing_packages=missing_packages,
        no_code_packages=no_code_packages,
        grp=extra_args.get('grp')
    )
def mispackaged():
    """List Fedora's mispackaged packages, oldest bug activity first.

    With ?requested=1, only packages whose maintainer asked for a
    porting patch (tracked by the linked Bugzilla tracker) are shown.
    """
    # Parameters
    requested = request.args.get('requested', None)
    if requested not in ('1', None):
        abort(400)  # Bad request

    db = current_app.config['DB']()
    query = db.query(tables.Package)
    query = query.filter(tables.Package.status == 'mispackaged')
    query = query.join(tables.CollectionPackage)
    query = query.filter(
        tables.CollectionPackage.collection_ident == 'fedora')
    # Do an outer join with Links, but ONLY with rows of type 'bug' so that if
    # a package has only e.g. a 'repo' link, it won't affect the results.
    query = query.outerjoin(tables.Link, and_(tables.Link.type == 'bug',
        tables.Link.collection_package_id == tables.CollectionPackage.id))
    # If appropriate: Filter only to packages where maintainer requested a patch
    if requested:
        query = query.join(tables.TrackingBug)
        query = query.filter(tables.TrackingBug.url ==
                             "https://bugzilla.redhat.com/show_bug.cgi?id=1333765")
    # Order by the last_update field, and if it's null, substitute it with the
    # year 9999 so it's very last. (Note: sqlite does not support NULLS LAST)
    query = query.order_by(func.ifnull(tables.Link.last_update, '9999'))
    # Speedup: Prevent starting subqueries for each package.
    query = query.options(subqueryload('collection_packages'))
    query = query.options(subqueryload('collection_packages.links'))
    query = query.options(subqueryload('collection_packages.tracking_bugs'))
    mispackaged = list(query)

    # Render the page, pass the data
    return render_template(
        'mispackaged.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('mispackaged', requested=1), 'Mispackaged'),
        ),
        requested=bool(requested),
        mispackaged=mispackaged,
    )
def namingpolicy():
    """Naming policy tracking.

    Shows overall naming-policy progress plus, outside portingdb's own
    package set, non-Python packages with unversioned Python requires,
    split into fixable ones and those blocked on a misnamed dependency.
    """
    db = current_app.config['DB']()
    # Fedora packages whose own name violates the naming policy.
    misnamed_package_names = (
        db.query(tables.Package.name)
        .join(tables.CollectionPackage)
        .filter(tables.CollectionPackage.collection_ident == 'fedora',
                tables.CollectionPackage.is_misnamed.is_(True)))
    progress, data = get_naming_policy_progress(db)
    total = sum(dict(progress).values())

    # Unversioned requirers within non Python Packages.
    require_misnamed_all = (
        db.query(tables.Dependency.requirer_name)
        .filter(tables.Dependency.unversioned.is_(True))
        .outerjoin(tables.Dependency.requirer)
        .filter(tables.Package.name.is_(None)).distinct())
    # ...of those, the ones requiring a misnamed package are "blocked".
    blocked = (
        require_misnamed_all
        .filter(tables.Dependency.requirement_name.in_(misnamed_package_names)))
    require_misnamed = sorted(set(require_misnamed_all) - set(blocked))
    naming_data = dict(db.query(tables.NamingData.ident, tables.NamingData))
    data_outside_portingdb = (
        (naming_data['require-misnamed'], len(require_misnamed), require_misnamed),
        (naming_data['require-blocked'], blocked.count(), blocked))
    return render_template(
        'namingpolicy.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('namingpolicy'), 'Naming Policy'),
        ),
        total_packages=total,
        progress=progress,
        data=data,
        data_outside_portingdb=data_outside_portingdb,
    )
def get_naming_policy_progress(db):
    """Compute naming-policy statistics.

    Returns (progress, data):
    * progress -- tuple of (NamingData, count) pairs: correctly named,
      misnamed, requiring a misnamed package, and blocked.
    * data -- the last three progress entries zipped with the package
      queries backing them, for rendering the detail lists.
    """
    # Fedora packages whose own name violates the naming policy.
    misnamed_package_names = (
        db.query(tables.Package.name)
        .join(tables.CollectionPackage)
        .filter(tables.CollectionPackage.collection_ident == 'fedora',
                tables.CollectionPackage.is_misnamed.is_(True)))
    all_packages = db.query(tables.Package).order_by(tables.Package.name)
    misnamed_packages = all_packages.filter(
        tables.Package.name.in_(misnamed_package_names))
    # Correctly-named packages that have at least one unversioned require.
    require_misnamed_all = (
        all_packages
        .filter(tables.Package.requirement_dependencies.any(
                    tables.Dependency.unversioned.is_(True)),
                ~tables.Package.name.in_(misnamed_package_names)))
    requires_misnamed = tables.Package.requirement_dependencies.any(
        tables.Dependency.requirement_name.in_(misnamed_package_names))
    blocked = require_misnamed_all.filter(requires_misnamed)
    require_misnamed = require_misnamed_all.filter(~requires_misnamed)
    # Naming policy in numbers.
    total_packages = all_packages.count()
    total_misnamed = misnamed_package_names.count()
    total_blocked = blocked.count()
    total_require_misnamed = require_misnamed.count()
    # Misnamed packages progress bar info.
    naming_data = dict(db.query(tables.NamingData.ident, tables.NamingData))
    progress = (
        (naming_data['name-correct'], total_packages - (
            total_misnamed + total_blocked + total_require_misnamed)),
        (naming_data['name-misnamed'], total_misnamed),
        (naming_data['require-misnamed'], total_require_misnamed),
        (naming_data['require-blocked'], total_blocked))
    data = list(zip(progress[1:], (misnamed_packages, require_misnamed, blocked)))
    return progress, data
def piechart_namingpolicy():
    """SVG pie chart of overall naming-policy progress."""
    db = current_app.config['DB']()
    summary, _ = get_naming_policy_progress(db)
    return _piechart(summary)
def history_naming():
    """Naming-policy history chart page."""
    return render_template(
        'history-naming.html',
        breadcrumbs=(
            (url_for('hello'), 'Python 3 Porting Database'),
            (url_for('namingpolicy'), 'Naming Policy'),
            (url_for('history'), 'History'),
        )
    )
def history_naming_csv():
    """Return the naming-policy history table as CSV text, by date."""
    db = current_app.config['DB']()
    entries = (db.query(tables.HistoryNamingEntry)
               .order_by(tables.HistoryNamingEntry.date))
    columns = ['commit', 'date', 'status', 'num_packages']
    out = io.StringIO()
    writer = csv.DictWriter(out, columns)
    writer.writeheader()
    for entry in entries:
        writer.writerow({col: getattr(entry, col) for col in columns})
    return out.getvalue()
def format_quantity(num):
    """Format a number with a metric prefix, e.g. 12345 -> '12.3K'.

    Values of 1000 or less keep a single trailing space so columns of
    formatted values line up with prefixed ones.
    """
    for prefix in ' KMGT':
        if not num > 1000:
            break
        num /= 1000
    # Keep roughly three significant figures.
    if num > 100:
        digits = 0
    elif num > 10:
        digits = 1
    else:
        digits = 2
    num = round(num, digits)
    if abs(num - int(num)) < 0.01:
        num = int(num)
    return str(num) + prefix
def format_percent(num):
    """Format a fraction as a percentage string, e.g. 0.5 -> '50%'.

    Large values are rounded to whole or tenth percents; tiny values
    keep just enough decimal places (up to two) to stay non-zero.
    """
    value = num * 100
    if value > 10:
        value = round(value)
    if value > 1:
        value = round(value, 1)
        if abs(value - int(value)) < 0.01:
            value = int(value)
    else:
        # Try one decimal place, then two; stop at the first rounding
        # that is not zero (two places are used even if still zero).
        rounded = round(value, 1)
        if rounded == 0:
            rounded = round(value, 2)
        value = rounded
    return str(value) + '%'
def create_app(db_url, cache_config=None):
    """Build the Flask application.

    Sets up the SQLAlchemy session factory for ``db_url``, the dogpile
    cache (a null cache by default), Jinja filters/globals, and all
    routes.  Each route's response is cached under a key derived from
    its URL, view arguments, and the listed GET parameters.
    """
    if cache_config is None:
        cache_config = {'backend': 'dogpile.cache.null'}
    cache = make_region().configure(**cache_config)
    app = Flask(__name__)
    app.config['DB'] = sessionmaker(bind=create_engine(db_url))
    db = app.config['DB']()
    app.config['Cache'] = cache
    # Site-wide config values stored as JSON in the database.
    app.config['CONFIG'] = {c.key: json.loads(c.value)
                            for c in db.query(tables.Config)}
    # Fail loudly on undefined template variables.
    app.jinja_env.undefined = StrictUndefined
    app.jinja_env.filters['md'] = markdown_filter
    app.jinja_env.filters['format_rpm_name'] = format_rpm_name
    app.jinja_env.filters['format_quantity'] = format_quantity
    app.jinja_env.filters['format_percent'] = format_percent
    app.jinja_env.filters['format_time_ago'] = format_time_ago

    @app.context_processor
    def add_template_globals():
        return {
            'cache_tag': uuid.uuid4(),
            'len': len,
            'log': math.log,
            'config': app.config['CONFIG'],
        }

    def _add_route(url, func, get_keys=()):
        # Register a view wrapped so its response is served from the
        # cache; the key covers URL, view args and relevant GET params.
        # (Removed a leftover debug print of the key on every request.)
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            creator = functools.partial(func, *args, **kwargs)
            key_dict = {'url': url,
                        'args': args,
                        'kwargs': kwargs,
                        'get': {k: request.args.get(k) for k in get_keys}}
            key = json.dumps(key_dict, sort_keys=True)
            return cache.get_or_create(key, creator)
        app.route(url)(decorated)

    _add_route("/", hello)
    _add_route("/stats.json", jsonstats)
    _add_route("/pkg/<pkg>/", package)
    _add_route("/grp/<grp>/", group)
    _add_route("/graph/", graph)
    _add_route("/graph/portingdb.json", graph_json)
    _add_route("/piechart.svg", piechart_svg)
    _add_route("/grp/<grp>/piechart.svg", piechart_grp)
    _add_route("/pkg/<pkg>/piechart.svg", piechart_pkg)
    _add_route("/grp/<grp>/graph/", graph_grp)
    _add_route("/grp/<grp>/graph/data.json", graph_json_grp)
    _add_route("/pkg/<pkg>/graph/", graph_pkg)
    _add_route("/pkg/<pkg>/graph/data.json", graph_json_pkg)
    _add_route("/by_loc/", by_loc, get_keys={'sort', 'reverse'})
    _add_route("/by_loc/grp/<grp>/", group_by_loc, get_keys={'sort', 'reverse'})
    _add_route("/mispackaged/", mispackaged, get_keys={'requested'})
    _add_route("/namingpolicy/", namingpolicy)
    _add_route("/namingpolicy/piechart.svg", piechart_namingpolicy)
    _add_route("/namingpolicy/history/", history_naming)
    _add_route("/namingpolicy/history/data.csv", history_naming_csv)
    _add_route("/history/", history, get_keys={'expand'})
    _add_route("/history/data.csv", history_csv)
    _add_route("/howto/", howto)
    return app
def main(db_url, cache_config=None, debug=False, port=5000):
    """Create the app for ``db_url`` and run the development server."""
    app = create_app(db_url, cache_config=cache_config)
    app.run(debug=debug, port=port)
| |
""" test indexing with ix """
import pytest
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas.compat import lrange
from pandas import Series, DataFrame, option_context, MultiIndex
from pandas.util import testing as tm
from pandas.errors import PerformanceWarning
class TestIX(object):
    """Tests for the deprecated ``.ix`` indexer and its parity with ``.loc``."""

    def test_ix_deprecation(self):
        """Accessing ``.ix`` should emit a DeprecationWarning."""
        # GH 15114
        df = DataFrame({'A': [1, 2, 3]})
        with tm.assert_produces_warning(DeprecationWarning,
                                        check_stacklevel=False):
            df.ix[1, 'A']

    def test_ix_loc_setitem_consistency(self):
        """Setitem via .loc/.ix (incl. chained assignment) yields equal results."""
        # GH 5771
        # loc with slice and series
        s = Series(0, index=[4, 5, 6])
        s.loc[4:5] += 1
        expected = Series([1, 1, 0], index=[4, 5, 6])
        tm.assert_series_equal(s, expected)

        # GH 5928
        # chained indexing assignment
        df = DataFrame({'a': [0, 1, 2]})
        expected = df.copy()
        with catch_warnings(record=True):
            expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']

        with catch_warnings(record=True):
            df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
        tm.assert_frame_equal(df, expected)

        df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
        with catch_warnings(record=True):
            df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
                'float64') + 0.5
        expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
        tm.assert_frame_equal(df, expected)

        # GH 8607
        # ix setitem consistency
        df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
                        'delta': [1174, 904, 161],
                        'elapsed': [7673, 9277, 1470]})
        expected = DataFrame({'timestamp': pd.to_datetime(
            [1413840976, 1413842580, 1413760580], unit='s'),
            'delta': [1174, 904, 161],
            'elapsed': [7673, 9277, 1470]})

        df2 = df.copy()
        df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
        tm.assert_frame_equal(df2, expected)

        df2 = df.copy()
        df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
        tm.assert_frame_equal(df2, expected)

        df2 = df.copy()
        with catch_warnings(record=True):
            # positional column assignment via .ix (column 2 == 'timestamp')
            df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
        tm.assert_frame_equal(df2, expected)

    def test_ix_loc_consistency(self):
        """Edge cases where .ix and .loc should agree (GH 8613)."""
        # GH 8613
        # some edge cases where ix/loc should return the same
        # this is not an exhaustive case

        def compare(result, expected):
            # scalar vs. array-like comparison helper
            if is_scalar(expected):
                assert result == expected
            else:
                assert expected.equals(result)

        # failure cases for .loc, but these work for .ix
        df = DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
        for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
                    tuple([slice(0, 2), df.columns[0:2]])]:

            for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
                          tm.makeDateIndex, tm.makePeriodIndex,
                          tm.makeTimedeltaIndex]:
                df.index = index(len(df.index))
                with catch_warnings(record=True):
                    df.ix[key]

                pytest.raises(TypeError, lambda: df.loc[key])

        df = DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
                       index=pd.date_range('2012-01-01', periods=5))

        for key in ['2012-01-03',
                    '2012-01-31',
                    slice('2012-01-03', '2012-01-03'),
                    slice('2012-01-03', '2012-01-04'),
                    slice('2012-01-03', '2012-01-06', 2),
                    slice('2012-01-03', '2012-01-31'),
                    tuple([[True, True, True, False, True]]), ]:

            # getitem

            # if the expected raises, then compare the exceptions
            try:
                with catch_warnings(record=True):
                    expected = df.ix[key]
            except KeyError:
                pytest.raises(KeyError, lambda: df.loc[key])
                continue

            result = df.loc[key]
            compare(result, expected)

            # setitem
            df1 = df.copy()
            df2 = df.copy()

            with catch_warnings(record=True):
                df1.ix[key] = 10
            df2.loc[key] = 10
            compare(df2, df1)

        # edge cases
        s = Series([1, 2, 3, 4], index=list('abde'))

        result1 = s['a':'c']
        with catch_warnings(record=True):
            result2 = s.ix['a':'c']
        result3 = s.loc['a':'c']
        tm.assert_series_equal(result1, result2)
        tm.assert_series_equal(result1, result3)

        # now work rather than raising KeyError
        s = Series(range(5), [-2, -1, 1, 2, 3])

        with catch_warnings(record=True):
            result1 = s.ix[-10:3]
        result2 = s.loc[-10:3]
        tm.assert_series_equal(result1, result2)

        with catch_warnings(record=True):
            result1 = s.ix[0:3]
        result2 = s.loc[0:3]
        tm.assert_series_equal(result1, result2)

    def test_ix_weird_slicing(self):
        """Boolean-masked assignment negates only the masked rows."""
        # http://stackoverflow.com/q/17056560/1240268
        df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
                        'two': [1, 2, 3, 4, 5]})
        df.loc[df['one'] > 1, 'two'] = -df['two']

        expected = DataFrame({'one': {0: 1.0,
                                      1: 2.0,
                                      2: 3.0,
                                      3: np.nan,
                                      4: np.nan},
                              'two': {0: 1,
                                      1: -2,
                                      2: -3,
                                      3: 4,
                                      4: 5}})
        tm.assert_frame_equal(df, expected)

    def test_ix_general(self):
        """.loc on a float-level MultiIndex, sorted and unsorted (GH 2817)."""
        # ix general issues

        # GH 2817
        data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
                'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
                'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
        df = DataFrame(data).set_index(keys=['col', 'year'])
        key = 4.0, 2012

        # emits a PerformanceWarning, ok
        with tm.assert_produces_warning(PerformanceWarning):
            tm.assert_frame_equal(df.loc[key], df.iloc[2:])

        # this is ok
        df.sort_index(inplace=True)
        res = df.loc[key]

        # col has float dtype, result should be Float64Index
        index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
                                       names=['col', 'year'])
        expected = DataFrame({'amount': [222, 333, 444]}, index=index)
        tm.assert_frame_equal(res, expected)

    def test_ix_assign_column_mixed(self):
        """Column assignment on mixed-dtype frames stays consistent."""
        # GH #1142
        df = DataFrame(tm.getSeriesData())
        df['foo'] = 'bar'

        orig = df.loc[:, 'B'].copy()
        df.loc[:, 'B'] = df.loc[:, 'B'] + 1
        tm.assert_series_equal(df.B, orig + 1)

        # GH 3668, mixed frame with series value
        df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
        expected = df.copy()

        for i in range(5):
            indexer = i * 2
            v = 1000 + i * 200
            expected.loc[indexer, 'y'] = v
            assert expected.loc[indexer, 'y'] == v

        df.loc[df.x % 2 == 0, 'y'] = df.loc[df.x % 2 == 0, 'y'] * 100
        tm.assert_frame_equal(df, expected)

        # GH 4508, making sure consistency of assignments
        df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
        df.loc[[0, 2, ], 'b'] = [100, -100]
        expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
        tm.assert_frame_equal(df, expected)

        df = DataFrame({'a': lrange(4)})
        df['b'] = np.nan
        df.loc[[1, 3], 'b'] = [100, -100]
        expected = DataFrame({'a': [0, 1, 2, 3],
                              'b': [np.nan, 100, np.nan, -100]})
        tm.assert_frame_equal(df, expected)

        # ok, but chained assignments are dangerous
        # if we turn off chained assignement it will work
        with option_context('chained_assignment', None):
            df = DataFrame({'a': lrange(4)})
            df['b'] = np.nan
            df['b'].loc[[1, 3]] = [100, -100]
            tm.assert_frame_equal(df, expected)

    def test_ix_get_set_consistency(self):
        """get/set via .ix and .loc agree on a mixed int/string index (GH 4544)."""
        # GH 4544
        # ix/loc get/set not consistent when
        # a mixed int/string index
        df = DataFrame(np.arange(16).reshape((4, 4)),
                       columns=['a', 'b', 8, 'c'],
                       index=['e', 7, 'f', 'g'])

        with catch_warnings(record=True):
            assert df.ix['e', 8] == 2
        assert df.loc['e', 8] == 2

        with catch_warnings(record=True):
            df.ix['e', 8] = 42
            assert df.ix['e', 8] == 42
        assert df.loc['e', 8] == 42

        df.loc['e', 8] = 45
        with catch_warnings(record=True):
            assert df.ix['e', 8] == 45
        assert df.loc['e', 8] == 45

    def test_ix_slicing_strings(self):
        """.ix setitem through a filtered index aligns on labels (gh-3836)."""
        # see gh-3836
        data = {'Classification':
                ['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
                'Random': [1, 2, 3, 4, 5],
                'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
        df = DataFrame(data)
        x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
                                        ])]
        with catch_warnings(record=True):
            df.ix[x.index, 'X'] = df['Classification']

        expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
                                                 1: 'bbb',
                                                 2: 'SA EQUITY',
                                                 3: 'SA SSF',
                                                 4: 'aaa'},
                              'Random': {0: 1,
                                         1: 2,
                                         2: 3,
                                         3: 4,
                                         4: 5},
                              'X': {0: 'correct',
                                    1: 'bbb',
                                    2: 'correct',
                                    3: 'correct',
                                    4: 'aaa'}})  # bug was 4: 'bbb'

        tm.assert_frame_equal(df, expected)

    def test_ix_setitem_out_of_bounds_axis_0(self):
        """Setting a row past the end of axis 0 raises ValueError."""
        df = DataFrame(
            np.random.randn(2, 5), index=["row%s" % i for i in range(2)],
            columns=["col%s" % i for i in range(5)])
        with catch_warnings(record=True):
            pytest.raises(ValueError, df.ix.__setitem__, (2, 0), 100)

    def test_ix_setitem_out_of_bounds_axis_1(self):
        """Setting a column past the end of axis 1 raises ValueError."""
        df = DataFrame(
            np.random.randn(5, 2), index=["row%s" % i for i in range(5)],
            columns=["col%s" % i for i in range(2)])
        with catch_warnings(record=True):
            pytest.raises(ValueError, df.ix.__setitem__, (0, 2), 100)

    def test_ix_empty_list_indexer_is_ok(self):
        """Indexing with an empty list yields empty frames of the right type."""
        with catch_warnings(record=True):
            from pandas.util.testing import makeCustomDataframe as mkdf
            df = mkdf(5, 2)
            # vertical empty
            tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
                                  check_index_type=True,
                                  check_column_type=True)
            # horizontal empty
            tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
                                  check_index_type=True,
                                  check_column_type=True)
            # horizontal empty
            tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
                                  check_index_type=True,
                                  check_column_type=True)

    def test_ix_duplicate_returns_series(self):
        """With duplicate index labels, scalar access returns a Series."""
        df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
                       columns=list('abc'))
        with catch_warnings(record=True):
            r = df.ix[0.2, 'a']
        e = df.loc[0.2, 'a']
        tm.assert_series_equal(r, e)
| |
# Copyright 2017 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Saurabh Jain <saurabh@scalyr.com>
from __future__ import unicode_literals
from __future__ import absolute_import
from io import open
__author__ = "saurabh@scalyr.com"
import os
import platform
try:
import psutil
except ImportError:
psutil = None
from scalyr_agent.compat import custom_defaultdict as defaultdict
from scalyr_agent.builtin_monitors.linux_process_metrics import (
ProcessMonitor,
Metric,
ProcessList,
StatReader,
)
from scalyr_agent.test_base import ScalyrTestCase
import scalyr_agent.scalyr_logging as scalyr_logging
from scalyr_agent.scalyr_monitor import MonitorInformation
from scalyr_agent.test_base import skipIf
from six.moves import range
# Directory of this test module; fixtures (captured /proc stat files) live
# in the adjacent "fixtures" subdirectory.
BASE_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
FIXTURES_DIR = os.path.join(BASE_DIR, "fixtures")
class MetricInformationTestCase(ScalyrTestCase):
    """Checks the metric metadata declared by the linux_process_metrics monitor."""

    def test_define_metric_metric_units(self):
        """All 16 declared metrics are present; app.disk.requests has no unit."""
        monitor_info = MonitorInformation.get_monitor_info(
            "scalyr_agent.builtin_monitors.linux_process_metrics"
        )
        declared = monitor_info.metrics
        self.assertEqual(len(declared), 16)

        disk_requests = [
            entry for entry in declared
            if entry.metric_name == "app.disk.requests"
        ]
        self.assertEqual(len(disk_requests), 2)
        for entry in disk_requests:
            self.assertEqual(entry.unit, None)
class TestMetricClass(ScalyrTestCase):
    """Tests hashing/equality and named-tuple semantics of the Metric class."""

    def test_metric_as_object_key(self):
        """
        This protects against a bug introduced in v2.0.47 whereby the Metric object didn't implement __hash__ and
        __eq__ dunder methods. Metric is repeatedly used as a key during linux process metrics generation.
        Without those dunder methods, the key/val is never replaced and the dictionary keeps growing.
        (Reference AGENT-142)
        """
        # Re-inserting equal Metric keys must replace, not grow, the dict.
        dd = {}
        for x in range(1000):
            dd[Metric("name1a", "name1b")] = x
            dd[Metric("name2a", "name2b")] = x
            dd[Metric("name3a", "name3b")] = x
            dd[Metric("name4a", "name4b")] = x
            dd[Metric("name5a", "name5b")] = x
        self.assertEqual(len(dd), 5)
        # each key holds the value from the final iteration
        self.assertEqual(dd[Metric("name1a", "name1b")], 999)
        self.assertEqual(dd[Metric("name2a", "name2b")], 999)
        self.assertEqual(dd[Metric("name3a", "name3b")], 999)
        self.assertEqual(dd[Metric("name4a", "name4b")], 999)
        self.assertEqual(dd[Metric("name5a", "name5b")], 999)

        dd = {}
        for x in range(1000):
            dd[Metric("name1a", "name1b")] = 1
            dd[Metric("name2a", "name2b")] = 1
            dd[Metric("name3a", "name3b")] = 1
            dd[Metric("name4a", "name4b")] = 1
            dd[Metric("name5a", "name5b")] = 1
        self.assertEqual(len(dd), 5)
        self.assertEqual(dd[Metric("name1a", "name1b")], 1)
        self.assertEqual(dd[Metric("name2a", "name2b")], 1)
        self.assertEqual(dd[Metric("name3a", "name3b")], 1)
        self.assertEqual(dd[Metric("name4a", "name4b")], 1)
        self.assertEqual(dd[Metric("name5a", "name5b")], 1)

    def test_basic_namedtuple_access(self):
        """Metric exposes name/type fields and is immutable like a namedtuple."""
        m = Metric("abc", 123)

        # Ensure name and type fields are present.
        # Fix: use assertEqual -- assertEquals is a deprecated alias and the
        # rest of this file consistently uses assertEqual.
        self.assertEqual(m.name, "abc")
        self.assertEqual(m.type, 123)

        # Non-existent attribute access raises
        self.assertRaises(AttributeError, lambda: m.asdf)  # pylint: disable=no-member

        # Ensure cannot mutate
        def mutate():
            m.name = "mutated value"

        self.assertRaises(AttributeError, lambda: mutate())
class TestProcessMonitorInitialize(ScalyrTestCase):
    """Verifies a freshly constructed ProcessMonitor starts with empty state."""

    def setUp(self):
        super(TestProcessMonitorInitialize, self).setUp()
        self.config_commandline = {
            "module": "scalyr_agent.builtin_monitors.linux_process_metrics",
            "id": "myapp",
            "commandline": ".foo.*",
        }

    def test_initialize_monitor(self):
        """A new monitor has no metric history and no aggregated metrics."""
        logger = scalyr_logging.getLogger("syslog_monitor[test]")
        monitor = ProcessMonitor(self.config_commandline, logger)
        self.assertEqual(monitor._ProcessMonitor__metrics_history, defaultdict(dict))
        self.assertEqual(monitor._ProcessMonitor__aggregated_metrics, {})
class TestProcessMonitorRecordMetrics(ScalyrTestCase):
    """
    Tests the record_metrics method of ProcessMonitor class
    """

    def setUp(self):
        super(TestProcessMonitorRecordMetrics, self).setUp()
        # Monitor configured to select processes by commandline regex.
        self.config_commandline = {
            "module": "scalyr_agent.builtin_monitors.linux_process_metrics",
            "id": "myapp",
            "commandline": ".foo.*",
        }

        self.monitor = ProcessMonitor(
            self.config_commandline, scalyr_logging.getLogger("syslog_monitor[test]")
        )

    def test_empty_metrics(self):
        """Recording an empty metrics dict leaves the history empty."""
        self.monitor.record_metrics(666, {})
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, defaultdict(dict)
        )

    def test_single_process_single_epoch(self):
        """One pid, one epoch: history holds a single one-element value list."""
        metric = Metric("fakemetric", "faketype")
        metrics = {metric: 21}
        self.monitor.record_metrics(555, metrics)
        expected_history = {555: {metric: [21]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        # record_metrics alone does not aggregate
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {})

    def test_single_process_multiple_epochs(self):
        """Successive epochs for the same pid append to the value list."""
        metric = Metric("fakemetric", "faketype")
        # epoch 1
        self.monitor.record_metrics(777, {metric: 1.2})

        expected_history = {777: {metric: [1.2]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {})

        # epoch 2
        self.monitor.record_metrics(777, {metric: 1.9})

        expected_history = {777: {metric: [1.2, 1.9]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {})

    def test_multi_process_single_epoch(self):
        """Metrics for different pids are kept in separate history entries."""
        metric1 = Metric("fakemetric1", "faketype1")
        metric2 = Metric("fakemetric2", "faketype2")

        self.monitor.record_metrics(111, {metric1: 1.2})
        self.monitor.record_metrics(222, {metric2: 2.87})

        expected_history = {111: {metric1: [1.2]}, 222: {metric2: [2.87]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {})

    def test_multi_process_multi_epochs(self):
        """Multiple pids across multiple epochs each accumulate their own values."""
        metric1 = Metric("fakemetric1", "faketype1")
        metric2 = Metric("fakemetric2", "faketype2")

        # epoch 1
        self.monitor.record_metrics(111, {metric1: 1.2})
        self.monitor.record_metrics(222, {metric2: 2.87})

        expected_history = {111: {metric1: [1.2]}, 222: {metric2: [2.87]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {})

        # epoch 2
        self.monitor.record_metrics(111, {metric1: 1.6})
        self.monitor.record_metrics(222, {metric2: 2.92})

        expected_history = {111: {metric1: [1.2, 1.6]}, 222: {metric2: [2.87, 2.92]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {})
class TestProcessListUtility(ScalyrTestCase):
    """Tests ProcessList traversal/matching against hand-built process tables."""

    @skipIf(
        not psutil,
        "Skipping tests because psutil is not available (likely running under Python 2.6 on Circle CI)",
    )
    def setUp(self):
        super(TestProcessListUtility, self).setUp()
        self.ps = ProcessList()

    @skipIf(
        not psutil,
        "Skipping tests because psutil is not available (likely running under Python 2.6 on Circle CI)",
    )
    def test_no_process(self):
        """With no processes at all, every lookup returns an empty list."""
        # override
        self.ps.parent_to_children_map = defaultdict(list)
        self.ps.processes = []

        self.assertEqual(self.ps.get_child_processes("bad pid"), [])
        self.assertEqual(self.ps.get_matches_commandline(".*"), [])
        self.assertEqual(self.ps.get_matches_commandline_with_children(".*"), [])

    @skipIf(
        not psutil,
        "Skipping tests because psutil is not available (likely running under Python 2.6 on Circle CI)",
    )
    def test_single_process_no_children(self):
        """Two-process chain: child lookup and commandline regex matching."""
        # override
        # process id 0 is basically no process. PID 1 is the main process of a terminal
        self.ps.processes = [
            {"pid": 2, "ppid": 1, "cmd": "python hello.py"},
            {"pid": 1, "ppid": 0, "cmd": "/bin/bash"},
        ]
        self.ps.parent_to_children_map = defaultdict(list)
        self.ps.parent_to_children_map[1] = [2]
        self.ps.parent_to_children_map[0] = [1]

        self.assertEqual(self.ps.get_child_processes("bad pid"), [])
        self.assertEqual(self.ps.get_child_processes(1), [2])

        # positive match
        self.assertEqual(set(self.ps.get_matches_commandline(".*")), set([1, 2]))
        self.assertEqual(self.ps.get_matches_commandline(".*bash.*"), [1])
        self.assertEqual(self.ps.get_matches_commandline(".*py.*"), [2])
        self.assertEqual(
            set(self.ps.get_matches_commandline_with_children(".*")), set([1, 2])
        )

    @skipIf(
        not psutil,
        "Skipping tests because psutil is not available (likely running under Python 2.6 on Circle CI)",
    )
    def test_single_process_with_children(self):
        """Three-level chain: transitive children are included in lookups."""
        # override
        # process id 0 is basically no process. PID 1 is the main process of a terminal
        self.ps.processes = [
            {"pid": 2, "ppid": 1, "cmd": "python hello.py"},
            {"pid": 3, "ppid": 2, "cmd": "sleep 2"},
            {"pid": 1, "ppid": 0, "cmd": "/bin/bash"},
        ]
        self.ps.parent_to_children_map = defaultdict(list)
        self.ps.parent_to_children_map[1] = [2]
        self.ps.parent_to_children_map[2] = [3]
        self.ps.parent_to_children_map[0] = [1]

        self.assertEqual(self.ps.get_child_processes("bad pid"), [])
        # children of pid 1 include grandchildren
        self.assertEqual(set(self.ps.get_child_processes(1)), set([2, 3]))
        self.assertEqual(self.ps.get_child_processes(2), [3])

        # positive match
        self.assertEqual(set(self.ps.get_matches_commandline(".*")), set([1, 2, 3]))
        self.assertEqual(self.ps.get_matches_commandline(".*bash.*"), [1])
        self.assertEqual(self.ps.get_matches_commandline(".*py.*"), [2])
        self.assertEqual(
            set(self.ps.get_matches_commandline_with_children(".*")), set([1, 2, 3])
        )

    @skipIf(
        not psutil,
        "Skipping tests because psutil is not available (likely running under Python 2.6 on Circle CI)",
    )
    def test_multiple_processes_with_children(self):
        """Multiple root processes: matching covers all independent trees."""
        # override
        # process id 0 is basically no process. PID 1 is the main process of a terminal
        self.ps.processes = [
            {"pid": 2, "ppid": 1, "cmd": "python hello.py"},
            {"pid": 3, "ppid": 2, "cmd": "sleep 2"},
            {"pid": 1, "ppid": 0, "cmd": "/bin/bash"},
            {"pid": 4, "ppid": 0, "cmd": "sleep 10000"},
        ]
        self.ps.parent_to_children_map = defaultdict(list)
        self.ps.parent_to_children_map[1] = [2]
        self.ps.parent_to_children_map[2] = [3]
        self.ps.parent_to_children_map[0] = [1, 4]

        self.assertEqual(self.ps.get_child_processes("bad pid"), [])
        self.assertEqual(set(self.ps.get_child_processes(1)), set([2, 3]))
        self.assertEqual(self.ps.get_child_processes(2), [3])

        # positive match
        self.assertEqual(set(self.ps.get_matches_commandline(".*")), set([1, 2, 3, 4]))
        self.assertEqual(self.ps.get_matches_commandline(".*bash.*"), [1])
        self.assertEqual(self.ps.get_matches_commandline(".*py.*"), [2])
        self.assertEqual(
            set(self.ps.get_matches_commandline_with_children(".*")), set([1, 2, 3, 4])
        )
class TestProcessMonitorRunningTotal(ScalyrTestCase):
    """
    Tests the calculations of the running totals of the metrics.
    """

    def setUp(self):
        super(TestProcessMonitorRunningTotal, self).setUp()
        self.config_commandline = {
            "module": "scalyr_agent.builtin_monitors.linux_process_metrics",
            "id": "myapp",
            "commandline": ".foo.*",
        }

        self.monitor = ProcessMonitor(
            self.config_commandline, scalyr_logging.getLogger("syslog_monitor[test]")
        )

    def test_no_history(self):
        """With no recorded metrics there is nothing aggregated."""
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {})

    def test_single_process_single_epoch(self):
        """An absolute metric aggregates to its recorded value."""
        metric = Metric("fakemetric", "faketype")
        metrics = {metric: 21}
        self.monitor.record_metrics(555, metrics)
        self.monitor._ProcessMonitor__pids = [555]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {555: {metric: [21]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {metric: 21})

    def test_single_process_multiple_epoch(self):
        """After a reset, an absolute metric aggregates to the latest value."""
        metric = Metric("fakemetric", "faketype")

        # epoch 1
        metrics = {metric: 21}
        self.monitor.record_metrics(555, metrics)
        self.monitor._ProcessMonitor__pids = [555]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {555: {metric: [21]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {metric: 21})

        # epoch 2
        # before epoch 2, the reset is called for absolute metrics
        self.monitor._reset_absolute_metrics()
        metrics = {metric: 21.5}
        self.monitor.record_metrics(555, metrics)
        self.monitor._ProcessMonitor__pids = [555]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {555: {metric: [21, 21.5]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics, {metric: 21.5}
        )

    def test_multiple_process_multiple_epochs(self):
        """Absolute metrics from distinct pids aggregate independently per epoch."""
        metric1 = Metric("fakemetric1", "faketype1")
        metric2 = Metric("fakemetric2", "faketype2")

        # epoch 1
        metrics1 = {metric1: 21}
        metrics2 = {metric2: 100.0}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {1: {metric1: [21]}, 2: {metric2: [100.0]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics,
            {metric1: 21, metric2: 100.0},
        )

        # epoch 2
        # before epoch 2, the reset is called for absolute metrics
        self.monitor._reset_absolute_metrics()

        metrics1 = {metric1: 21.11}
        metrics2 = {metric2: 100.11}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {1: {metric1: [21, 21.11]}, 2: {metric2: [100.0, 100.11]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics,
            {metric1: 21.11, metric2: 100.11},
        )

    def test_multiple_process_multiple_epochs_cumulative_metrics(self):
        """A cumulative metric (app.cpu) aggregates per-epoch deltas, not values."""
        metric1 = Metric("app.cpu", "system")

        # epoch 1
        metrics1 = {metric1: 20}
        metrics2 = {metric1: 40}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {1: {metric1: [20]}, 2: {metric1: [40]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        # first epoch has no previous sample, so the delta total is 0
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {metric1: 0})

        # epoch 2
        # before epoch 2, the reset is called for absolute metrics
        self.monitor._reset_absolute_metrics()

        metrics1 = {metric1: 22}
        metrics2 = {metric1: 44}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {1: {metric1: [20, 22]}, 2: {metric1: [40, 44]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics,
            {metric1: (22 - 20) + (44 - 40)},
        )

        # epoch 3
        self.monitor._reset_absolute_metrics()

        metrics1 = {metric1: 25}
        metrics2 = {metric1: 48}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()

        # we only keep the last 2 historical values
        expected_history = {1: {metric1: [22, 25]}, 2: {metric1: [44, 48]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics,
            {metric1: (22 - 20) + (44 - 40) + (25 - 22) + (48 - 44)},
        )

    def test_multiple_process_multiple_epochs_cumulative_metrics_one_process_death(
        self,
    ):
        """
        Same as test_multiple_process_multiple_epochs_cumulative_metrics
        but one process dies after epoch 2
        """
        metric1 = Metric("app.cpu", "system")

        # epoch 1
        metrics1 = {metric1: 21}
        metrics2 = {metric1: 100.0}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {1: {metric1: [21]}, 2: {metric1: [100.0]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {metric1: 0})

        # epoch 2
        # before epoch 2, the reset is called for absolute metrics
        self.monitor._reset_absolute_metrics()

        metrics1 = {metric1: 30.1}
        metrics2 = {metric1: 100.2}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {1: {metric1: [21, 30.1]}, 2: {metric1: [100.0, 100.2]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics,
            {metric1: (30.1 - 21) + (100.2 - 100.0)},
        )

        # epoch 3
        self.monitor._reset_absolute_metrics()

        metrics1 = {metric1: 26.0}
        metrics2 = {metric1: 103}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)

        # Process 1 dies.. boom
        self.monitor._ProcessMonitor__pids = [2]
        self.monitor._calculate_aggregated_metrics()

        # we only keep the last 2 historical values
        expected_history = {1: {metric1: [30.1, 26.0]}, 2: {metric1: [100.2, 103]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        # only the surviving pid (2) contributes a delta in epoch 3
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics,
            {metric1: (30.1 - 21) + (100.2 - 100.0) + (103 - 100.2)},
        )

    def test_multiple_process_multiple_epochs_cumulative_metrics_all_process_death(
        self,
    ):
        """
        Same as test_multiple_process_multiple_epochs_cumulative_metrics_one_process_death
        but all processes die after epoch 2
        """
        metric1 = Metric("app.cpu", "system")

        # epoch 1
        metrics1 = {metric1: 20}
        metrics2 = {metric1: 40}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {1: {metric1: [20]}, 2: {metric1: [40]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(self.monitor._ProcessMonitor__aggregated_metrics, {metric1: 0})

        # epoch 2
        # before epoch 2, the reset is called for absolute metrics
        self.monitor._reset_absolute_metrics()

        metrics1 = {metric1: 25}
        metrics2 = {metric1: 46}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)
        self.monitor._ProcessMonitor__pids = [1, 2]
        self.monitor._calculate_aggregated_metrics()
        expected_history = {1: {metric1: [20, 25]}, 2: {metric1: [40, 46]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics,
            {metric1: (25 - 20) + (46 - 40)},
        )

        # epoch 3
        self.monitor._reset_absolute_metrics()

        metrics1 = {metric1: 23}
        metrics2 = {metric1: 43}
        self.monitor.record_metrics(1, metrics1)
        self.monitor.record_metrics(2, metrics2)

        # Process 1 and 2 die.. boom
        # we should ensure the total running value for metric doesn't go down.
        self.monitor._ProcessMonitor__pids = []
        self.monitor._calculate_aggregated_metrics()

        # we only keep the last 2 historical values
        expected_history = {1: {metric1: [25, 23]}, 2: {metric1: [46, 43]}}
        self.assertEqual(
            self.monitor._ProcessMonitor__metrics_history, expected_history
        )
        # total is unchanged from epoch 2 -- dead pids add no (negative) deltas
        self.assertEqual(
            self.monitor._ProcessMonitor__aggregated_metrics,
            {metric1: (25 - 20) + (46 - 40)},
        )
class StatReaderTestCase(ScalyrTestCase):
    """Tests StatReader.gather_sample against a captured /proc/<pid>/stat fixture."""

    @skipIf(platform.system() == "Windows", "Skipping Linux only tests on Windows")
    def test_gather_sample(self):
        """Parsing the fixture file yields the seven expected metric values."""
        reader = StatReader(pid=1, monitor_id=1, logger=None)
        # Pin system uptime to a fixed value so app.uptime is deterministic.
        reader._StatReader__get_uptime_ms = lambda: 5 * 60 * 60 * 1000
        fixture_path = os.path.join(FIXTURES_DIR, "proc_1125_stat")
        with open(fixture_path, "r") as stat_fp:
            sample = reader.gather_sample(stat_file=stat_fp)
        self.assertEqual(len(sample), 7)
        expected_values = {
            Metric("app.cpu", "user"): 0,
            Metric("app.cpu", "system"): 277,
            Metric("app.uptime", None): 17993090,
            Metric("app.nice", None): -20.0,
            Metric("app.threads", None): 1,
            Metric("app.mem.majflt", None): 0,
            Metric("app.io.wait", None): 0,
        }
        for metric_key, value in expected_values.items():
            self.assertEqual(sample[metric_key], value)
class TestAgentProcessMetrics(ScalyrTestCase):
    """Covers late binding of the monitored PID via ProcessMonitor.set_pid()."""

    def test_late_process_pid_setting(self):
        """No process is selected until set_pid() replaces the placeholder."""
        self.config = {
            "module": "scalyr_agent.builtin_monitors.linux_process_metrics",
            "id": "myapp",
            # the process ID is not known yet.
            "pid": "$$TBD",
        }
        self.monitor = ProcessMonitor(
            self.config, scalyr_logging.getLogger("syslog_monitor[test]")
        )
        # Repeated selection attempts stay empty while the PID is unset.
        for _ in range(2):
            self.assertEqual(self.monitor._select_processes(), [])
        # Once the real PID is supplied, it must be selected.
        current_pid = os.getpid()
        self.monitor.set_pid(current_pid)
        self.assertEqual(self.monitor._select_processes(), [current_pid])
| |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common functionalities shared between different iLO modules.
"""
import os
import shutil
import tempfile
from ironic_lib import utils as ironic_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
import six.moves.urllib.parse as urlparse
from six.moves.urllib.parse import urljoin
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import images
from ironic.common import swift
from ironic.common import utils
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
# proliantutils is an optional dependency: these resolve to None when the
# library is absent, and the iLO drivers then fail at use time.
ilo_client = importutils.try_import('proliantutils.ilo.client')
ilo_error = importutils.try_import('proliantutils.exception')
# License tiers reported by get_ilo_license() (see that function's mapping
# from the iLO 'LICENSE_TYPE' string).
STANDARD_LICENSE = 1
ESSENTIALS_LICENSE = 2
ADVANCED_LICENSE = 3
# Configuration options registered below under the [ilo] group.
opts = [
    cfg.IntOpt('client_timeout',
               default=60,
               help=_('Timeout (in seconds) for iLO operations')),
    cfg.PortOpt('client_port',
                default=443,
                help=_('Port to be used for iLO operations')),
    cfg.StrOpt('swift_ilo_container',
               default='ironic_ilo_container',
               help=_('The Swift iLO container to store data.')),
    cfg.IntOpt('swift_object_expiry_timeout',
               default=900,
               help=_('Amount of time in seconds for Swift objects to '
                      'auto-expire.')),
    cfg.BoolOpt('use_web_server_for_images',
                default=False,
                help=_('Set this to True to use http web server to host '
                       'floppy images and generated boot ISO. This '
                       'requires http_root and http_url to be configured '
                       'in the [deploy] section of the config file. If this '
                       'is set to False, then Ironic will use Swift '
                       'to host the floppy images and generated '
                       'boot_iso.')),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='ilo')
LOG = logging.getLogger(__name__)
# driver_info keys that parse_driver_info() requires on every node.
REQUIRED_PROPERTIES = {
    'ilo_address': _("IP address or hostname of the iLO. Required."),
    'ilo_username': _("username for the iLO with administrator privileges. "
                      "Required."),
    'ilo_password': _("password for ilo_username. Required.")
}
# driver_info keys that fall back to the [ilo] config values when absent.
OPTIONAL_PROPERTIES = {
    'client_port': _("port to be used for iLO operations. Optional."),
    'client_timeout': _("timeout (in seconds) for iLO operations. Optional."),
}
CONSOLE_PROPERTIES = {
    'console_port': _("node's UDP port to connect to. Only required for "
                      "console access.")
}
CLEAN_PROPERTIES = {
    'ilo_change_password': _("new password for iLO. Required if the clean "
                             "step 'reset_ilo_credential' is enabled.")
}
# Properties advertised by the driver: required plus optional.
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
# Boot mode assumed when the iLO cannot report a pending boot mode.
DEFAULT_BOOT_MODE = 'LEGACY'
# Translation tables between ironic's generic boot-mode names and iLO's.
BOOT_MODE_GENERIC_TO_ILO = {'bios': 'legacy', 'uefi': 'uefi'}
BOOT_MODE_ILO_TO_GENERIC = dict(
    (v, k) for (k, v) in BOOT_MODE_GENERIC_TO_ILO.items())
def copy_image_to_web_server(source_file_path, destination):
    """Copies the given image to the http web server.

    This method copies the given image to the http_root location.
    It enables read-write access to the image else the deploy fails
    as the image file at the web_server url is inaccessible.

    :param source_file_path: The absolute path of the image file
                             which needs to be copied to the
                             web server root.
    :param destination: The name of the file that
                        will contain the copied image.
    :raises: ImageUploadFailed exception if copying the source
             file to the web server fails.
    :returns: image url after the source image is uploaded.
    """
    image_url = urljoin(CONF.deploy.http_url, destination)
    image_path = os.path.join(CONF.deploy.http_root, destination)
    try:
        shutil.copyfile(source_file_path, image_path)
        # chmod must be inside the try: a failure here (e.g. http_root on a
        # filesystem that rejects mode changes) previously escaped as a raw
        # OSError instead of the documented ImageUploadFailed.
        os.chmod(image_path, 0o644)
    except (IOError, OSError) as exc:
        raise exception.ImageUploadFailed(image_name=destination,
                                          web_server=CONF.deploy.http_url,
                                          reason=exc)
    return image_url
def remove_image_from_web_server(object_name):
    """Removes the given image from the configured web server.

    This method removes the given image from the http_root location,
    if the image exists.

    :param object_name: The name of the image file which needs to be removed
                        from the web server root.
    """
    # unlink_without_raise tolerates a missing file, so no existence
    # check is required before deleting.
    ironic_utils.unlink_without_raise(
        os.path.join(CONF.deploy.http_root, object_name))
def copy_image_to_swift(source_file_path, destination_object_name):
    """Uploads the given image to swift.

    This method copies the given image to swift.

    :param source_file_path: The absolute path of the image file which needs
                             to be copied to swift.
    :param destination_object_name: The name of the object that will contain
                                    the copied image.
    :raises: SwiftOperationError, if any operation with Swift fails.
    :returns: temp url from swift after the source image is uploaded.
    """
    container = CONF.ilo.swift_ilo_container
    expiry = CONF.ilo.swift_object_expiry_timeout
    swift_api = swift.SwiftAPI()
    # Ask Swift to auto-delete the object once the expiry window elapses.
    swift_api.create_object(container, destination_object_name,
                            source_file_path,
                            object_headers={'X-Delete-After': expiry})
    temp_url = swift_api.get_temp_url(container, destination_object_name,
                                      expiry)
    LOG.debug("Uploaded image %(destination_object_name)s to %(container)s.",
              {'destination_object_name': destination_object_name,
               'container': container})
    return temp_url
def remove_image_from_swift(object_name, associated_with=None):
    """Removes the given image from swift.

    This method removes the given image name from swift. It deletes the
    image if it exists in CONF.ilo.swift_ilo_container

    :param object_name: The name of the object which needs to be removed
                        from swift.
    :param associated_with: string to depict the component/operation this
                            object is associated to.
    """
    container = CONF.ilo.swift_ilo_container
    try:
        swift_api = swift.SwiftAPI()
        swift_api.delete_object(container, object_name)
    except exception.SwiftObjectNotFoundError as e:
        # The object already expired or was deleted: harmless, warn only.
        LOG.warning(
            _LW("Temporary object %(associated_with_msg)s"
                "was already deleted from Swift. Error: %(err)s"),
            {'associated_with_msg': ("associated with %s " % associated_with
                                     if associated_with else ""), 'err': e})
    except exception.SwiftOperationError as e:
        # Any other Swift failure is logged but deliberately swallowed;
        # this cleanup is best-effort and must not abort the caller.
        LOG.exception(
            _LE("Error while deleting temporary swift object %(object_name)s "
                "%(associated_with_msg)s from %(container)s. Error: %(err)s"),
            {'object_name': object_name, 'container': container,
             'associated_with_msg': ("associated with %s" % associated_with
                                     if associated_with else ""), 'err': e})
def parse_driver_info(node):
    """Gets the driver specific Node info.

    This method validates whether the 'driver_info' property of the
    supplied node contains the required information for this driver.

    :param node: an ironic Node object.
    :returns: a dict containing information from driver_info (or where
              applicable, config values).
    :raises: InvalidParameterValue if any parameters are incorrect
    :raises: MissingParameterValue if some mandatory information
             is missing on the node
    """
    info = node.driver_info
    d_info = {}
    # Required properties have no fallback: collect every absent one and
    # report them all in a single error.
    missing_info = []
    for param in REQUIRED_PROPERTIES:
        try:
            d_info[param] = info[param]
        except KeyError:
            missing_info.append(param)
    if missing_info:
        raise exception.MissingParameterValue(_(
            "The following required iLO parameters are missing from the "
            "node's driver_info: %s") % missing_info)
    not_integers = []
    for param in OPTIONAL_PROPERTIES:
        # Optional properties fall back to the [ilo] config section values.
        value = info.get(param, CONF.ilo.get(param))
        if param == "client_port":
            # Port gets range validation, not just an int() conversion.
            d_info[param] = utils.validate_network_port(value, param)
        else:
            try:
                d_info[param] = int(value)
            except ValueError:
                not_integers.append(param)
    for param in CONSOLE_PROPERTIES:
        value = info.get(param)
        if value:
            # Currently there's only "console_port" parameter
            # in CONSOLE_PROPERTIES
            if param == "console_port":
                d_info[param] = utils.validate_network_port(value, param)
    if not_integers:
        raise exception.InvalidParameterValue(_(
            "The following iLO parameters from the node's driver_info "
            "should be integers: %s") % not_integers)
    return d_info
def get_ilo_object(node):
    """Gets an IloClient object from proliantutils library.

    Given an ironic node object, this method gives back a IloClient object
    to do operations on the iLO.

    :param node: an ironic node object.
    :returns: an IloClient object.
    :raises: InvalidParameterValue on invalid inputs.
    :raises: MissingParameterValue if some mandatory information
             is missing on the node
    """
    info = parse_driver_info(node)
    # Positional order expected by IloClient: host, user, password,
    # timeout, port.
    return ilo_client.IloClient(info['ilo_address'],
                                info['ilo_username'],
                                info['ilo_password'],
                                info['client_timeout'],
                                info['client_port'])
def get_ilo_license(node):
    """Gives the current installed license on the node.

    Given an ironic node object, this method queries the iLO
    for currently installed license and returns it back.

    :param node: an ironic node object.
    :returns: a constant defined in this module which
              refers to the current license installed on the node.
    :raises: InvalidParameterValue on invalid inputs.
    :raises: MissingParameterValue if some mandatory information
             is missing on the node
    :raises: IloOperationError if it failed to retrieve the
             installed licenses from the iLO.
    """
    ilo_object = get_ilo_object(node)
    try:
        license_info = ilo_object.get_all_licenses()
    except ilo_error.IloError as ilo_exception:
        raise exception.IloOperationError(operation=_('iLO license check'),
                                          error=str(ilo_exception))
    # Map the iLO license-type suffix onto this module's constants;
    # anything unrecognized is treated as the standard license.
    license_name = license_info['LICENSE_TYPE']
    for suffix, license_const in (("Advanced", ADVANCED_LICENSE),
                                  ("Essentials", ESSENTIALS_LICENSE)):
        if license_name.endswith(suffix):
            return license_const
    return STANDARD_LICENSE
def update_ipmi_properties(task):
    """Update ipmi properties to node driver_info

    :param task: a task from TaskManager.
    """
    driver_info = task.node.driver_info
    # Mirror the iLO credentials into their IPMI equivalents so IPMI
    # tooling can address the same management controller.
    driver_info['ipmi_address'] = driver_info.get('ilo_address')
    driver_info['ipmi_username'] = driver_info.get('ilo_username')
    driver_info['ipmi_password'] = driver_info.get('ilo_password')
    if 'console_port' in driver_info:
        driver_info['ipmi_terminal_port'] = driver_info['console_port']
    # Reassign so the node object registers the updated credentials.
    task.node.driver_info = driver_info
def _get_floppy_image_name(node):
"""Returns the floppy image name for a given node.
:param node: the node for which image name is to be provided.
"""
return "image-%s" % node.uuid
def _prepare_floppy_image(task, params):
    """Prepares the floppy image for passing the parameters.

    This method prepares a temporary vfat filesystem image. Then it adds
    a file into the image which contains the parameters to be passed to
    the ramdisk. After adding the parameters, it then uploads the file either
    to Swift in 'swift_ilo_container', setting it to auto-expire after
    'swift_object_expiry_timeout' seconds or in web server. Then it returns
    the temp url for the Swift object or the http url for the uploaded floppy
    image depending upon value of CONF.ilo.use_web_server_for_images.

    :param task: a TaskManager instance containing the node to act on.
    :param params: a dictionary containing 'parameter name'->'value' mapping
        to be passed to the deploy ramdisk via the floppy image.
    :raises: ImageCreationFailed, if it failed while creating the floppy image.
    :raises: ImageUploadFailed, if copying the source file to the
             web server fails.
    :raises: SwiftOperationError, if any operation with Swift fails.
    :returns: the HTTP image URL or the Swift temp url for the floppy image.
    """
    with tempfile.NamedTemporaryFile(
            dir=CONF.tempdir) as vfat_image_tmpfile_obj:
        tmpfile_path = vfat_image_tmpfile_obj.name
        images.create_vfat_image(tmpfile_path, parameters=params)
        object_name = _get_floppy_image_name(task.node)
        # Publish the image via the configured mechanism; the temp file
        # is deleted when the with-block exits, after the upload.
        if CONF.ilo.use_web_server_for_images:
            return copy_image_to_web_server(tmpfile_path, object_name)
        return copy_image_to_swift(tmpfile_path, object_name)
def destroy_floppy_image_from_web_server(node):
    """Removes the temporary floppy image.

    It removes the floppy image created for deploy.

    :param node: an ironic node object.
    """
    remove_image_from_web_server(_get_floppy_image_name(node))
def attach_vmedia(node, device, url):
    """Attaches the given url as virtual media on the node.

    :param node: an ironic node object.
    :param device: the virtual media device to attach
    :param url: the http/https url to attach as the virtual media device
    :raises: IloOperationError if insert virtual media failed.
    """
    ilo_object = get_ilo_object(node)
    try:
        ilo_object.insert_virtual_media(url, device=device)
        # Keep the media connected, and read-only, across reboots.
        ilo_object.set_vm_status(
            device=device, boot_option='CONNECT', write_protect='YES')
    except ilo_error.IloError as ilo_exception:
        raise exception.IloOperationError(
            operation=_("Inserting virtual media %s") % device,
            error=ilo_exception)
    else:
        LOG.info(_LI("Attached virtual media %s successfully."), device)
def set_boot_mode(node, boot_mode):
    """Sets the node to boot using boot_mode for the next boot.

    :param node: an ironic node object.
    :param boot_mode: Next boot mode.
    :raises: IloOperationError if setting boot mode failed.
    """
    ilo_object = get_ilo_object(node)
    # Some iLO firmware cannot report the pending mode; assume the module
    # default in that case.
    try:
        pending = ilo_object.get_pending_boot_mode()
    except ilo_error.IloCommandNotSupportedError:
        pending = DEFAULT_BOOT_MODE
    if BOOT_MODE_ILO_TO_GENERIC[pending.lower()] == boot_mode:
        # Already pending the requested mode: nothing to push to the iLO.
        LOG.info(_LI("Node %(uuid)s pending boot mode is %(boot_mode)s."),
                 {'uuid': node.uuid, 'boot_mode': boot_mode})
        return
    try:
        ilo_object.set_pending_boot_mode(
            BOOT_MODE_GENERIC_TO_ILO[boot_mode].upper())
    except ilo_error.IloError as ilo_exception:
        raise exception.IloOperationError(
            operation=_("Setting %s as boot mode") % boot_mode,
            error=ilo_exception)
    LOG.info(_LI("Node %(uuid)s boot mode is set to %(boot_mode)s."),
             {'uuid': node.uuid, 'boot_mode': boot_mode})
def update_boot_mode(task):
    """Update instance_info with boot mode to be used for deploy.

    This method updates instance_info with boot mode to be used for
    deploy if node properties['capabilities'] do not have boot_mode.
    It sets the boot mode on the node.

    :param task: Task object.
    :raises: IloOperationError if setting boot mode failed.
    """
    node = task.node
    boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
    if boot_mode is not None:
        # A boot mode was explicitly requested: push it to the iLO and stop.
        LOG.debug("Node %(uuid)s boot mode is being set to %(boot_mode)s",
                  {'uuid': node.uuid, 'boot_mode': boot_mode})
        set_boot_mode(node, boot_mode)
        return
    LOG.debug("Check pending boot mode for node %s.", node.uuid)
    ilo_object = get_ilo_object(node)
    try:
        boot_mode = ilo_object.get_pending_boot_mode()
    except ilo_error.IloCommandNotSupportedError:
        # Firmware cannot report a pending mode; fall back to legacy BIOS.
        boot_mode = 'legacy'
    if boot_mode != 'UNKNOWN':
        # Translate the iLO-reported mode into ironic's generic name.
        boot_mode = BOOT_MODE_ILO_TO_GENERIC[boot_mode.lower()]
    if boot_mode == 'UNKNOWN':
        # NOTE(faizan) ILO will return this in remote cases and mostly on
        # the nodes which supports UEFI. Such nodes mostly comes with UEFI
        # as default boot mode. So we will try setting bootmode to UEFI
        # and if it fails then we fall back to BIOS boot mode.
        try:
            boot_mode = 'uefi'
            ilo_object.set_pending_boot_mode(
                BOOT_MODE_GENERIC_TO_ILO[boot_mode].upper())
        except ilo_error.IloError as ilo_exception:
            operation = _("Setting %s as boot mode") % boot_mode
            raise exception.IloOperationError(operation=operation,
                                              error=ilo_exception)
        LOG.debug("Node %(uuid)s boot mode is being set to %(boot_mode)s "
                  "as pending boot mode is unknown.",
                  {'uuid': node.uuid, 'boot_mode': boot_mode})
    # Persist the chosen mode so later deploy steps can consume it.
    instance_info = node.instance_info
    instance_info['deploy_boot_mode'] = boot_mode
    node.instance_info = instance_info
    node.save()
def setup_vmedia(task, iso, ramdisk_options):
    """Attaches virtual media and sets it as boot device.

    This method attaches the given bootable ISO as virtual media, prepares the
    arguments for ramdisk in virtual media floppy.

    :param task: a TaskManager instance containing the node to act on.
    :param iso: a bootable ISO image href to attach to. Should be either
        of below:

        * A Swift object - It should be of format 'swift:<object-name>'.
          It is assumed that the image object is present in
          CONF.ilo.swift_ilo_container;
        * A Glance image - It should be format 'glance://<glance-image-uuid>'
          or just <glance-image-uuid>;
        * An HTTP URL.
    :param ramdisk_options: the options to be passed to the ramdisk in virtual
        media floppy.
    :raises: ImageCreationFailed, if it failed while creating the floppy image.
    :raises: IloOperationError, if some operation on iLO failed.
    """
    setup_vmedia_for_boot(task, iso, ramdisk_options)
    # In UEFI boot mode a freshly inserted virtual CDROM only appears in the
    # persistent boot order after a reset, but it is always available for
    # one-time boot.  iLO preserves one-time boot settings across the
    # internal resets it performs while toggling secure boot, so secure boot
    # deploys are unaffected by using the one-time device here.
    manager_utils.node_set_boot_device(task, boot_devices.CDROM)
def setup_vmedia_for_boot(task, boot_iso, parameters=None):
    """Sets up the node to boot from the given ISO image.

    This method attaches the given boot_iso on the node and passes
    the required parameters to it via virtual floppy image.

    :param task: a TaskManager instance containing the node to act on.
    :param boot_iso: a bootable ISO image to attach to. Should be either
        of below:

        * A Swift object - It should be of format 'swift:<object-name>'.
          It is assumed that the image object is present in
          CONF.ilo.swift_ilo_container;
        * A Glance image - It should be format 'glance://<glance-image-uuid>'
          or just <glance-image-uuid>;
        * An HTTP(S) URL.
    :param parameters: the parameters to pass in the virtual floppy image
        in a dictionary. This is optional.
    :raises: ImageCreationFailed, if it failed while creating the floppy image.
    :raises: SwiftOperationError, if any operation with Swift fails.
    :raises: IloOperationError, if attaching virtual media failed.
    """
    LOG.info(_LI("Setting up node %s to boot from virtual media"),
             task.node.uuid)
    # Ramdisk parameters travel on a vFAT floppy attached alongside the ISO.
    if parameters:
        floppy_image_temp_url = _prepare_floppy_image(task, parameters)
        attach_vmedia(task.node, 'FLOPPY', floppy_image_temp_url)
    boot_iso_url = None
    parsed_ref = urlparse.urlparse(boot_iso)
    if parsed_ref.scheme == 'swift':
        # 'swift:<object>' references resolve to a temp URL in the
        # configured iLO container.
        swift_api = swift.SwiftAPI()
        container = CONF.ilo.swift_ilo_container
        object_name = parsed_ref.path
        timeout = CONF.ilo.swift_object_expiry_timeout
        boot_iso_url = swift_api.get_temp_url(
            container, object_name, timeout)
    elif service_utils.is_glance_image(boot_iso):
        boot_iso_url = (
            images.get_temp_url_for_glance_image(task.context, boot_iso))
    # Plain HTTP(S) URLs fall through with boot_iso_url left as None and
    # are attached verbatim.
    attach_vmedia(task.node, 'CDROM', boot_iso_url or boot_iso)
def eject_vmedia_devices(task):
    """Ejects virtual media devices.

    This method ejects virtual media floppy and cdrom.

    :param task: a TaskManager instance containing the node to act on.
    :returns: None
    :raises: IloOperationError, if some error was encountered while
        trying to eject virtual media floppy or cdrom.
    """
    client = get_ilo_object(task.node)
    # Eject the floppy first, then the cdrom; abort on the first failure.
    for device in ('FLOPPY', 'CDROM'):
        try:
            client.eject_virtual_media(device)
        except ilo_error.IloError as ilo_exception:
            LOG.error(_LE("Error while ejecting virtual media %(device)s "
                          "from node %(uuid)s. Error: %(error)s"),
                      {'device': device, 'uuid': task.node.uuid,
                       'error': ilo_exception})
            raise exception.IloOperationError(
                operation=_("Eject virtual media %s") % device.lower(),
                error=ilo_exception)
def cleanup_vmedia_boot(task):
    """Cleans a node after a virtual media boot.

    This method cleans up a node after a virtual media boot. It deletes the
    floppy image if it exists in CONF.ilo.swift_ilo_container or web server.
    It also ejects both virtual media cdrom and virtual media floppy.

    :param task: a TaskManager instance containing the node to act on.
    """
    LOG.debug("Cleaning up node %s after virtual media boot", task.node.uuid)
    # The floppy lives wherever _prepare_floppy_image() published it.
    if CONF.ilo.use_web_server_for_images:
        destroy_floppy_image_from_web_server(task.node)
    else:
        remove_image_from_swift(_get_floppy_image_name(task.node),
                                'virtual floppy')
    eject_vmedia_devices(task)
def get_secure_boot_mode(task):
    """Retrieves current enabled state of UEFI secure boot on the node

    Returns the current enabled state of UEFI secure boot on the node.

    :param task: a task from TaskManager.
    :raises: MissingParameterValue if a required iLO parameter is missing.
    :raises: IloOperationError on an error from IloClient library.
    :raises: IloOperationNotSupported if UEFI secure boot is not supported.
    :returns: Boolean value indicating current state of UEFI secure boot
              on the node.
    """
    operation = _("Get secure boot mode for node %s.") % task.node.uuid
    ilo_object = get_ilo_object(task.node)
    secure_boot_state = False
    try:
        # Secure boot is only meaningful when the node boots in UEFI mode;
        # otherwise report it as disabled.
        if ilo_object.get_current_boot_mode() == 'UEFI':
            secure_boot_state = ilo_object.get_secure_boot_mode()
    except ilo_error.IloCommandNotSupportedError as ilo_exception:
        raise exception.IloOperationNotSupported(operation=operation,
                                                 error=ilo_exception)
    except ilo_error.IloError as ilo_exception:
        raise exception.IloOperationError(operation=operation,
                                          error=ilo_exception)
    LOG.debug("Get secure boot mode for node %(node)s returned %(value)s",
              {'value': secure_boot_state, 'node': task.node.uuid})
    return secure_boot_state
def set_secure_boot_mode(task, flag):
    """Enable or disable UEFI Secure Boot for the next boot

    Enable or disable UEFI Secure Boot for the next boot

    :param task: a task from TaskManager.
    :param flag: Boolean value. True if the secure boot to be
                 enabled in next boot.
    :raises: IloOperationError on an error from IloClient library.
    :raises: IloOperationNotSupported if UEFI secure boot is not supported.
    """
    operation = (_("Setting secure boot to %(flag)s for node %(node)s.") %
                 {'flag': flag, 'node': task.node.uuid})
    client = get_ilo_object(task.node)
    try:
        client.set_secure_boot_mode(flag)
    except ilo_error.IloCommandNotSupportedError as ilo_exception:
        raise exception.IloOperationNotSupported(operation=operation,
                                                 error=ilo_exception)
    except ilo_error.IloError as ilo_exception:
        raise exception.IloOperationError(operation=operation,
                                          error=ilo_exception)
    else:
        # Only log once the iLO accepted the change.
        LOG.debug(operation)
def update_secure_boot_mode(task, mode):
    """Changes secure boot mode for next boot on the node.

    This method changes secure boot mode on the node for next boot. It changes
    the secure boot mode setting on node only if the deploy has requested for
    the secure boot.

    During deploy, this method is used to enable secure boot on the node by
    passing 'mode' as 'True'.

    During teardown, this method is used to disable secure boot on the node by
    passing 'mode' as 'False'.

    :param task: a TaskManager instance containing the node to act on.
    :param mode: Boolean value requesting the next state for secure boot
    :raises: IloOperationNotSupported, if operation is not supported on iLO
    :raises: IloOperationError, if some operation on iLO failed.
    """
    # Guard clause: only touch the setting when the deploy asked for it.
    if not deploy_utils.is_secure_boot_requested(task.node):
        return
    set_secure_boot_mode(task, mode)
    LOG.info(_LI('Changed secure boot to %(mode)s for node %(node)s'),
             {'mode': mode, 'node': task.node.uuid})
def remove_single_or_list_of_files(file_location):
    """Removes (deletes) the file or list of files.

    This method only accepts single or list of files to delete.
    If single file is passed, this method removes (deletes) the file.
    If list of files is passed, this method removes (deletes) each of the
    files iteratively.

    :param file_location: a single or a list of file paths
    """
    # Normalize a single path into a one-element list, then delete each
    # entry; anything else is silently ignored, matching the contract.
    if isinstance(file_location, six.string_types):
        file_location = [file_location]
    if isinstance(file_location, list):
        for location in file_location:
            ironic_utils.unlink_without_raise(location)
def verify_image_checksum(image_location, expected_checksum):
    """Verifies checksum (md5) of image file against the expected one.

    This method generates the checksum of the image file on the fly and
    verifies it against the expected checksum provided as argument.

    :param image_location: location of image file whose checksum is verified.
    :param expected_checksum: checksum to be checked against
    :raises: ImageRefValidationFailed, if invalid file path or
             verification fails.
    """
    try:
        with open(image_location, 'rb') as fd:
            actual_checksum = utils.hash_file(fd)
    except IOError as e:
        LOG.error(_LE("Error opening file: %(file)s"),
                  {'file': image_location})
        raise exception.ImageRefValidationFailed(image_href=image_location,
                                                 reason=six.text_type(e))
    if actual_checksum == expected_checksum:
        return
    msg = (_('Error verifying image checksum. Image %(image)s failed to '
             'verify against checksum %(checksum)s. Actual checksum is: '
             '%(actual_checksum)s') %
           {'image': image_location, 'checksum': expected_checksum,
            'actual_checksum': actual_checksum})
    LOG.error(msg)
    raise exception.ImageRefValidationFailed(image_href=image_location,
                                             reason=msg)
| |
# Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import mock
import six
from jacket.compute import test
from jacket.compute import utils
from jacket.compute.virt.disk import api as disk_api
from jacket.compute.virt.disk.mount import api as mount
from jacket.compute.virt import driver
# Minimal /proc/mounts snapshot used to stub out the host's mount table in
# the tests below (fed through io.StringIO in TestDiskImage.mock_proc_mounts).
PROC_MOUNTS_CONTENTS = """rootfs / rootfs rw 0 0
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
udev /dev devtmpfs rw,relatime,size=1013160k,nr_inodes=253290,mode=755 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620 0 0
tmpfs /run tmpfs rw,nosuid,relatime,size=408904k,mode=755 0 0"""
class TestVirtDriver(test.NoDBTestCase):
    """Tests for the block_device_info accessor helpers in virt.driver."""

    def test_block_device(self):
        """Accessors return the stored values, and safe defaults for
        empty or None block_device_info."""
        swap = {'device_name': '/dev/sdb',
                'swap_size': 1}
        ephemerals = [{'num': 0,
                       'virtual_name': 'ephemeral0',
                       'device_name': '/dev/sdc1',
                       'size': 1}]
        block_device_mapping = [{'mount_device': '/dev/sde',
                                 'device_path': 'fake_device'}]
        block_device_info = {
            'root_device_name': '/dev/sda',
            'swap': swap,
            'ephemerals': ephemerals,
            'block_device_mapping': block_device_mapping}
        empty_block_device_info = {}
        # Root device: populated dict returns it, empty/None return None.
        self.assertEqual(
            driver.block_device_info_get_root(block_device_info), '/dev/sda')
        self.assertIsNone(
            driver.block_device_info_get_root(empty_block_device_info))
        self.assertIsNone(driver.block_device_info_get_root(None))
        # Swap: missing/None swap degrades to a stub dict with no device
        # and zero size rather than raising.
        self.assertEqual(
            driver.block_device_info_get_swap(block_device_info), swap)
        self.assertIsNone(driver.block_device_info_get_swap(
            empty_block_device_info)['device_name'])
        self.assertEqual(driver.block_device_info_get_swap(
            empty_block_device_info)['swap_size'], 0)
        self.assertIsNone(
            driver.block_device_info_get_swap({'swap': None})['device_name'])
        self.assertEqual(
            driver.block_device_info_get_swap({'swap': None})['swap_size'],
            0)
        self.assertIsNone(
            driver.block_device_info_get_swap(None)['device_name'])
        self.assertEqual(
            driver.block_device_info_get_swap(None)['swap_size'], 0)
        # Ephemerals: empty/None degrade to an empty list.
        self.assertEqual(
            driver.block_device_info_get_ephemerals(block_device_info),
            ephemerals)
        self.assertEqual(
            driver.block_device_info_get_ephemerals(empty_block_device_info),
            [])
        self.assertEqual(
            driver.block_device_info_get_ephemerals(None),
            [])

    def test_swap_is_usable(self):
        """Swap is usable only with both a device name and a nonzero size."""
        self.assertFalse(driver.swap_is_usable(None))
        self.assertFalse(driver.swap_is_usable({'device_name': None}))
        self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb',
                                                'swap_size': 0}))
        self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb',
                                               'swap_size': 1}))
class FakeMount(object):
    """Test double mimicking the mount.Mount lifecycle.

    Tracks the linked/mapped/mounted flags and the backing device path so
    tests can observe mount, umount and teardown transitions without
    touching real devices.
    """

    def __init__(self, image, mount_dir, partition=None, device=None):
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir
        # Fresh instances start fully torn down.
        self.linked = False
        self.mapped = False
        self.mounted = False
        self.device = device

    def do_mount(self):
        # Mounting links, maps and mounts in one step and exposes a
        # fake device node.
        self.linked = self.mapped = self.mounted = True
        self.device = '/dev/fake'
        return True

    def do_umount(self):
        # Unmounting leaves the image linked but no longer mounted.
        self.linked = True
        self.mounted = False

    def do_teardown(self):
        # Teardown clears every flag and forgets the device.
        self.linked = False
        self.mapped = False
        self.mounted = False
        self.device = None
class TestDiskImage(test.NoDBTestCase):
    """Tests for disk_api._DiskImage mount/umount/teardown, with the mount
    backend replaced by FakeMount and /proc/mounts stubbed."""

    def mock_proc_mounts(self, mock_open):
        # Serve the canned mount table whenever the code opens /proc/mounts.
        response = io.StringIO(six.text_type(PROC_MOUNTS_CONTENTS))
        mock_open.return_value = response

    @mock.patch.object(six.moves.builtins, 'open')
    def test_mount(self, mock_open):
        """mount() records the mounter and returns the fake device path."""
        self.mock_proc_mounts(mock_open)
        image = '/tmp/fake-image'
        mountdir = '/mnt/fake_rootfs'
        fakemount = FakeMount(image, mountdir, None)
        def fake_instance_for_format(image, mountdir, partition):
            return fakemount
        self.stubs.Set(mount.Mount, 'instance_for_format',
                       staticmethod(fake_instance_for_format))
        diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
        dev = diskimage.mount()
        self.assertEqual(diskimage._mounter, fakemount)
        self.assertEqual(dev, '/dev/fake')

    @mock.patch.object(six.moves.builtins, 'open')
    def test_umount(self, mock_open):
        """umount() after mount() clears the stored mounter."""
        self.mock_proc_mounts(mock_open)
        image = '/tmp/fake-image'
        mountdir = '/mnt/fake_rootfs'
        fakemount = FakeMount(image, mountdir, None)
        def fake_instance_for_format(image, mountdir, partition):
            return fakemount
        self.stubs.Set(mount.Mount, 'instance_for_format',
                       staticmethod(fake_instance_for_format))
        diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
        dev = diskimage.mount()
        self.assertEqual(diskimage._mounter, fakemount)
        self.assertEqual(dev, '/dev/fake')
        diskimage.umount()
        self.assertIsNone(diskimage._mounter)

    @mock.patch.object(six.moves.builtins, 'open')
    def test_teardown(self, mock_open):
        """teardown() after mount() also clears the stored mounter."""
        self.mock_proc_mounts(mock_open)
        image = '/tmp/fake-image'
        mountdir = '/mnt/fake_rootfs'
        fakemount = FakeMount(image, mountdir, None)
        def fake_instance_for_format(image, mountdir, partition):
            return fakemount
        self.stubs.Set(mount.Mount, 'instance_for_format',
                       staticmethod(fake_instance_for_format))
        diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
        dev = diskimage.mount()
        self.assertEqual(diskimage._mounter, fakemount)
        self.assertEqual(dev, '/dev/fake')
        diskimage.teardown()
        self.assertIsNone(diskimage._mounter)
class TestVirtDisk(test.NoDBTestCase):
    """Tests for the LXC container setup/teardown paths in disk_api,
    asserting on the exact sequence of shell commands issued."""

    def setUp(self):
        super(TestVirtDisk, self).setUp()
        # Capture every utils.execute() invocation instead of running it.
        self.executes = []
        def fake_execute(*cmd, **kwargs):
            self.executes.append(cmd)
            return None, None
        self.stubs.Set(utils, 'execute', fake_execute)

    def test_lxc_setup_container(self):
        """setup_container() mounts the image and reports the device."""
        image = '/tmp/fake-image'
        container_dir = '/mnt/fake_rootfs/'
        def proc_mounts(self, mount_point):
            # Pretend nothing is mounted there yet.
            return None
        def fake_instance_for_format(image, mountdir, partition):
            return FakeMount(image, mountdir, partition)
        self.stub_out('os.path.exists', lambda _: True)
        self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
        self.stubs.Set(mount.Mount, 'instance_for_format',
                       staticmethod(fake_instance_for_format))
        self.assertEqual(disk_api.setup_container(image, container_dir),
                         '/dev/fake')

    def test_lxc_teardown_container(self):
        """Teardown issues the right umount/detach commands per device
        type (loop vs nbd, with and without a partition mapping)."""
        def proc_mounts(self, mount_point):
            # Map each container dir onto the device "mounted" there.
            mount_points = {
                '/mnt/loop/nopart': '/dev/loop0',
                '/mnt/loop/part': '/dev/mapper/loop0p1',
                '/mnt/nbd/nopart': '/dev/nbd15',
                '/mnt/nbd/part': '/dev/mapper/nbd15p1',
            }
            return mount_points[mount_point]
        self.stub_out('os.path.exists', lambda _: True)
        self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
        expected_commands = []
        disk_api.teardown_container('/mnt/loop/nopart')
        expected_commands += [
            ('umount', '/dev/loop0'),
            ('losetup', '--detach', '/dev/loop0'),
        ]
        disk_api.teardown_container('/mnt/loop/part')
        expected_commands += [
            ('umount', '/dev/mapper/loop0p1'),
            ('kpartx', '-d', '/dev/loop0'),
            ('losetup', '--detach', '/dev/loop0'),
        ]
        disk_api.teardown_container('/mnt/nbd/nopart')
        expected_commands += [
            ('blockdev', '--flushbufs', '/dev/nbd15'),
            ('umount', '/dev/nbd15'),
            ('qemu-nbd', '-d', '/dev/nbd15'),
        ]
        disk_api.teardown_container('/mnt/nbd/part')
        expected_commands += [
            ('blockdev', '--flushbufs', '/dev/nbd15'),
            ('umount', '/dev/mapper/nbd15p1'),
            ('kpartx', '-d', '/dev/nbd15'),
            ('qemu-nbd', '-d', '/dev/nbd15'),
        ]
        # NOTE(thomasem): Not adding any commands in this case, because we're
        # not expecting an additional umount for LocalBlockImages. This is to
        # assert that no additional commands are run in this case.
        disk_api.teardown_container('/dev/volume-group/uuid_disk')
        self.assertEqual(self.executes, expected_commands)

    def test_lxc_teardown_container_with_namespace_cleaned(self):
        """With an explicit container_root_device, teardown skips the
        umount (the namespace already cleaned it) and only detaches."""
        def proc_mounts(self, mount_point):
            return None
        self.stub_out('os.path.exists', lambda _: True)
        self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
        expected_commands = []
        disk_api.teardown_container('/mnt/loop/nopart', '/dev/loop0')
        expected_commands += [
            ('losetup', '--detach', '/dev/loop0'),
        ]
        disk_api.teardown_container('/mnt/loop/part', '/dev/loop0')
        expected_commands += [
            ('losetup', '--detach', '/dev/loop0'),
        ]
        disk_api.teardown_container('/mnt/nbd/nopart', '/dev/nbd15')
        expected_commands += [
            ('qemu-nbd', '-d', '/dev/nbd15'),
        ]
        disk_api.teardown_container('/mnt/nbd/part', '/dev/nbd15')
        expected_commands += [
            ('qemu-nbd', '-d', '/dev/nbd15'),
        ]
        self.assertEqual(self.executes, expected_commands)
| |
# -*- coding: utf-8 -*-
# Copyright 2021 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contain a backend that saves the unitary of a quantum circuit."""
import itertools
import math
import random
import warnings
from copy import deepcopy
import numpy as np
from projectq.cengines import BasicEngine
from projectq.meta import LogicalQubitIDTag, get_control_count, has_negative_control
from projectq.ops import AllocateQubitGate, DeallocateQubitGate, FlushGate, MeasureGate
from projectq.types import WeakQubitRef
def _qidmask(target_ids, control_ids, n_qubits):
"""
Calculate index masks.
Args:
target_ids (list): list of target qubit indices
control_ids (list): list of control qubit indices
control_state (list): list of states for the control qubits (0 or 1)
n_qubits (int): number of qubits
"""
mask_list = []
perms = np.array([x[::-1] for x in itertools.product("01", repeat=n_qubits)]).astype(int)
all_ids = np.array(range(n_qubits))
irel_ids = np.delete(all_ids, control_ids + target_ids)
if len(control_ids) > 0:
cmask = np.where(np.all(perms[:, control_ids] == [1] * len(control_ids), axis=1))
else:
cmask = np.array(range(perms.shape[0]))
if len(irel_ids) > 0:
irel_perms = np.array([x[::-1] for x in itertools.product("01", repeat=len(irel_ids))]).astype(int)
for i in range(2 ** len(irel_ids)):
irel_mask = np.where(np.all(perms[:, irel_ids] == irel_perms[i], axis=1))
common = np.intersect1d(irel_mask, cmask)
if len(common) > 0:
mask_list.append(common)
else:
irel_mask = np.array(range(perms.shape[0]))
mask_list.append(np.intersect1d(irel_mask, cmask))
return mask_list
class UnitarySimulator(BasicEngine):
    """
    Simulator engine aimed at calculating the unitary transformation that represents the current quantum circuit.
    Attributes:
        unitary (np.ndarray): Current unitary representing the quantum circuit being processed so far.
        history (list<np.ndarray>): List of previous quantum circuit unitaries.
    Note:
        The current implementation of this backend resets the unitary after the first gate that is neither a qubit
        deallocation nor a measurement occurs after one of those two aforementioned gates.
        The old unitary can be accessed at anytime after such a situation occurs via the `history` property.
    .. code-block:: python
        eng = MainEngine(backend=UnitarySimulator(), engine_list=[])
        qureg = eng.allocate_qureg(3)
        All(X) | qureg
        eng.flush()
        All(Measure) | qureg
        eng.deallocate_qubit(qureg[1])
        X | qureg[0]  # WARNING: appending gate after measurements or deallocations resets the unitary
    """
    def __init__(self):
        """Initialize a UnitarySimulator object."""
        super().__init__()
        # Maps qubit id -> bit position within the unitary/state vector.
        self._qubit_map = {}
        # Unitary accumulated since the last reset (scalar 1 == empty circuit).
        self._unitary = [1]
        self._num_qubits = 0
        # False once a measurement/deallocation happened; the next other gate
        # then archives the unitary and starts over from identity.
        self._is_valid = True
        # True once the pending unitary has been applied to _state.
        self._is_flushed = False
        # Internal state vector, only used to sample measurement outcomes.
        self._state = [1]
        # Previously accumulated unitaries, archived on each reset.
        self._history = []
    @property
    def unitary(self):
        """
        Access the last unitary matrix directly.
        Returns:
            A numpy array which is the unitary matrix of the circuit.
        """
        return deepcopy(self._unitary)
    @property
    def history(self):
        """
        Access all previous unitary matrices.
        The current unitary matrix is appended to this list once a gate is received after either a measurement or a
        qubit deallocation has occurred.
        Returns:
            A list where the elements are all previous unitary matrices representing the circuit, separated by
            measurement/deallocate gates.
        """
        return deepcopy(self._history)
    def is_available(self, cmd):
        """
        Test whether a Command is supported by a compiler engine.
        Specialized implementation of is_available: The unitary simulator can deal with all arbitrarily-controlled gates
        which provide a gate-matrix (via gate.matrix).
        Args:
            cmd (Command): Command for which to check availability (single- qubit gate, arbitrary controls)
        Returns:
            True if it can be simulated and False otherwise.
        """
        if has_negative_control(cmd):
            return False
        if isinstance(cmd.gate, (AllocateQubitGate, DeallocateQubitGate, MeasureGate)):
            return True
        try:
            gate_mat = cmd.gate.matrix
            # Warn (but still accept) for gates acting on more than 6 qubits.
            if len(gate_mat) > 2 ** 6:
                warnings.warn("Potentially large matrix gate encountered! ({} qubits)".format(math.log2(len(gate_mat))))
            return True
        except AttributeError:
            # Gate exposes no matrix -> cannot be simulated here.
            return False
    def receive(self, command_list):
        """
        Receive a list of commands.
        Receive a list of commands from the previous engine and handle them:
        * update the unitary of the quantum circuit
        * update the internal quantum state if a measurement or a qubit deallocation occurs
        prior to sending them on to the next engine.
        Args:
            command_list (list<Command>): List of commands to execute on the simulator.
        """
        for cmd in command_list:
            self._handle(cmd)
        if not self.is_last_engine:
            self.send(command_list)
    def _flush(self):
        """Flush the simulator state."""
        # Apply the accumulated unitary to the state so that measurement
        # outcomes reflect every gate received so far.
        if not self._is_flushed:
            self._is_flushed = True
            self._state = self._unitary @ self._state
    def _handle(self, cmd):
        """
        Handle all commands.
        Args:
            cmd (Command): Command to handle.
        Raises:
            RuntimeError: If a measurement is performed before flush gate.
        """
        if isinstance(cmd.gate, AllocateQubitGate):
            # Grow by one qubit: tensor the unitary with a 2x2 identity and
            # double the state vector length.
            # NOTE(review): extend() assumes _state is still a Python list;
            # after a flush _state becomes an np.ndarray, so allocating a
            # qubit right after a flush looks unsupported — TODO confirm.
            self._qubit_map[cmd.qubits[0][0].id] = self._num_qubits
            self._num_qubits += 1
            self._unitary = np.kron(np.identity(2), self._unitary)
            self._state.extend([0] * len(self._state))
        elif isinstance(cmd.gate, DeallocateQubitGate):
            # Shift down the positions of all qubits above the removed one and
            # invalidate the unitary (next other gate triggers a reset).
            pos = self._qubit_map[cmd.qubits[0][0].id]
            self._qubit_map = {key: value - 1 if value > pos else value for key, value in self._qubit_map.items()}
            self._num_qubits -= 1
            self._is_valid = False
        elif isinstance(cmd.gate, MeasureGate):
            self._is_valid = False
            if not self._is_flushed:
                raise RuntimeError(
                    'Please make sure all previous gates are flushed before measurement so the state gets updated'
                )
            if get_control_count(cmd) != 0:
                raise ValueError('Cannot have control qubits with a measurement gate!')
            all_qubits = [qb for qr in cmd.qubits for qb in qr]
            measurements = self.measure_qubits([qb.id for qb in all_qubits])
            for qb, res in zip(all_qubits, measurements):
                # Check if a mapper assigned a different logical id
                for tag in cmd.tags:
                    if isinstance(tag, LogicalQubitIDTag):
                        qb = WeakQubitRef(qb.engine, tag.logical_qubit_id)
                        break
                self.main_engine.set_measurement_result(qb, res)
        elif isinstance(cmd.gate, FlushGate):
            self._flush()
        else:
            if not self._is_valid:
                # A gate arrived after a measurement/deallocation: archive the
                # current unitary and restart from the identity.
                self._flush()
                warnings.warn(
                    "Processing of other gates after a qubit deallocation or measurement will reset the unitary,"
                    "previous unitary can be accessed in history"
                )
                self._history.append(self._unitary)
                self._unitary = np.identity(2 ** self._num_qubits, dtype=complex)
                self._state = np.array([1] + ([0] * (2 ** self._num_qubits - 1)), dtype=complex)
                self._is_valid = True
            self._is_flushed = False
            # Embed the (possibly controlled) gate matrix at every relevant
            # index block and compose it onto the accumulated unitary.
            mask_list = _qidmask(
                [self._qubit_map[qb.id] for qr in cmd.qubits for qb in qr],
                [self._qubit_map[qb.id] for qb in cmd.control_qubits],
                self._num_qubits,
            )
            for mask in mask_list:
                cache = np.identity(2 ** self._num_qubits, dtype=complex)
                cache[np.ix_(mask, mask)] = cmd.gate.matrix
                self._unitary = cache @ self._unitary
    def measure_qubits(self, ids):
        """
        Measure the qubits with IDs ids and return a list of measurement outcomes (True/False).
        Args:
            ids (list<int>): List of qubit IDs to measure.
        Returns:
            List of measurement results (containing either True or False).
        """
        # Sample one basis state with probability |amplitude|^2 by walking the
        # cumulative distribution until it exceeds a uniform random number.
        random_outcome = random.random()
        val = 0.0
        i_picked = 0
        while val < random_outcome and i_picked < len(self._state):
            val += np.abs(self._state[i_picked]) ** 2
            i_picked += 1
        i_picked -= 1
        pos = [self._qubit_map[ID] for ID in ids]
        res = [False] * len(pos)
        # Read off the measured bits and build the (mask, val) pair used to
        # project the state onto the observed outcome.
        mask = 0
        val = 0
        for i, _pos in enumerate(pos):
            res[i] = ((i_picked >> _pos) & 1) == 1
            mask |= 1 << _pos
            val |= (res[i] & 1) << _pos
        # Collapse: zero out amplitudes inconsistent with the outcome, then
        # renormalize the surviving ones.
        nrm = 0.0
        for i, _state in enumerate(self._state):
            if (mask & i) != val:
                self._state[i] = 0.0
            else:
                nrm += np.abs(_state) ** 2
        self._state *= 1.0 / np.sqrt(nrm)
        return res
| |
"""Tests for homekit_controller config flow."""
from unittest import mock
import unittest.mock
from unittest.mock import AsyncMock, patch
import aiohomekit
from aiohomekit.exceptions import AuthenticationError
from aiohomekit.model import Accessories, Accessory
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
import pytest
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.components.homekit_controller import config_flow
from homeassistant.components.homekit_controller.const import KNOWN_DEVICES
from homeassistant.helpers import device_registry
from tests.common import MockConfigEntry, mock_device_registry
# Exceptions raised while starting pairing that should re-render the pairing
# form with the given error key.
PAIRING_START_FORM_ERRORS = [
    (KeyError, "pairing_failed"),
]
# Exceptions raised while starting pairing that should abort the flow.
PAIRING_START_ABORT_ERRORS = [
    (aiohomekit.AccessoryNotFoundError, "accessory_not_found_error"),
    (aiohomekit.UnavailableError, "already_paired"),
]
# Exceptions raised while starting pairing that should ask the user to retry
# later (device busy, too many attempts, protocol hiccup).
PAIRING_TRY_LATER_ERRORS = [
    (aiohomekit.BusyError, "busy_error"),
    (aiohomekit.MaxTriesError, "max_tries_error"),
    (IndexError, "protocol_error"),
]
# Exceptions raised while finishing pairing that should re-render the form.
PAIRING_FINISH_FORM_ERRORS = [
    (aiohomekit.exceptions.MalformedPinError, "authentication_error"),
    (aiohomekit.MaxPeersError, "max_peers_error"),
    (aiohomekit.AuthenticationError, "authentication_error"),
    (aiohomekit.UnknownError, "unknown_error"),
    (KeyError, "pairing_failed"),
]
# Exceptions raised while finishing pairing that should abort the flow.
PAIRING_FINISH_ABORT_ERRORS = [
    (aiohomekit.AccessoryNotFoundError, "accessory_not_found_error")
]
# Setup codes that are syntactically valid but trivially guessable.
INSECURE_PAIRING_CODES = [
    "111-11-111",
    "123-45-678",
    "22222222",
    "111-11-111 ",
    " 111-11-111",
]
# Setup codes that do not match the HomeKit pin format at all.
INVALID_PAIRING_CODES = [
    "aaa-aa-aaa",
    "aaa-11-aaa",
    "111-aa-aaa",
    "aaa-aa-111",
    "1111-1-111",
    "a111-11-111",
    "111-11-111a",
    "1111111",
]
# Setup codes that should be accepted (possibly after normalization).
VALID_PAIRING_CODES = [
    "114-11-111",
    "123-45-679",
    "123-45-679 ",
    "11121111",
    "98765432",
    " 98765432 ",
]
def _setup_flow_handler(hass, pairing=None):
    """Return a flow handler wired up with a mocked controller and discovery."""
    handler = config_flow.HomekitControllerFlowHandler()
    handler.hass = hass
    handler.context = {}

    # start_pairing resolves to a finish_pairing coroutine that yields `pairing`.
    mock_discovery = mock.Mock()
    mock_discovery.device_id = "00:00:00:00:00:00"
    mock_discovery.start_pairing = unittest.mock.AsyncMock(
        return_value=unittest.mock.AsyncMock(return_value=pairing)
    )

    handler.controller = mock.Mock()
    handler.controller.pairings = {}
    handler.controller.find_ip_by_device_id = unittest.mock.AsyncMock(
        return_value=mock_discovery
    )
    return handler
@pytest.mark.parametrize("pairing_code", INVALID_PAIRING_CODES)
def test_invalid_pairing_codes(pairing_code):
    """Test ensure_pin_format raises MalformedPinError for an invalid pin code."""
    with pytest.raises(aiohomekit.exceptions.MalformedPinError):
        config_flow.ensure_pin_format(pairing_code)
@pytest.mark.parametrize("pairing_code", INSECURE_PAIRING_CODES)
def test_insecure_pairing_codes(pairing_code):
    """Test ensure_pin_format raises InsecureSetupCode for a trivial setup code."""
    with pytest.raises(config_flow.InsecureSetupCode):
        config_flow.ensure_pin_format(pairing_code)
    # Explicitly opting in to insecure setup codes must not raise.
    config_flow.ensure_pin_format(pairing_code, allow_insecure_setup_codes=True)
@pytest.mark.parametrize("pairing_code", VALID_PAIRING_CODES)
def test_valid_pairing_codes(pairing_code):
    """Test ensure_pin_format corrects format for a valid pin in an alternative format."""
    valid_pin = config_flow.ensure_pin_format(pairing_code).split("-")
    # Normalized pins are always in XXX-XX-XXX form.
    assert len(valid_pin) == 3
    assert len(valid_pin[0]) == 3
    assert len(valid_pin[1]) == 2
    assert len(valid_pin[2]) == 3
def get_flow_context(hass, result):
    """Get the flow context from the result of async_init or async_configure."""
    for flow in hass.config_entries.flow.async_progress():
        if flow["flow_id"] == result["flow_id"]:
            return flow["context"]
    # Mirror next()'s behavior when no matching flow exists.
    raise StopIteration
def get_device_discovery_info(
    device, upper_case_props=False, missing_csharp=False
) -> zeroconf.ZeroconfServiceInfo:
    """Turn a aiohomekit format zeroconf entry into a homeassistant one."""
    record = device.info
    props = {
        "md": record["md"],
        "pv": record["pv"],
        zeroconf.ATTR_PROPERTIES_ID: device.device_id,
        "c#": record["c#"],
        "s#": record["s#"],
        "ff": record["ff"],
        "ci": record["ci"],
        "sf": 0x01,  # record["sf"],
        "sh": "",
    }
    info = zeroconf.ZeroconfServiceInfo(
        host=record["address"],
        hostname=record["name"],
        name=record["name"],
        port=record["port"],
        properties=props,
        type="_hap._tcp.local.",
    )
    if missing_csharp:
        # Simulate a device that does not advertise its config number.
        del info.properties["c#"]
    if upper_case_props:
        # Some devices advertise upper-cased TXT record keys.
        info.properties = {key.upper(): value for (key, value) in info.properties.items()}
    return info
def setup_mock_accessory(controller):
    """Add a bridge accessory to a test controller."""
    accessory = Accessory.create_with_info(
        name="Koogeek-LS1-20833F",
        manufacturer="Koogeek",
        model="LS1",
        serial_number="12345",
        firmware_revision="1.1",
    )
    # Give the accessory a single lightbulb service that starts switched off.
    lightbulb = accessory.add_service(ServicesTypes.LIGHTBULB)
    lightbulb.add_char(CharacteristicsTypes.ON).value = 0

    bridge = Accessories()
    bridge.add_accessory(accessory)
    return controller.add_device(bridge)
@pytest.mark.parametrize("upper_case_props", [True, False])
@pytest.mark.parametrize("missing_csharp", [True, False])
async def test_discovery_works(hass, controller, upper_case_props, missing_csharp):
    """Test a device being discovered."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device, upper_case_props, missing_csharp)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
    }
    # User initiates pairing - device enters pairing mode and displays code
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    # Pairing doesn't error and a config entry is created
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
    assert result["data"] == {}
async def test_abort_duplicate_flow(hass, controller):
    """A second discovery of the same device aborts while a flow is in progress."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    # The same device is discovered again while the first flow is still open.
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_in_progress"
async def test_pair_already_paired_1(hass, controller):
    """Discovery of a device flagged as already paired aborts the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Flag device as already paired (sf == 0 means "not pairable")
    discovery_info.properties["sf"] = 0x0
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_paired"
async def test_id_missing(hass, controller):
    """Test aborting when the zeroconf id property is missing."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Remove id from device
    del discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID]
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "invalid_properties"
async def test_discovery_ignored_model(hass, controller):
    """Discovery of a model on the ignore list aborts the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # Advertise a model name that the integration treats as ignorable.
    discovery_info.properties["md"] = "HHKBridge1,1"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "ignored_model"
async def test_discovery_ignored_hk_bridge(hass, controller):
    """Ensure we ignore homekit bridges and accessories created by the homekit integration."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    config_entry = MockConfigEntry(domain=config_flow.HOMEKIT_BRIDGE_DOMAIN, data={})
    config_entry.add_to_hass(hass)
    # Register the discovered MAC against a homekit (bridge) config entry.
    formatted_mac = device_registry.format_mac("AA:BB:CC:DD:EE:FF")
    dev_reg = mock_device_registry(hass)
    dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, formatted_mac)},
    )
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "ignored_model"
async def test_discovery_does_not_ignore_non_homekit(hass, controller):
    """Do not ignore devices that are not from the homekit integration."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    config_entry = MockConfigEntry(domain="not_homekit", data={})
    config_entry.add_to_hass(hass)
    # The MAC is registered, but against a non-homekit config entry.
    formatted_mac = device_registry.format_mac("AA:BB:CC:DD:EE:FF")
    dev_reg = mock_device_registry(hass)
    dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, formatted_mac)},
    )
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    # The pairing form is shown rather than aborting with ignored_model.
    assert result["type"] == "form"
async def test_discovery_broken_pairing_flag(hass, controller):
    """
    There is already a config entry for the pairing and its pairing flag is wrong in zeroconf.
    We have seen this particular implementation error in 2 different devices.
    """
    await controller.add_paired_device(Accessories(), "00:00:00:00:00:00")
    MockConfigEntry(
        domain="homekit_controller",
        data={"AccessoryPairingID": "00:00:00:00:00:00"},
        unique_id="00:00:00:00:00:00",
    ).add_to_hass(hass)
    # We just added a mock config entry so it must be visible in hass
    assert len(hass.config_entries.async_entries()) == 1
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Make sure that we are pairable (sf != 0 wrongly advertises "unpaired")
    assert discovery_info.properties["sf"] != 0x0
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    # Should still be paired.
    config_entry_count = len(hass.config_entries.async_entries())
    assert config_entry_count == 1
    # Even though discovered as pairable, we bail out as already paired.
    assert result["reason"] == "already_paired"
async def test_discovery_invalid_config_entry(hass, controller):
    """There is already a config entry for the pairing id but it's invalid."""
    pairing = await controller.add_paired_device(Accessories(), "00:00:00:00:00:00")
    MockConfigEntry(
        domain="homekit_controller",
        data={"AccessoryPairingID": "00:00:00:00:00:00"},
        unique_id="00:00:00:00:00:00",
    ).add_to_hass(hass)
    # We just added a mock config entry so it must be visible in hass
    assert len(hass.config_entries.async_entries()) == 1
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered; verifying the stored pairing fails authentication
    with patch.object(
        pairing,
        "list_accessories_and_characteristics",
        side_effect=AuthenticationError("Invalid pairing keys"),
    ):
        result = await hass.config_entries.flow.async_init(
            "homekit_controller",
            context={"source": config_entries.SOURCE_ZEROCONF},
            data=discovery_info,
        )
    # Discovery of a HKID that is in a pairable state but for which there is
    # already a config entry - in that case the stale config entry is
    # automatically removed.
    config_entry_count = len(hass.config_entries.async_entries())
    assert config_entry_count == 0
    # And new config flow should continue allowing user to set up a new pairing
    assert result["type"] == "form"
async def test_discovery_already_configured(hass, controller):
    """Discovery of an already configured device aborts but refreshes host/port."""
    entry = MockConfigEntry(
        domain="homekit_controller",
        data={
            "AccessoryIP": "4.4.4.4",
            "AccessoryPort": 66,
            "AccessoryPairingID": "00:00:00:00:00:00",
        },
        unique_id="00:00:00:00:00:00",
    )
    entry.add_to_hass(hass)
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Set device as already paired
    discovery_info.properties["sf"] = 0x00
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    # The stored connection details were refreshed from the discovery info.
    assert entry.data["AccessoryIP"] == discovery_info.host
    assert entry.data["AccessoryPort"] == discovery_info.port
async def test_discovery_already_configured_update_csharp(hass, controller):
    """Already configured and csharp changes."""
    entry = MockConfigEntry(
        domain="homekit_controller",
        data={
            "AccessoryIP": "4.4.4.4",
            "AccessoryPort": 66,
            "AccessoryPairingID": "AA:BB:CC:DD:EE:FF",
        },
        unique_id="aa:bb:cc:dd:ee:ff",
    )
    entry.add_to_hass(hass)
    connection_mock = AsyncMock()
    connection_mock.pairing.connect.reconnect_soon = AsyncMock()
    connection_mock.async_refresh_entity_map = AsyncMock()
    hass.data[KNOWN_DEVICES] = {"AA:BB:CC:DD:EE:FF": connection_mock}
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Set device as already paired
    discovery_info.properties["sf"] = 0x00
    # Advertise a bumped config number ("c#") so the entity map is stale.
    discovery_info.properties["c#"] = 99999
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    await hass.async_block_till_done()
    assert entry.data["AccessoryIP"] == discovery_info.host
    assert entry.data["AccessoryPort"] == discovery_info.port
    # The changed c# must trigger exactly one entity map refresh.
    assert connection_mock.async_refresh_entity_map.await_count == 1
@pytest.mark.parametrize("exception,expected", PAIRING_START_ABORT_ERRORS)
async def test_pair_abort_errors_on_start(hass, controller, exception, expected):
    """Errors raised while entering pairing mode that abort the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    # User initiates pairing - device refuses to enter pairing mode
    test_exc = exception("error")
    with patch.object(device, "start_pairing", side_effect=test_exc):
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "abort"
    assert result["reason"] == expected
@pytest.mark.parametrize("exception,expected", PAIRING_TRY_LATER_ERRORS)
async def test_pair_try_later_errors_on_start(hass, controller, exception, expected):
    """Errors raised while entering pairing mode that ask the user to retry later."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    # User initiates pairing - device refuses to enter pairing mode but may be successful after entering pairing mode or rebooting
    test_exc = exception("error")
    with patch.object(device, "start_pairing", side_effect=test_exc):
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result2["step_id"] == expected
    assert result2["type"] == "form"
    # Device is rebooted or placed into pairing mode as they have been instructed
    # We start pairing again
    result3 = await hass.config_entries.flow.async_configure(
        result2["flow_id"], user_input={"any": "key"}
    )
    # .. and successfully complete pair
    result4 = await hass.config_entries.flow.async_configure(
        result3["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result4["type"] == "create_entry"
    assert result4["title"] == "Koogeek-LS1-20833F"
@pytest.mark.parametrize("exception,expected", PAIRING_START_FORM_ERRORS)
async def test_pair_form_errors_on_start(hass, controller, exception, expected):
    """Errors raised while entering pairing mode that re-show the pairing form."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
    # User initiates pairing - device refuses to enter pairing mode
    test_exc = exception("error")
    with patch.object(device, "start_pairing", side_effect=test_exc):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={"pairing_code": "111-22-333"}
        )
    assert result["type"] == "form"
    assert result["errors"]["pairing_code"] == expected
    # The flow context must survive the error round-trip.
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
    # User gets back the form
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert result["errors"] == {}
    # User re-tries entering pairing code
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_ABORT_ERRORS)
async def test_pair_abort_errors_on_finish(hass, controller, exception, expected):
    """Errors raised while finishing pairing that abort the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
    # User initiates pairing - this triggers the device to show a pairing code
    # and then HA to show a pairing form
    finish_pairing = unittest.mock.AsyncMock(side_effect=exception("error"))
    with patch.object(device, "start_pairing", return_value=finish_pairing):
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
    # User enters pairing code; finish_pairing raises and the flow aborts
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "abort"
    assert result["reason"] == expected
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_FORM_ERRORS)
async def test_pair_form_errors_on_finish(hass, controller, exception, expected):
    """Errors raised while finishing pairing that re-show the pairing form."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
    # User initiates pairing - this triggers the device to show a pairing code
    # and then HA to show a pairing form
    finish_pairing = unittest.mock.AsyncMock(side_effect=exception("error"))
    with patch.object(device, "start_pairing", return_value=finish_pairing):
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
    # User enters pairing code; finish_pairing raises and the form re-renders
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "form"
    assert result["errors"]["pairing_code"] == expected
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
async def test_user_works(hass, controller):
    """Test user initiated flow discovers devices and pairs."""
    setup_mock_accessory(controller)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert get_flow_context(hass, result) == {
        "source": config_entries.SOURCE_USER,
    }
    # User picks the only discovered device.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"device": "TestDevice"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "source": config_entries.SOURCE_USER,
        "unique_id": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
    }
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
async def test_user_pairing_with_insecure_setup_code(hass, controller):
    """Pairing with an insecure setup code requires an explicit opt-in."""
    device = setup_mock_accessory(controller)
    device.pairing_code = "123-45-678"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert get_flow_context(hass, result) == {
        "source": config_entries.SOURCE_USER,
    }
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"device": "TestDevice"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "source": config_entries.SOURCE_USER,
        "unique_id": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
    }
    # First attempt with the insecure code is rejected with a form error.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "123-45-678"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert result["errors"] == {"pairing_code": "insecure_setup_code"}
    # Retrying with the explicit opt-in succeeds.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={"pairing_code": "123-45-678", "allow_insecure_setup_codes": True},
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
async def test_user_no_devices(hass, controller):
    """Test user initiated pairing where no devices discovered."""
    # No accessory was registered with the controller, so the flow aborts.
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "abort"
    assert result["reason"] == "no_devices"
async def test_user_no_unpaired_devices(hass, controller):
    """Test user initiated pairing where no unpaired devices discovered."""
    device = setup_mock_accessory(controller)

    # Pair the mock device so that it shows as paired in discovery
    finish_pairing = await device.start_pairing(device.device_id)
    await finish_pairing(device.pairing_code)

    # Device discovery is requested; already-paired devices are filtered out,
    # so the flow aborts as if nothing was found.
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "abort"
    assert result["reason"] == "no_devices"
async def test_unignore_works(hass, controller):
    """Test that unignoring a device rediscovers it and pairing completes."""
    device = setup_mock_accessory(controller)

    # Device is unignored
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_UNIGNORE},
        data={"unique_id": device.device_id},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_UNIGNORE,
    }

    # User initiates pairing by clicking on 'configure' - device enters pairing mode and displays code
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert result["step_id"] == "pair"

    # Pairing finalized
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
async def test_unignore_ignores_missing_devices(hass, controller):
    """Test that unignore rediscovery handles devices that have gone away."""
    setup_mock_accessory(controller)

    # Device is unignored, but the unique_id does not match any discovered
    # accessory (the mock device has id 00:00:00:00:00:00), so the flow aborts.
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_UNIGNORE},
        data={"unique_id": "00:00:00:00:00:01"},
    )
    assert result["type"] == "abort"
    assert result["reason"] == "no_devices"
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for sentencepiece_tokenizer."""
import os
import sys
import time
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf # pylint: disable=g-direct-tensorflow-import
import tensorflow_text
# Force loaded shared object symbols to be globally visible. This is needed so
# that the interpreter_wrapper, in one .so file, can see the op resolver
# in a different .so file. Note that this may already be set by default.
# pylint: disable=g-import-not-at-top,g-bad-import-order,unused-import
if hasattr(sys, "setdlopenflags") and hasattr(sys, "getdlopenflags"):
sys.setdlopenflags(sys.getdlopenflags() | os.RTLD_GLOBAL)
from tensorflow.lite.python import interpreter as interpreter_wrapper # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import resource_loader
from tensorflow_lite_support.custom_ops.python import sentencepiece_tokenizer
from tensorflow_lite_support.custom_ops.kernel.sentencepiece.py import pywrap_tflite_registerer
FLAGS = flags.FLAGS

# Path to the serialized sentencepiece model used by the tests, resolved
# relative to this file's data dependencies via resource_loader.
SENTENCEPIECE_MODEL_FILE = (
    "../kernel/sentencepiece/testdata/sentencepiece.model")
def _GetSentencepieceModel():
  """Load the serialized test sentencepiece model and return its raw bytes."""
  path = resource_loader.get_path_to_datafile(SENTENCEPIECE_MODEL_FILE)
  with open(path, "rb") as model_file:
    return model_file.read()
class SentencepieceTokenizerTest(tf.test.TestCase):
  """Checks the optimized tokenizer against tf.text and its TFLite conversion."""

  def setUp(self):
    super(SentencepieceTokenizerTest, self).setUp()
    # Serialized sentencepiece model shared by all test cases.
    self.sentencepiece_model = _GetSentencepieceModel()

  def _convert_and_run_tflite(self, model, input_data):
    """Convert a Keras `model` to TFLite and run it on `input_data`.

    Returns the value of the interpreter's first output tensor. Shared by
    all TFLite round-trip tests below to avoid repeating the converter and
    interpreter boilerplate.
    """
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
    # The sentencepiece tokenizer is registered as a TFLite custom op.
    converter.allow_custom_ops = True
    tflite_model = converter.convert()
    interpreter = interpreter_wrapper.InterpreterWithCustomOps(
        model_content=tflite_model,
        custom_op_registerers=["TFLite_SentencepieceTokenizerRegisterer"])
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    interpreter.set_tensor(input_details[0]["index"], input_data)
    interpreter.invoke()
    output_details = interpreter.get_output_details()
    return interpreter.get_tensor(output_details[0]["index"])

  def test_tftext_sentencepiece_tokenizer(self):
    """Check that the new tokenizer produces the same result that the tftext one."""
    tftext_sp = tensorflow_text.SentencepieceTokenizer(self.sentencepiece_model)
    opt_sp = sentencepiece_tokenizer.SentencepieceTokenizer(
        self.sentencepiece_model)

    input_text = [
        u" ", u"to be or not to be", u"ignored by length text1",
        u"ignored by length text2"
    ]
    tftext_tokenized = tftext_sp.tokenize(input_text)
    opt_tokenized = opt_sp.tokenize(input_text)
    self.assertAllEqual(tftext_tokenized, opt_tokenized)

  def test_tftext_sentencepiece_detokenizer(self):
    """Check that the new detokenizer produces the same result that the tftext one."""
    tftext_sp = tensorflow_text.SentencepieceTokenizer(self.sentencepiece_model)
    opt_sp = sentencepiece_tokenizer.SentencepieceTokenizer(
        self.sentencepiece_model)

    input_text = [
        u" ", u"to be or not to be", u"ignored by length text1",
        u"ignored by length text2"
    ]
    tftext_tokenized = tftext_sp.tokenize(input_text)

    # Check detokenizer
    tftext_detokenized = tftext_sp.detokenize(tftext_tokenized)
    opt_detokenized = opt_sp.detokenize(tftext_tokenized)
    self.assertAllEqual(tftext_detokenized, opt_detokenized)

  def test_tftext_sentencepiece_tokenizer_bos_eos(self):
    """Check that the new tokenizer produces the same result that the tftext one with bos and eos."""
    tftext_sp = tensorflow_text.SentencepieceTokenizer(
        self.sentencepiece_model, add_bos=True, add_eos=True)
    opt_sp = sentencepiece_tokenizer.SentencepieceTokenizer(
        self.sentencepiece_model, add_bos=True, add_eos=True)

    input_text = [
        u" ", u"to be or not to be", u"ignored by length text1",
        u"ignored by length text2"
    ]
    tftext_tokenized = tftext_sp.tokenize(input_text)
    opt_tokenized = opt_sp.tokenize(input_text)
    self.assertAllEqual(tftext_tokenized, opt_tokenized)

  def test_tflite_opt_sentence_tokenizer(self):
    """Check that a TFLite-converted Keras model tokenizes identically to TF."""

    class TokenizerLayer(tf.keras.layers.Layer):
      """Wraps the custom tokenizer so it can live in a Keras model."""

      def __init__(self, sentencepiece_model, **kwargs):
        super(TokenizerLayer, self).__init__(**kwargs)
        self.sp = sentencepiece_tokenizer.SentencepieceTokenizer(
            sentencepiece_model)

      def call(self, input_tensor, **kwargs):
        # flat_values flattens the ragged result into a dense 1-D tensor.
        return self.sp.tokenize(input_tensor).flat_values

    model = tf.keras.models.Sequential(
        [TokenizerLayer(self.sentencepiece_model)])
    input_data = np.array([[
        u" ", u"to be or not to be", u"ignored by length text1",
        u"ignored by length text2"
    ]])
    tf_result = model.predict(input_data)
    expected_result = [
        13, 36, 83, 131, 13, 36, 4, 3127, 152, 130, 30, 2424, 168, 1644, 1524,
        4, 3127, 152, 130, 30, 2424, 168, 1644, 636
    ]
    self.assertAllEqual(tf_result, expected_result)
    self.assertAllEqual(
        self._convert_and_run_tflite(model, input_data), expected_result)

  def test_tflite_opt_sentence_detokenizer(self):
    """Check that a TFLite-converted Keras model detokenizes identically to TF."""

    class DeTokenizerLayer(tf.keras.layers.Layer):
      """Wraps the custom detokenizer so it can live in a Keras model."""

      def __init__(self, sentencepiece_model, **kwargs):
        super(DeTokenizerLayer, self).__init__(**kwargs)
        self.sp = sentencepiece_tokenizer.SentencepieceTokenizer(
            sentencepiece_model)

      def call(self, input_tensor, **kwargs):
        return self.sp.detokenize(input_tensor)

    model = tf.keras.models.Sequential(
        [DeTokenizerLayer(self.sentencepiece_model)])
    input_data = np.array([[
        13, 36, 83, 131, 13, 36, 4, 3127, 152, 130, 30, 2424, 168, 1644, 1524,
        4, 3127, 152, 130, 30, 2424, 168, 1644, 636
    ]],
                          dtype=np.int32)
    tf_result = model.predict(input_data)
    expected_result = [
        "to be or not to be ignored by length text1 ignored by length text2"
    ]
    self.assertAllEqual(tf_result, expected_result)
    self.assertAllEqual(
        self._convert_and_run_tflite(model, input_data), expected_result)

  def test_tflite_opt_sentence_tokenizer_vocab_size(self):
    """Check that a TFLite-converted Keras model reports the same vocab size."""

    class TokenizerLayer(tf.keras.layers.Layer):
      """Exposes the tokenizer's vocab_size as a model output."""

      def __init__(self, sentencepiece_model, **kwargs):
        super(TokenizerLayer, self).__init__(**kwargs)
        self.sp = sentencepiece_tokenizer.SentencepieceTokenizer(
            sentencepiece_model)

      def call(self, input_tensor, **kwargs):
        return self.sp.vocab_size()

    model = tf.keras.models.Sequential(
        [TokenizerLayer(self.sentencepiece_model)])
    input_data = np.array([[""]])
    tf_result = model.predict(input_data)
    expected_result = 4000
    self.assertEqual(tf_result, expected_result)
    self.assertAllEqual(
        self._convert_and_run_tflite(model, input_data), expected_result)
class SentencepieceTokenizerBenchmark(tf.test.Benchmark):
  """Compares tokenization wall time of the optimized op against tf.text."""

  def benchmarkTokenizer(self):
    sp_model = _GetSentencepieceModel()
    test_text = [
        "This week we celebrate the casts and creatives who have come together"
        " to bring us our favorite.",
        "More Stacks products demonstrated commitment to excellent support.",
        "Test, test, test."
    ]

    tftext_sp = tensorflow_text.SentencepieceTokenizer(sp_model)
    opt_sp = sentencepiece_tokenizer.SentencepieceTokenizer(sp_model)
    iter_number = 1000
    # Time each implementation over the same corpus; "opt" runs first,
    # matching the original measurement order.
    for name, tokenizer in (("opt", opt_sp), ("tf.text", tftext_sp)):
      start = time.time()
      for _ in range(iter_number):
        _ = tokenizer.tokenize(test_text)
      self.report_benchmark(
          iters=iter_number, wall_time=time.time() - start, name=name)
# Run the tests (benchmarks run only when requested via --benchmarks).
if __name__ == "__main__":
  tf.test.main()
| |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches, embeds, and displays lyrics.
"""
import difflib
import errno
import itertools
import json
import struct
import os.path
import re
import requests
import unicodedata
from unidecode import unidecode
import warnings
import urllib
try:
import bs4
from bs4 import SoupStrainer
HAS_BEAUTIFUL_SOUP = True
except ImportError:
HAS_BEAUTIFUL_SOUP = False
try:
import langdetect
HAS_LANGDETECT = True
except ImportError:
HAS_LANGDETECT = False
try:
# PY3: HTMLParseError was removed in 3.5 as strict mode
# was deprecated in 3.3.
# https://docs.python.org/3.3/library/html.parser.html
from html.parser import HTMLParseError
except ImportError:
class HTMLParseError(Exception):
pass
from beets import plugins
from beets import ui
import beets
# Regular expressions used when stripping markup from scraped lyric pages.
DIV_RE = re.compile(r'<(/?)div>?', re.I)
COMMENT_RE = re.compile(r'<!--.*-->', re.S)
TAG_RE = re.compile(r'<[^>]*>')
# <br> variants, swallowing the newlines immediately around them.
BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I)

# Typographic punctuation mapped to ASCII equivalents before URL-encoding.
URL_CHARACTERS = {
    '\u2018': "'",
    '\u2019': "'",
    '\u201c': '"',
    '\u201d': '"',
    '\u2010': '-',
    '\u2011': '-',
    '\u2012': '-',
    '\u2013': '-',
    '\u2014': '-',
    '\u2015': '-',
    '\u2016': '-',
    '\u2026': '...',
}

# User-Agent sent with every outgoing HTTP request.
USER_AGENT = f'beets/{beets.__version__}'
# The content for the base index.rst generated in ReST mode.
REST_INDEX_TEMPLATE = '''Lyrics
======
* :ref:`Song index <genindex>`
* :ref:`search`
Artist index:
.. toctree::
:maxdepth: 1
:glob:
artists/*
'''
# The content for the base conf.py generated.
REST_CONF_TEMPLATE = '''# -*- coding: utf-8 -*-
master_doc = 'index'
project = 'Lyrics'
copyright = 'none'
author = 'Various Authors'
latex_documents = [
(master_doc, 'Lyrics.tex', project,
author, 'manual'),
]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ['search.html']
epub_tocdepth = 1
epub_tocdup = False
'''
# Utilities.
def unichar(i):
    """Return the character for code point *i*, with a UTF-32 fallback."""
    try:
        char = chr(i)
    except ValueError:
        # chr() rejected the value; decode it through a packed UTF-32 word.
        char = struct.pack('i', i).decode('utf-32')
    return char
def unescape(text):
    """Resolve &#xxx; HTML entities (and some others)."""
    if isinstance(text, bytes):
        # Scraped pages may arrive as bytes; drop undecodable sequences.
        text = text.decode('utf-8', 'ignore')
    # NOTE(review): the first literal appears to be a single character —
    # presumably U+00A0 (non-breaking space) being folded to a plain
    # space; confirm against the original file bytes.
    out = text.replace(' ', ' ')

    def replchar(m):
        # m.group(1) is the decimal code point captured from &#NNN;.
        num = m.group(1)
        return unichar(int(num))
    out = re.sub("&#(\\d+);", replchar, out)
    return out
def extract_text_between(html, start_marker, end_marker):
    """Return the text between the first `start_marker` and the next
    `end_marker` after it, or '' if either marker is missing.
    """
    pieces = html.split(start_marker, 1)
    if len(pieces) < 2:
        return ''
    inner = pieces[1].split(end_marker, 1)
    if len(inner) < 2:
        return ''
    return inner[0]
def search_pairs(item):
    """Yield a pairs of artists and titles to search for.

    The first item in the pair is the name of the artist, the second
    item is a list of song names.

    In addition to the artist and title obtained from the `item` the
    method tries to strip extra information like paranthesized suffixes
    and featured artists from the strings and add them as candidates.
    The artist sort name is added as a fallback candidate to help in
    cases where artist name includes special characters or is in a
    non-latin script.
    The method also tries to split multiple titles separated with `/`.
    """
    def generate_alternatives(string, patterns):
        """Generate string alternatives by extracting first matching group for
        each given pattern.
        """
        alternatives = [string]
        for pattern in patterns:
            match = re.search(pattern, string, re.IGNORECASE)
            if match:
                alternatives.append(match.group(1))
        return alternatives

    title, artist, artist_sort = item.title, item.artist, item.artist_sort

    patterns = [
        # Remove any featuring artists from the artists name
        fr"(.*?) {plugins.feat_tokens()}"]
    artists = generate_alternatives(artist, patterns)
    # Use the artist_sort as fallback only if it differs from artist to avoid
    # repeated remote requests with the same search terms
    if artist != artist_sort:
        artists.append(artist_sort)

    patterns = [
        # Remove a parenthesized suffix from a title string. Common
        # examples include (live), (remix), and (acoustic).
        r"(.+?)\s+[(].*[)]$",
        # Remove any featuring artists from the title
        r"(.*?) {}".format(plugins.feat_tokens(for_artist=False)),
        # Remove part of title after colon ':' for songs with subtitles
        r"(.+?)\s*:.*"]
    titles = generate_alternatives(title, patterns)

    # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
    # and each of them.
    multi_titles = []
    for title in titles:
        multi_titles.append([title])
        if '/' in title:
            multi_titles.append([x.strip() for x in title.split('/')])

    # Cartesian product: every artist candidate paired with every title list.
    return itertools.product(artists, multi_titles)
def slug(text):
    """Make a URL-safe, human-readable version of the given text

    This will do the following:

    1. decode unicode characters into ASCII
    2. shift everything to lowercase
    3. strip whitespace
    4. replace other non-word characters with dashes
    5. strip extra dashes

    This somewhat duplicates the :func:`Google.slugify` function but
    slugify is not as generic as this one, which can be reused
    elsewhere.
    """
    normalized = unidecode(text).lower().strip()
    return re.sub(r'\W+', '-', normalized).strip('-')
# Pick an HTML-parsing helper at import time depending on whether
# beautifulsoup4 is installed.
if HAS_BEAUTIFUL_SOUP:
    def try_parse_html(html, **kwargs):
        """Parse `html` with BeautifulSoup; return None on a parse error."""
        try:
            return bs4.BeautifulSoup(html, 'html.parser', **kwargs)
        except HTMLParseError:
            return None
else:
    def try_parse_html(html, **kwargs):
        """Fallback when beautifulsoup4 is unavailable: always None."""
        return None
class Backend:
    """Base class for lyrics sources.

    Subclasses set `URL_PATTERN` (with two %s slots for artist and title)
    and implement `fetch`. `REQUIRES_BS` marks backends that need
    beautifulsoup4.
    """

    REQUIRES_BS = False

    def __init__(self, config, log):
        self._log = log

    @staticmethod
    def _encode(s):
        """Encode the string for inclusion in a URL"""
        if isinstance(s, str):
            # Replace typographic punctuation with ASCII before quoting.
            for char, repl in URL_CHARACTERS.items():
                s = s.replace(char, repl)
            s = s.encode('utf-8', 'ignore')
        return urllib.parse.quote(s)

    def build_url(self, artist, title):
        """Build the lyrics page URL from title-cased artist and title."""
        return self.URL_PATTERN % (self._encode(artist.title()),
                                   self._encode(title.title()))

    def fetch_url(self, url):
        """Retrieve the content at a given URL, or return None if the source
        is unreachable.
        """
        try:
            # Disable the InsecureRequestWarning that comes from using
            # `verify=false`.
            # https://github.com/kennethreitz/requests/issues/2214
            # We're not overly worried about the NSA MITMing our lyrics scraper
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                r = requests.get(url, verify=False, headers={
                    'User-Agent': USER_AGENT,
                })
        except requests.RequestException as exc:
            self._log.debug('lyrics request failed: {0}', exc)
            return
        if r.status_code == requests.codes.ok:
            return r.text
        else:
            self._log.debug('failed to fetch: {0} ({1})', url, r.status_code)
            return None

    def fetch(self, artist, title):
        """Return lyrics text for the song, or None. Implemented by subclasses."""
        raise NotImplementedError()
class MusiXmatch(Backend):
    """Fetch lyrics by scraping musixmatch.com song pages."""

    # Characters MusiXmatch encodes specially in its URL slugs.
    REPLACEMENTS = {
        r'\s+': '-',
        '<': 'Less_Than',
        '>': 'Greater_Than',
        '#': 'Number_',
        r'[\[\{]': '(',
        r'[\]\}]': ')',
    }

    URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'

    @classmethod
    def _encode(cls, s):
        """Apply MusiXmatch slug replacements before the generic encoding."""
        for old, new in cls.REPLACEMENTS.items():
            s = re.sub(old, new, s)
        return super()._encode(s)

    def fetch(self, artist, title):
        """Return lyrics scraped from the song page, or None."""
        url = self.build_url(artist, title)

        html = self.fetch_url(url)
        if not html:
            return None
        if "We detected that your IP is blocked" in html:
            self._log.warning('we are blocked at MusixMatch: url %s failed'
                              % url)
            return None
        html_parts = html.split('<p class="mxm-lyrics__content')
        # Sometimes lyrics come in 2 or more parts
        lyrics_parts = []
        for html_part in html_parts:
            lyrics_parts.append(extract_text_between(html_part, '>', '</p>'))
        lyrics = '\n'.join(lyrics_parts)
        lyrics = lyrics.strip(',"').replace('\\n', '\n')
        # another odd case: sometimes only that string remains, for
        # missing songs. this seems to happen after being blocked
        # above, when filling in the CAPTCHA.
        if "Instant lyrics for all your music." in lyrics:
            return None
        # sometimes there are non-existent lyrics with some content
        if 'Lyrics | Musixmatch' in lyrics:
            return None
        return lyrics
class Genius(Backend):
    """Fetch lyrics from Genius via genius-api.

    Simply adapted from
    bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/
    """

    REQUIRES_BS = True

    base_url = "https://api.genius.com"

    def __init__(self, config, log):
        super().__init__(config, log)
        self.api_key = config['genius_api_key'].as_str()
        self.headers = {
            'Authorization': "Bearer %s" % self.api_key,
            'User-Agent': USER_AGENT,
        }

    def fetch(self, artist, title):
        """Fetch lyrics from genius.com

        Because genius doesn't allow accessing lyrics via the api,
        we first query the api for a url matching our artist & title,
        then attempt to scrape that url for the lyrics.
        """
        json = self._search(artist, title)
        if not json:
            self._log.debug('Genius API request returned invalid JSON')
            return None

        # find a matching artist in the json
        for hit in json["response"]["hits"]:
            hit_artist = hit["result"]["primary_artist"]["name"]

            if slug(hit_artist) == slug(artist):
                html = self.fetch_url(hit["result"]["url"])
                if not html:
                    return None
                return self._scrape_lyrics_from_html(html)

        self._log.debug('Genius failed to find a matching artist for \'{0}\'',
                        artist)
        return None

    def _search(self, artist, title):
        """Searches the genius api for a given artist and title

        https://docs.genius.com/#search-h2

        :returns: json response
        """
        search_url = self.base_url + "/search"
        data = {'q': title + " " + artist.lower()}
        try:
            response = requests.get(
                search_url, params=data, headers=self.headers)
        except requests.RequestException as exc:
            self._log.debug('Genius API request failed: {0}', exc)
            return None

        try:
            return response.json()
        except ValueError:
            return None

    def _scrape_lyrics_from_html(self, html):
        """Scrape lyrics from a given genius.com html"""
        soup = try_parse_html(html)
        if not soup:
            return

        # Remove script tags that they put in the middle of the lyrics.
        [h.extract() for h in soup('script')]

        # Most of the time, the page contains a div with class="lyrics" where
        # all of the lyrics can be found already correctly formatted
        # Sometimes, though, it packages the lyrics into separate divs, most
        # likely for easier ad placement
        lyrics_div = soup.find("div", class_="lyrics")
        if not lyrics_div:
            self._log.debug('Received unusual song page html')
            verse_div = soup.find("div",
                                  class_=re.compile("Lyrics__Container"))
            if not verse_div:
                if soup.find("div",
                             class_=re.compile("LyricsPlaceholder__Message"),
                             string="This song is an instrumental"):
                    self._log.debug('Detected instrumental')
                    return "[Instrumental]"
                else:
                    self._log.debug("Couldn't scrape page using known layouts")
                    return None

            lyrics_div = verse_div.parent
            # Preserve line structure and drop inline ads/footers.
            for br in lyrics_div.find_all("br"):
                br.replace_with("\n")
            ads = lyrics_div.find_all("div",
                                      class_=re.compile("InreadAd__Container"))
            for ad in ads:
                ad.replace_with("\n")
            footers = lyrics_div.find_all("div",
                                          class_=re.compile("Lyrics__Footer"))
            for footer in footers:
                footer.replace_with("")
        return lyrics_div.get_text()
class Tekstowo(Backend):
    """Fetch lyrics from Tekstowo.pl by scraping its search results."""

    REQUIRES_BS = True

    BASE_URL = 'http://www.tekstowo.pl'
    URL_PATTERN = BASE_URL + '/wyszukaj.html?search-title=%s&search-artist=%s'

    def fetch(self, artist, title):
        """Search for the song, follow the first hit, and extract its lyrics."""
        # NOTE: build_url's slots are (title, artist) for this search URL.
        url = self.build_url(title, artist)
        search_results = self.fetch_url(url)
        if not search_results:
            return None
        song_page_url = self.parse_search_results(search_results)
        if not song_page_url:
            return None

        song_page_html = self.fetch_url(song_page_url)
        if not song_page_html:
            return None
        return self.extract_lyrics(song_page_html)

    def parse_search_results(self, html):
        """Return the absolute URL of the first search hit, or None."""
        html = _scrape_strip_cruft(html)
        html = _scrape_merge_paragraphs(html)

        soup = try_parse_html(html)
        if not soup:
            return None

        # Walk down the expected page structure, bailing out at any layout
        # change: content div -> card div -> first song row -> link.
        content_div = soup.find("div", class_="content")
        if not content_div:
            return None

        card_div = content_div.find("div", class_="card")
        if not card_div:
            return None

        song_rows = card_div.find_all("div", class_="box-przeboje")
        if not song_rows:
            return None

        song_row = song_rows[0]
        if not song_row:
            return None

        link = song_row.find('a')
        if not link:
            return None

        return self.BASE_URL + link.get('href')

    def extract_lyrics(self, html):
        """Return the lyrics text from a song page, or None."""
        html = _scrape_strip_cruft(html)
        html = _scrape_merge_paragraphs(html)

        soup = try_parse_html(html)
        if not soup:
            return None

        lyrics_div = soup.select("div.song-text > div.inner-text")
        if not lyrics_div:
            return None

        return lyrics_div[0].get_text()
def remove_credits(text):
    """Remove first/last line of text if it contains the word 'lyrics'

    eg 'Lyrics by songsdatabase.com'
    """
    lines = text.split('\n')
    dropped_any = False
    # Only the outermost lines can carry a credit notice.
    for index in (0, -1):
        if lines and 'lyrics' in lines[index].lower():
            lines.pop(index)
            dropped_any = True
    return '\n'.join(lines) if dropped_any else text
def _scrape_strip_cruft(html, plain_text_out=False):
    """Clean up HTML

    Normalizes entities, line endings, and whitespace; folds <br> tags
    into newlines and removes <script> bodies. With `plain_text_out`,
    additionally strips comments and all remaining tags, leaving
    trimmed plain text.
    """
    html = unescape(html)

    html = html.replace('\r', '\n')  # Normalize EOL.
    html = re.sub(r' +', ' ', html)  # Whitespaces collapse.
    html = BREAK_RE.sub('\n', html)  # <br> eats up surrounding '\n'.
    html = re.sub(r'(?s)<(script).*?</\1>', '', html)  # Strip script tags.
    html = re.sub('\u2005', " ", html)  # replace unicode with regular space

    if plain_text_out:  # Strip remaining HTML tags
        html = COMMENT_RE.sub('', html)
        html = TAG_RE.sub('', html)

    # Trim each line and collapse runs of blank lines to at most one.
    html = '\n'.join([x.strip() for x in html.strip().split('\n')])
    html = re.sub(r'\n{3,}', r'\n\n', html)
    return html
def _scrape_merge_paragraphs(html):
html = re.sub(r'</p>\s*<p(\s*[^>]*)>', '\n', html)
return re.sub(r'<div .*>\s*</div>', '\n', html)
def scrape_lyrics_from_html(html):
    """Scrape lyrics from a URL. If no lyrics can be found, return None
    instead.
    """
    def is_text_notcode(text):
        """Heuristic: long, space-rich text without both '{' and ';'
        (which would suggest embedded CSS/JS rather than lyrics)."""
        length = len(text)
        return (length > 20 and
                text.count(' ') > length / 25 and
                (text.find('{') == -1 or text.find(';') == -1))
    html = _scrape_strip_cruft(html)
    html = _scrape_merge_paragraphs(html)

    # extract all long text blocks that are not code
    soup = try_parse_html(html, parse_only=SoupStrainer(text=is_text_notcode))
    if not soup:
        return None

    # Get the longest text element (if any).
    strings = sorted(soup.stripped_strings, key=len, reverse=True)
    if strings:
        return strings[0]
    else:
        return None
class Google(Backend):
    """Fetch lyrics from Google search results.

    Uses the Google Custom Search API to find candidate pages, then
    scrapes and sanity-checks them with heuristics.
    """

    REQUIRES_BS = True

    def __init__(self, config, log):
        super().__init__(config, log)
        self.api_key = config['google_API_key'].as_str()
        self.engine_id = config['google_engine_ID'].as_str()

    def is_lyrics(self, text, artist=None):
        """Determine whether the text seems to be valid lyrics.
        """
        if not text:
            return False
        bad_triggers_occ = []
        nb_lines = text.count('\n')
        if nb_lines <= 1:
            self._log.debug("Ignoring too short lyrics '{0}'", text)
            return False
        elif nb_lines < 5:
            bad_triggers_occ.append('too_short')
        else:
            # Lyrics look legit, remove credits to avoid being penalized
            # further down
            text = remove_credits(text)

        bad_triggers = ['lyrics', 'copyright', 'property', 'links']
        if artist:
            bad_triggers += [artist]

        # Count whole-word occurrences of each suspicious token.
        for item in bad_triggers:
            bad_triggers_occ += [item] * len(re.findall(r'\W%s\W' % item,
                                                        text, re.I))

        if bad_triggers_occ:
            self._log.debug('Bad triggers detected: {0}', bad_triggers_occ)
        return len(bad_triggers_occ) < 2

    def slugify(self, text):
        """Normalize a string and remove non-alphanumeric characters.
        """
        text = re.sub(r"[-'_\s]", '_', text)
        text = re.sub(r"_+", '_', text).strip('_')
        pat = r"([^,\(]*)\((.*?)\)"  # Remove content within parentheses
        text = re.sub(pat, r'\g<1>', text).strip()
        try:
            text = unicodedata.normalize('NFKD', text).encode('ascii',
                                                              'ignore')
            text = str(re.sub(r'[-\s]+', ' ', text.decode('utf-8')))
        except UnicodeDecodeError:
            self._log.exception("Failing to normalize '{0}'", text)
        return text

    BY_TRANS = ['by', 'par', 'de', 'von']
    LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']

    def is_page_candidate(self, url_link, url_title, title, artist):
        """Return True if the URL title makes it a good candidate to be a
        page that contains lyrics of title by artist.
        """
        title = self.slugify(title.lower())
        artist = self.slugify(artist.lower())
        sitename = re.search("//([^/]+)/.*",
                             self.slugify(url_link.lower())).group(1)
        url_title = self.slugify(url_title.lower())

        # Check if URL title contains song title (exact match)
        if url_title.find(title) != -1:
            return True

        # or try extracting song title from URL title and check if
        # they are close enough
        tokens = [by + '_' + artist for by in self.BY_TRANS] + \
                 [artist, sitename, sitename.replace('www.', '')] + \
                 self.LYRICS_TRANS
        tokens = [re.escape(t) for t in tokens]
        song_title = re.sub('(%s)' % '|'.join(tokens), '', url_title)

        song_title = song_title.strip('_|')
        typo_ratio = .9
        ratio = difflib.SequenceMatcher(None, song_title, title).ratio()
        return ratio >= typo_ratio

    def fetch(self, artist, title):
        """Search for candidate pages and return the first plausible lyrics."""
        query = f"{artist} {title}"
        url = 'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
            % (self.api_key, self.engine_id,
               urllib.parse.quote(query.encode('utf-8')))

        data = self.fetch_url(url)
        if not data:
            self._log.debug('google backend returned no data')
            return None
        try:
            data = json.loads(data)
        except ValueError as exc:
            self._log.debug('google backend returned malformed JSON: {}', exc)
            # FIX: previously fell through here, so the code below probed the
            # raw response *string* with `'error' in data` / `data.keys()`.
            # Bail out on an unparseable payload instead.
            return None
        if 'error' in data:
            reason = data['error']['errors'][0]['reason']
            self._log.debug('google backend error: {0}', reason)
            return None

        if 'items' in data.keys():
            for item in data['items']:
                url_link = item['link']
                url_title = item.get('title', '')
                if not self.is_page_candidate(url_link, url_title,
                                              title, artist):
                    continue
                html = self.fetch_url(url_link)
                if not html:
                    continue
                lyrics = scrape_lyrics_from_html(html)
                if not lyrics:
                    continue
                if self.is_lyrics(lyrics, artist):
                    self._log.debug('got lyrics from {0}',
                                    item['displayLink'])
                    return lyrics
        return None
class LyricsPlugin(plugins.BeetsPlugin):
    """Fetch, embed, and display song lyrics from several web sources."""

    # All known source names, and the backend class implementing each.
    SOURCES = ['google', 'musixmatch', 'genius', 'tekstowo']
    SOURCE_BACKENDS = {
        'google': Google,
        'musixmatch': MusiXmatch,
        'genius': Genius,
        'tekstowo': Tekstowo,
    }
    def __init__(self):
        """Set configuration defaults, initialize state, and build backends."""
        super().__init__()
        self.import_stages = [self.imported]
        self.config.add({
            'auto': True,
            'bing_client_secret': None,
            'bing_lang_from': [],
            'bing_lang_to': None,
            'google_API_key': None,
            'google_engine_ID': '009217259823014548361:lndtuqkycfu',
            'genius_api_key':
                "Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W"
                "76V-uFL5jks5dNvcGCdarqFjDhP9c",
            'fallback': None,
            'force': False,
            'local': False,
            'sources': self.SOURCES,
        })
        # Keep credentials out of `beet config` output.
        self.config['bing_client_secret'].redact = True
        self.config['google_API_key'].redact = True
        self.config['google_engine_ID'].redact = True
        self.config['genius_api_key'].redact = True

        # State information for the ReST writer.
        # First, the current artist we're writing.
        self.artist = 'Unknown artist'
        # The current album: False means no album yet.
        self.album = False
        # The current rest file content. None means the file is not
        # open yet.
        self.rest = None

        available_sources = list(self.SOURCES)
        sources = plugins.sanitize_choices(
            self.config['sources'].as_str_seq(), available_sources)
        # Sources needing beautifulsoup4 are dropped when it is missing.
        if not HAS_BEAUTIFUL_SOUP:
            sources = self.sanitize_bs_sources(sources)
        if 'google' in sources:
            if not self.config['google_API_key'].get():
                # We log a *debug* message here because the default
                # configuration includes `google`. This way, the source
                # is silent by default but can be enabled just by
                # setting an API key.
                self._log.debug('Disabling google source: '
                                'no API key configured.')
                sources.remove('google')

        self.config['bing_lang_from'] = [
            x.lower() for x in self.config['bing_lang_from'].as_str_seq()]
        self.bing_auth_token = None

        if not HAS_LANGDETECT and self.config['bing_client_secret'].get():
            self._log.warning('To use bing translations, you need to '
                              'install the langdetect module. See the '
                              'documentation for further details.')

        # Instantiate one backend object per enabled source, in order.
        self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)
                         for source in sources]
def sanitize_bs_sources(self, sources):
enabled_sources = []
for source in sources:
if self.SOURCE_BACKENDS[source].REQUIRES_BS:
self._log.debug('To use the %s lyrics source, you must '
'install the beautifulsoup4 module. See '
'the documentation for further details.'
% source)
else:
enabled_sources.append(source)
return enabled_sources
    def get_bing_access_token(self):
        """Request an OAuth token for the Bing Translate API.

        Returns a "Bearer ..." authorization string, or None (after
        logging a warning) when the token request fails.
        """
        params = {
            'client_id': 'beets',
            'client_secret': self.config['bing_client_secret'],
            'scope': "https://api.microsofttranslator.com",
            'grant_type': 'client_credentials',
        }

        oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'
        oauth_token = json.loads(requests.post(
            oauth_url,
            data=urllib.parse.urlencode(params)).content)
        if 'access_token' in oauth_token:
            return "Bearer " + oauth_token['access_token']
        else:
            self._log.warning('Could not get Bing Translate API access token.'
                              ' Check your "bing_client_secret" password')
def commands(self):
cmd = ui.Subcommand('lyrics', help='fetch song lyrics')
cmd.parser.add_option(
'-p', '--print', dest='printlyr',
action='store_true', default=False,
help='print lyrics to console',
)
cmd.parser.add_option(
'-r', '--write-rest', dest='writerest',
action='store', default=None, metavar='dir',
help='write lyrics to given directory as ReST files',
)
cmd.parser.add_option(
'-f', '--force', dest='force_refetch',
action='store_true', default=False,
help='always re-download lyrics',
)
cmd.parser.add_option(
'-l', '--local', dest='local_only',
action='store_true', default=False,
help='do not fetch missing lyrics',
)
def func(lib, opts, args):
# The "write to files" option corresponds to the
# import_write config value.
write = ui.should_write()
if opts.writerest:
self.writerest_indexes(opts.writerest)
items = lib.items(ui.decargs(args))
for item in items:
if not opts.local_only and not self.config['local']:
self.fetch_item_lyrics(
lib, item, write,
opts.force_refetch or self.config['force'],
)
if item.lyrics:
if opts.printlyr:
ui.print_(item.lyrics)
if opts.writerest:
self.appendrest(opts.writerest, item)
if opts.writerest and items:
# flush last artist & write to ReST
self.writerest(opts.writerest)
ui.print_('ReST files generated. to build, use one of:')
ui.print_(' sphinx-build -b html %s _build/html'
% opts.writerest)
ui.print_(' sphinx-build -b epub %s _build/epub'
% opts.writerest)
ui.print_((' sphinx-build -b latex %s _build/latex '
'&& make -C _build/latex all-pdf')
% opts.writerest)
cmd.func = func
return [cmd]
def appendrest(self, directory, item):
"""Append the item to an ReST file
This will keep state (in the `rest` variable) in order to avoid
writing continuously to the same files.
"""
if slug(self.artist) != slug(item.albumartist):
# Write current file and start a new one ~ item.albumartist
self.writerest(directory)
self.artist = item.albumartist.strip()
self.rest = "%s\n%s\n\n.. contents::\n :local:\n\n" \
% (self.artist,
'=' * len(self.artist))
if self.album != item.album:
tmpalbum = self.album = item.album.strip()
if self.album == '':
tmpalbum = 'Unknown album'
self.rest += "{}\n{}\n\n".format(tmpalbum, '-' * len(tmpalbum))
title_str = ":index:`%s`" % item.title.strip()
block = '| ' + item.lyrics.replace('\n', '\n| ')
self.rest += "{}\n{}\n\n{}\n\n".format(title_str,
'~' * len(title_str),
block)
def writerest(self, directory):
"""Write self.rest to a ReST file
"""
if self.rest is not None and self.artist is not None:
path = os.path.join(directory, 'artists',
slug(self.artist) + '.rst')
with open(path, 'wb') as output:
output.write(self.rest.encode('utf-8'))
def writerest_indexes(self, directory):
"""Write conf.py and index.rst files necessary for Sphinx
We write minimal configurations that are necessary for Sphinx
to operate. We do not overwrite existing files so that
customizations are respected."""
try:
os.makedirs(os.path.join(directory, 'artists'))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
indexfile = os.path.join(directory, 'index.rst')
if not os.path.exists(indexfile):
with open(indexfile, 'w') as output:
output.write(REST_INDEX_TEMPLATE)
conffile = os.path.join(directory, 'conf.py')
if not os.path.exists(conffile):
with open(conffile, 'w') as output:
output.write(REST_CONF_TEMPLATE)
def imported(self, session, task):
"""Import hook for fetching lyrics automatically.
"""
if self.config['auto']:
for item in task.imported_items():
self.fetch_item_lyrics(session.lib, item,
False, self.config['force'])
def fetch_item_lyrics(self, lib, item, write, force):
"""Fetch and store lyrics for a single item. If ``write``, then the
lyrics will also be written to the file itself.
"""
# Skip if the item already has lyrics.
if not force and item.lyrics:
self._log.info('lyrics already present: {0}', item)
return
lyrics = None
for artist, titles in search_pairs(item):
lyrics = [self.get_lyrics(artist, title) for title in titles]
if any(lyrics):
break
lyrics = "\n\n---\n\n".join([l for l in lyrics if l])
if lyrics:
self._log.info('fetched lyrics: {0}', item)
if HAS_LANGDETECT and self.config['bing_client_secret'].get():
lang_from = langdetect.detect(lyrics)
if self.config['bing_lang_to'].get() != lang_from and (
not self.config['bing_lang_from'] or (
lang_from in self.config[
'bing_lang_from'].as_str_seq())):
lyrics = self.append_translation(
lyrics, self.config['bing_lang_to'])
else:
self._log.info('lyrics not found: {0}', item)
fallback = self.config['fallback'].get()
if fallback:
lyrics = fallback
else:
return
item.lyrics = lyrics
if write:
item.try_write()
item.store()
def get_lyrics(self, artist, title):
"""Fetch lyrics, trying each source in turn. Return a string or
None if no lyrics were found.
"""
for backend in self.backends:
lyrics = backend.fetch(artist, title)
if lyrics:
self._log.debug('got lyrics from backend: {0}',
backend.__class__.__name__)
return _scrape_strip_cruft(lyrics, True)
def append_translation(self, text, to_lang):
from xml.etree import ElementTree
if not self.bing_auth_token:
self.bing_auth_token = self.get_bing_access_token()
if self.bing_auth_token:
# Extract unique lines to limit API request size per song
text_lines = set(text.split('\n'))
url = ('https://api.microsofttranslator.com/v2/Http.svc/'
'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))
r = requests.get(url,
headers={"Authorization ": self.bing_auth_token})
if r.status_code != 200:
self._log.debug('translation API error {}: {}', r.status_code,
r.text)
if 'token has expired' in r.text:
self.bing_auth_token = None
return self.append_translation(text, to_lang)
return text
lines_translated = ElementTree.fromstring(
r.text.encode('utf-8')).text
# Use a translation mapping dict to build resulting lyrics
translations = dict(zip(text_lines, lines_translated.split('|')))
result = ''
for line in text.split('\n'):
result += '{} / {}\n'.format(line, translations[line])
return result
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class NetworkInterfacesOperations(object):
    """NetworkInterfacesOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2015-06-15".
    """
    # Expose the generated models module on the operations class.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed API version for this generated operations class.
        self.api_version = "2015-06-15"
        self.config = config
    def _delete_initial(
            self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
        # Issues the DELETE request that starts the long-running operation;
        # returns the raw response wrapper only when `raw` is requested.
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified network interface.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result
        # Construct and send request
        def long_running_send():
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            # Propagate the original client request id for log correlation.
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets information about the specified network interface.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: NetworkInterface or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2015_06_15.models.NetworkInterface or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkInterface', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def _create_or_update_initial(
            self, resource_group_name, network_interface_name, parameters, custom_headers=None, raw=False, **operation_config):
        # Issues the PUT request that starts the long-running operation.
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'NetworkInterface')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkInterface', response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkInterface', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, network_interface_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a network interface.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param parameters: Parameters supplied to the create or update network
         interface operation.
        :type parameters:
         ~azure.mgmt.network.v2015_06_15.models.NetworkInterface
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns
         NetworkInterface or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result
        # Construct and send request
        def long_running_send():
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            # Propagate the original client request id for log correlation.
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('NetworkInterface', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all network interfaces in a subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of NetworkInterface
        :rtype:
         ~azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Follow-up page: the service hands back a complete URL.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all network interfaces in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of NetworkInterface
        :rtype:
         ~azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Follow-up page: the service hands back a complete URL.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_virtual_machine_scale_set_vm_network_interfaces(
            self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, custom_headers=None, raw=False, **operation_config):
        """Gets information about all network interfaces in a virtual machine in a
        virtual machine scale set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine
         scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of NetworkInterface
        :rtype:
         ~azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Follow-up page: the service hands back a complete URL.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_virtual_machine_scale_set_network_interfaces(
            self, resource_group_name, virtual_machine_scale_set_name, custom_headers=None, raw=False, **operation_config):
        """Gets all network interfaces in a virtual machine scale set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine
         scale set.
        :type virtual_machine_scale_set_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of NetworkInterface
        :rtype:
         ~azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Follow-up page: the service hands back a complete URL.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def get_virtual_machine_scale_set_network_interface(
            self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Get the specified network interface in a virtual machine scale set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine
         scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: NetworkInterface or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2015_06_15.models.NetworkInterface or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
            'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkInterface', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/fitting/minuit.py
# Module with decoration of some (T)Minuit functions for efficient use in python
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2011-06-07
# =============================================================================
"""Decoration for some (T)Minuit functions for efficient use in python
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__date__ = "2011-06-07"
__all__ = ()
# =============================================================================
import ROOT, ctypes
from builtins import range
from ostap.core.core import cpp, VE
from ostap.core.ostap_types import integer_types, string_types
from ostap.core.meta_info import root_info
from ostap.logger.colorized import allright, attention
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
# pick an explicit logger name when the module is executed directly
if '__main__' == __name__ : logger = getLogger ( 'ostap.fitting.minuit' )
else                      : logger = getLogger ( __name__ )
# =============================================================================
logger.debug ( 'Some useful decorations for (T)Minuit functions')
# =============================================================================
## types accepted as a numeric parameter index
partypes = integer_types
# =============================================================================
## return codes from MINUIT commands; the problematic ones (1-4) are colorized
return_codes = {
    0  : allright  ( 'command executed normally' ) ,
    1  : attention ( 'command is blank, ignored' ) ,
    2  : attention ( 'command line unreadable, ignored' ) ,
    3  : attention ( 'unknown command, ignored' ) ,
    4  : attention ( 'abnormal termination (e.g., MIGRAD not converged)' ) ,
    5  : 'command is a request to read PARAMETER definitions' ,
    6  : "'SET INPUT' command" ,
    7  : "'SET TITLE' command" ,
    8  : "'SET COVAR' command" ,
    9  : 'reserved' ,
    10 : 'END command' ,
    11 : 'EXIT or STOP command' ,
    12 : 'RETURN command' ,
}
# =============================================================================
## get a parameter from (T)Minuit, either by index or by name
#  @code
#  mn = ...               # TMinuit object
#  p1 = mn[0]             # get the parameter by index
#  p1 = mn.par(0)         # ditto
#  p1 = mn.parameter(0)   # ditto
#  p2 = mn['par2']        # get the parameter by name
#  p2 = mn.par('par2')    # ditto
#  p2 = mn.parameter('par2') # ditto
#  @endcode
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2012-09-28
def _mn_par_ ( self , i ) :
    """Get a parameter from (T)Minuit as a value-with-error object
    >>> mn = ...               # TMinuit object
    >>> p1 = mn[0]             # get the parameter by index
    >>> p1 = mn.par(0)         # ditto
    >>> p1 = mn.parameter(0)   # ditto
    >>> p2 = mn['par2']        # get the parameter by name
    >>> p2 = mn.par('par2')    # ditto
    >>> p2 = mn.parameter('par2') # ditto
    """
    ## reject unknown parameters (works both for indices and names)
    if i not in self : raise IndexError
    ## translate a name into the numeric index
    if isinstance ( i , string_types ) : i = _mn_index_ ( self , i )
    ## C-style output arguments for TMinuit::GetParameter
    value = ctypes.c_double ( 0 )
    error = ctypes.c_double ( 0 )
    self.GetParameter ( i , value , error )
    ## VE keeps (value, variance), hence the squared error
    return VE ( float ( value.value ) , float ( error.value ) ** 2 )
# =============================================================================
## Is the given parameter known to MINUIT ?
#  @code
#  minuit = ...
#  if 5    in minuit : ...
#  if 'p6' in minuit : ...
#  @endcode
def _mn_contains_ ( self , p ) :
    """Is the given parameter known to MINUIT ?
    >>> minuit = ...
    >>> if 5    in minuit : ...
    >>> if 'p6' in minuit : ...
    """
    ## numeric index: a plain range check
    if isinstance ( p , partypes ) :
        return 0 <= p < self.GetNumPars()
    ## name: scan all defined parameters and compare names
    if isinstance ( p , string_types ) :
        val = ctypes.c_double ( 0 )
        err = ctypes.c_double ( 0 )
        low = ctypes.c_double ( 0 )
        up  = ctypes.c_double ( 0 )
        idx = ctypes.c_int    ( 0 )
        for j in self :
            label = ROOT.TString()
            self.mnpout ( j , label , val , err , low , up , idx )
            if 0 <= idx.value and str ( label ) == p : return True
        return False

ROOT.TMinuit . __contains__ = _mn_contains_
ROOT.TMinuit . __len__      = lambda s : s.GetNumPars()
ROOT.TMinuit . par          = _mn_par_
ROOT.TMinuit . parameter    = _mn_par_
ROOT.TMinuit . __getitem__  = _mn_par_
ROOT.TMinuit . __call__     = _mn_par_
# =============================================================================
## get the parameter name
#  @code
#  minuit = ...
#  name   = minuit.par_name ( 4 )
#  @endcode
def _mn_parname_ ( self , index ) :
    """Get the name of the parameter with the given index
    >>> minuit = ...
    >>> name   = minuit.par_name ( 4 )
    """
    if index not in self :
        raise IndexError ( "No parameter with index %s" % index )
    ## C-style output arguments required by TMinuit::mnpout
    value = ctypes.c_double ( 0 )
    error = ctypes.c_double ( 0 )
    lower = ctypes.c_double ( 0 )
    upper = ctypes.c_double ( 0 )
    ipar  = ctypes.c_int    ( 0 )
    label = ROOT.TString()
    self.mnpout ( index , label , value , error , lower , upper , ipar )
    ## a negative internal index means "no such parameter"
    if ipar.value >= 0 : return str ( label )
    raise IndexError ( "No parameter with index %s" % index )

ROOT.TMinuit . par_name = _mn_parname_
# =============================================================================
## Get the index of the parameter with the given name
#  @code
#  minuit = ...
#  index  = minuit.par_index ( 'p4' )
#  @endcode
def _mn_index_ ( self , name ) :
    """Get the index of the parameter with the given name
    >>> minuit = ...
    >>> index  = minuit.par_index ( 'p4' )
    """
    ## C-style output arguments required by TMinuit::mnpout
    value = ctypes.c_double ( 0 )
    error = ctypes.c_double ( 0 )
    lower = ctypes.c_double ( 0 )
    upper = ctypes.c_double ( 0 )
    ipar  = ctypes.c_int    ( 0 )
    ## scan all parameters and compare names
    for j in self :
        label = ROOT.TString()
        self.mnpout ( j , label , value , error , lower , upper , ipar )
        if 0 <= ipar.value and str ( label ) == name : return j
    raise IndexError ( "No parameter with name %s" % name )

ROOT.TMinuit . par_index = _mn_index_
ROOT.TMinuit . index     = _mn_index_
# =============================================================================
## iterator over TMinuit parameter indices
#  @code
#  m = ... # TMinuit object
#  for i in m : print m[i]
#  @endcode
def _mn_iter_ ( self ) :
    """Iterate over the TMinuit parameter indices:
    >>> m = ... # TMinuit object
    >>> for i in m : print m[i]
    """
    ## re-evaluate len() on every step: parameters may be added while iterating
    index = 0
    while index < len ( self ) :
        yield index
        index += 1

ROOT.TMinuit . __iter__ = _mn_iter_
# =============================================================================
## Simple wrapper for <code>ROOT.TMinuit.mnexcm</code> function
#  @see TMinuit::mnexcm
#  Execute MINUIT command
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2013-04-01
def _mn_exec_ ( self , command , *args ) :
    """Execute MINUIT command
    Simple wrapper for `ROOT.TMinuit.mnexcm` function
    - see ROOT.TMinuit.mnexcm
    Returns the integer MINUIT return code (see `return_codes`).
    """
    if args :
        ## pack the positional arguments into the C-array expected by mnexcm
        from array import array
        arglist = array ( 'd' , [ i for i in args ] )
        ## C-style output argument: the error flag
        ierr = ctypes.c_int ( 0 )
        #
        self.mnexcm ( command , arglist , len ( arglist ) , ierr )
        result = int ( ierr.value )
    else :
        ## no arguments: the simpler TMinuit::Command interface suffices
        result = self.Command ( command )
    ## warn about the problematic return codes (1-9)
    if result and result in return_codes and result < 10 :
        lst = [ str ( i ) for i in args ]
        lst = ' '.join ( lst )
        logger.warning ( "Command %s -> %s:%s" % ( command + ' ' + lst ,
                                                   result ,
                                                   return_codes [ result ] ) )
    return result
_mn_exec_ . __doc__ += '\n' + ROOT.TMinuit.mnexcm . __doc__
ROOT.TMinuit.execute = _mn_exec_
# =============================================================================
## execute the MINUIT "SHOW" command
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2013-04-01
def _mn_show_ ( self , what = 'PAR' , *args ) :
    """Execute the MINUIT ``SHOW''-command
    >>> minuit.show ( 'PAR' )
    """
    command = "SHOW %s" % what.upper()
    return _mn_exec_ ( self , command , *args )

ROOT.TMinuit.show = _mn_show_
# =============================================================================
## set the parameter value (and optionally fix it!)
#  @code
#  minuit = ...
#  minuit.setPar        ( 0 , 10.0 )        ## par(0) == 10.0
#  minuit.setPar        ( 0 , 10.0 , True ) ## ... and fix it!
#  minuit.set_par       ( 0 , 10.0 )        ## ditto
#  minuit.setParameter  ( 0 , 10.0 )        ## ditto
#  minuit.set_parameter ( 0 , 10.0 )        ## ditto
#  minuit [ 0 ] = 10                        ## ditto!
#  @endcode
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2012-09-28
def _mn_set_par_ ( self , i , val , fix = False ) :
    """Set a MINUIT parameter to some value, optionally fixing it
    >>> minuit = ...
    >>> minuit.setPar ( 0 , 10.0 )        ## par(0) == 10.0
    >>> minuit.setPar ( 0 , 10.0 , True ) ## ... and fix it!
    >>> minuit [ 0 ]  = 10                ## ditto!
    """
    if i not in self :
        raise IndexError ( "Invalid parameter index %s!" % i )
    ## translate a name into the numeric index
    if isinstance ( i , string_types ) : i = _mn_index_ ( self , i )
    ## accept value-with-error objects: take the central value
    if hasattr ( val , 'value' ) : val = val.value()
    ## MINUIT counts parameters from 1, python from 0
    ierr = _mn_exec_ ( self , "SET PAR" , i + 1 , val )
    if fix : self.FixParameter ( i )
    return ierr

ROOT.TMinuit . setPar        = _mn_set_par_
ROOT.TMinuit . setParameter  = _mn_set_par_
ROOT.TMinuit . set_par       = _mn_set_par_
ROOT.TMinuit . set_parameter = _mn_set_par_
ROOT.TMinuit . fixPar        = lambda s , i , v : _mn_set_par_ ( s , i , v , True )
ROOT.TMinuit . fix_par       = lambda s , i , v : _mn_set_par_ ( s , i , v , True )
ROOT.TMinuit . fixParameter  = lambda s , i , v : _mn_set_par_ ( s , i , v , True )
ROOT.TMinuit . fix_parameter = lambda s , i , v : _mn_set_par_ ( s , i , v , True )
ROOT.TMinuit . fix           = lambda s , i , v : _mn_set_par_ ( s , i , v , True )
ROOT.TMinuit . __setitem__   = _mn_set_par_
# =============================================================================
## release fixed parameter(s), by index or by name
#  @code
#  mn = ... # TMinuit object
#  mn.release ( 1 )
#  mn.rel     ( 1 )           ## ditto!
#  mn.rel_par ( 1 )           ## ditto!
#  mn.release ( 1 , 2 , 3 )
#  mn.release ( 'p1' )
#  @endcode
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2012-09-28
def _mn_rel_par_ ( self , *pars ) :
    """Release fixed MINUIT parameter(s)
    >>> mn = ... # TMinuit object
    >>> mn.release ( 1 )
    >>> mn.rel     ( 1 )           ## ditto!
    >>> mn.rel_par ( 1 )           ## ditto!
    >>> mn.release ( 1 , 2 , 3 )
    >>> mn.release ( 'p1' )
    """
    for p in pars :
        if p not in self :
            raise IndexError ( "Invalid parameter index %s!" % p )
        ## translate a name into the numeric index
        if isinstance ( p , string_types ) : p = _mn_index_ ( self , p )
        ## MINUIT counts parameters from 1
        _mn_exec_ ( self , "REL" , p + 1 )

ROOT.TMinuit . release = _mn_rel_par_
ROOT.TMinuit . rel     = _mn_rel_par_
ROOT.TMinuit . rel_par = _mn_rel_par_
# ===========================================================
## Perform the actual MINUIT minimization:
#  @code
#  m = ... #
#  m.fit     ()                ## run migrade!
#  m.migrade ()                ## ditto
#  m.fit ( method = 'MIN' )
#  @endcode
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2012-09-28
def _mn_min_ ( self                  ,
               maxcalls  = 5000      ,
               tolerance = 0.01      ,
               method    = 'MIGRADE' ) :
    """Perform the actual MINUIT minimization:
    >>> m = ... #
    >>> m.fit     ()                ## run migrade!
    >>> m.migrade ()                ## ditto
    >>> m.fit ( method = 'MIN' )
    """
    ## delegate to the generic command executor
    return _mn_exec_ ( self , method , maxcalls , tolerance )

ROOT.TMinuit . migrade  = _mn_min_
ROOT.TMinuit . migrad   = _mn_min_
ROOT.TMinuit . fit      = _mn_min_
ROOT.TMinuit . hesse    = lambda s , *a : _mn_exec_ ( s , 'HESSE'   , *a )
ROOT.TMinuit . minimize = lambda s , *a : _mn_exec_ ( s , 'MIN'     , *a )
ROOT.TMinuit . seek     = lambda s , *a : _mn_exec_ ( s , 'SEEK'    , *a )
ROOT.TMinuit . simplex  = lambda s , *a : _mn_exec_ ( s , 'SIMPLEX' , *a )
#=============================================================================
## Print MINUIT information
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2012-09-28
def _mn_str_ ( self , l = 3 , v = 0.0 ) :
    """Print MINUIT information:
    >>> m = ...
    >>> print m
    """
    ## TMinuit::mnprin prints directly to stdout; return a newline
    ## so that `print m` still yields a string
    self.mnprin ( l , v )
    return '\n'

ROOT.TMinuit . __str__  = _mn_str_
ROOT.TMinuit . __repr__ = _mn_str_
# =============================================================================
## define/add a new parameter to TMinuit
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2012-09-28
def _mn_add_par_ ( self       ,
                   name       ,
                   start      ,
                   step = -1  ,
                   low  = 0   ,
                   high = 0   ) :
    """Define/add a new parameter to MINUIT
    >>> m.addPar ( 'ququ' , 10 , 0.1 )
    """
    ## accept value-with-error objects: take the central values
    if hasattr ( start , 'value' ) : start = start.value()
    if hasattr ( step  , 'value' ) : step  = step.value()
    ## guess a reasonable step when none is supplied
    if step < 0 :
        if   low < high : step = 1.e-3 * ( high - low )
        elif start      : step = 1.e-3 * abs ( start )
    ## the new parameter gets the next free index
    ipar = len ( self )
    ## C-style output argument: the error flag
    ierr = ctypes.c_int ( 0 )
    self.mnparm ( ipar , name , start , step , low , high , ierr )
    result = int ( ierr.value )
    if result : logger.error ("Error from TMinuit.mnparm %s" % result)
    return result

ROOT.TMinuit . addpar           = _mn_add_par_
ROOT.TMinuit . addPar           = _mn_add_par_
ROOT.TMinuit . add_par          = _mn_add_par_
ROOT.TMinuit . add_parameter    = _mn_add_par_
ROOT.TMinuit . defpar           = _mn_add_par_
ROOT.TMinuit . def_par          = _mn_add_par_
ROOT.TMinuit . define_parameter = _mn_add_par_
ROOT.TMinuit . newpar           = _mn_add_par_
ROOT.TMinuit . newPar           = _mn_add_par_
ROOT.TMinuit . new_par          = _mn_add_par_
ROOT.TMinuit . new_parameter    = _mn_add_par_
# =============================================================================
## get MINOS errors for a parameter
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2012-09-28
def _mn_minerr_ ( self , i ) :
    """Get MINOS errors for a parameter:
    >>> m = ... # TMinuit object
    >>> pos , neg = m.minosErr ( 0 )
    """
    if i not in self : raise IndexError
    ## translate a name into the numeric index
    if isinstance ( i , string_types ) : i = _mn_index_ ( self , i )
    ## C-style output arguments for TMinuit::mnerrs
    eplus  = ctypes.c_double ( 0 )
    eminus = ctypes.c_double ( 1 )
    epara  = ctypes.c_double ( 2 )
    gcc    = ctypes.c_double ( 3 )
    self.mnerrs ( i , eplus , eminus , epara , gcc )
    ## only the asymmetric errors are returned
    return float ( eplus.value ) , float ( eminus.value )

ROOT.TMinuit . minErr       = _mn_minerr_
ROOT.TMinuit . minosErr     = _mn_minerr_
ROOT.TMinuit . minos_err    = _mn_minerr_
ROOT.TMinuit . minos_errors = _mn_minerr_
ROOT.TMinuit . minErrs      = _mn_minerr_
ROOT.TMinuit . minosErrs    = _mn_minerr_
# =============================================================================
## run MINOS for the given parameters
#  @author Vanya BELYAEV Ivan.Belyaev@itep.ru
#  @date 2012-09-28
def _mn_minos_ ( self , *args ) :
    """Run MINOS for the given parameters:
    >>> m = ... # TMinuit object
    >>> result = m.minos ( 1 , 2 )
    """
    indices = []
    for a in args :
        if a not in self : raise IndexError
        ## translate a name into the numeric index
        if isinstance ( a , string_types ) : a = _mn_index_ ( self , a )
        indices.append ( a + 1 ) ## note + 1 here: MINUIT counts from 1!
    return _mn_exec_ ( self , 'MINOS' , 500 , *tuple ( indices ) )

ROOT.TMinuit . minos = _mn_minos_
# =============================================================================
# =============================================================================
## get current Minuit statistics
#  @author Vanya BELYAEV Ivan.Belyaev@cern.ch
#  @date 2013-04-01
def _mn_stat_ ( self ) :
    """Get the current Minuit status as a dictionary
    >>> mn = ... # TMinuit object
    >>> stat = mn.stat()
    Returns a dict describing the current status of the minimization:
    *-*       =========================================================
    *-*       User-called
    *-*          Namely, it returns:
    *-*        FMIN: the best function value found so far
    *-*        FEDM: the estimated vertical distance remaining to minimum
    *-*        ERRDEF: the value of UP defining parameter uncertainties
    *-*        NPARI: the number of currently variable parameters
    *-*        NPARX: the highest (external) parameter number defined by user
    *-*        ISTAT: a status integer indicating how good is the covariance
    *-*           matrix:  0= not calculated at all
    *-*                    1= approximation only, not accurate
    *-*                    2= full matrix, but forced positive-definite
    *-*                    3= full accurate covariance matrix
    *
    """
    ## C-style output arguments for TMinuit::mnstat
    fmin   = ctypes.c_double ( 1 )
    fedm   = ctypes.c_double ( 2 )
    errdef = ctypes.c_double ( 3 )
    npari  = ctypes.c_int ( 1 )
    nparx  = ctypes.c_int ( 2 )
    istat  = ctypes.c_int ( 0 )
    #
    self . mnstat( fmin, fedm, errdef, npari , nparx , istat )
    ## repack into a plain python dictionary
    return { 'FMIN'   : float ( fmin . value ) ,
             'FEDM'   : float ( fedm . value ) ,
             'ERRDEF' : float ( errdef . value ) ,
             'NPARI'  : int ( npari . value ) ,
             'NPARX'  : int ( nparx . value ) ,
             'ISTAT'  : int ( istat . value ) }
_mn_stat_ . __doc__ += '\n' + ROOT.TMinuit.mnstat . __doc__
ROOT.TMinuit.stat = _mn_stat_
# =============================================================================
## get UP-parameter for err-def
#  @author Vanya BELYAEV Ivan.Belyaev@cern.ch
#  @date 2013-04-01
def _mn_get_errdef_ ( self ) :
    """Get the UP-parameter used to define the uncertainties
    >>> mn = ... # TMinuit object
    >>> up = mn.GetErrorDef()
    """
    ## ERRDEF is part of the general MINUIT status record
    return _mn_stat_ ( self ) [ 'ERRDEF' ]

ROOT.TMinuit . errDef           = _mn_get_errdef_
ROOT.TMinuit . err_def          = _mn_get_errdef_
ROOT.TMinuit . error_def        = _mn_get_errdef_
ROOT.TMinuit . error_definition = _mn_get_errdef_
ROOT.TMinuit . GetErrorDef      = _mn_get_errdef_
# =============================================================================
## create N-sigma contour
#  @code
#  mn    = ... # TMinuit object
#  graph = mn.contour ( 100 , 'par1' , 'par2' , 1 )
#  @endcode
#  @author Vanya BELYAEV Ivan.Belyaev@cern.ch
#  @date 2011-06-07
def _mn_contour_ ( self , npoint , par1 , par2 , nsigma = 1 ) :
    """Create an n-sigma contour for par1 vs par2
    >>> mn = ... # TMinuit object
    >>> graph = mn.contour ( 100 , 1 , 2 )
    - npoint : number of contour points (must be >= 4)
    - par1   : first  parameter (index or name)
    - par2   : second parameter (index or name)
    - nsigma : the contour level in units of sigma
    Returns the TGraph from TMinuit::Contour (possibly incomplete when
    the contour calculation fails; an error is logged in that case).
    """
    if npoint < 4       : raise ValueError ( 'contour: npoint (%s) must be >= 4' % npoint )
    if not par1 in self : raise ValueError ( 'contour: par1(%s) is not in Minuit' % par1 )
    if not par2 in self : raise ValueError ( 'contour: par2(%s) is not in Minuit' % par2 )
    if isinstance ( par1 , string_types ) : par1 = _mn_index_ ( self , par1 )
    if isinstance ( par2 , string_types ) : par2 = _mn_index_ ( self , par2 )
    if par1 == par2     : raise ValueError ( 'contour: par1 == par2(%s) ' % par2 )
    ## parameter names, used only for the log message
    ## FIX: was `_mn_par_name_` -- an undefined name (NameError);
    ## the function defined above is `_mn_parname_`
    name1 = _mn_parname_ ( self , par1 )
    name2 = _mn_parname_ ( self , par2 )
    ## save old error definition
    old_err_def = self.GetErrorDef()
    ## set the error definition corresponding to the requested n-sigma
    self.SetErrorDef ( nsigma * nsigma )
    graph = self.Contour ( npoint , par1 , par2 )
    logger.debug ( "Build 2D-contour %s vs %s at %s sigma" % ( name2 , name1 , nsigma ) )
    ## restore old error definition
    self.SetErrorDef ( old_err_def )
    status = self.GetStatus()
    #
    if graph and 0 == status : return graph
    logger.error ( 'TMinuit::Contour: status %i' % status )
    return graph
_mn_contour_ . __doc__ += '\n' + ROOT.TMinuit.Contour . __doc__
ROOT.TMinuit . contour = _mn_contour_
# =============================================================================
## create N-sigma contours
#  @code
#  mn     = ... # TMinuit object
#  graphs = mn.contours ( 100 , 'par1' , 'par2' , 1 , 2 , 3 )
#  @endcode
#  @author Vanya BELYAEV Ivan.Belyaev@cern.ch
#  @date 2011-06-07
def _mn_contours_ ( self , npoints , par1 , par2 , *nsigmas ) :
    """Create n-sigma contours for par1 vs par2
    >>> mn = ... # TMinuit object
    >>> graphs = mn.contours ( 100 , 'par1' , 'par2' , 1 , 2 , 3 )
    - npoints : number of points for each contour (must be >= 4)
    - par1    : first  parameter (index or name)
    - par2    : second parameter (index or name)
    - nsigmas : contour levels, one contour is built per level
    Returns a tuple of graphs, one per entry in nsigmas.
    """
    if npoints < 4      : raise ValueError ( 'contour: npoint (%s) must be >= 4' % npoints )
    if not par1 in self : raise ValueError ( 'contour: par1(%s) is not in Minuit' % par1 )
    if not par2 in self : raise ValueError ( 'contour: par2(%s) is not in Minuit' % par2 )
    if isinstance ( par1 , string_types ) : par1 = _mn_index_ ( self , par1 )
    if isinstance ( par2 , string_types ) : par2 = _mn_index_ ( self , par2 )
    if par1 == par2     : raise ValueError ( 'contour: par1 == par2(%s) ' % par2 )
    #
    graphs = []
    ## FIX: the loop iterated over `sigmas` -- an undefined name (NameError);
    ## it must iterate over the `nsigmas` argument
    for nsigma in nsigmas :
        g = _mn_contour_ ( self , npoints , par1 , par2 , nsigma )
        graphs.append ( g )
    return tuple ( graphs )
ROOT.TMinuit . contours = _mn_contours_
## major ROOT version (the TMatrix indexing protocol differs for ROOT<6)
root_major = root_info.major
# =============================================================================
## get the covariance matrix from TMinuit
def _mn_cov_ ( self , size = -1 , root = False ) :
    """Get the covariance matrix from TMinuit
    >>> mn = ... # TMinuit object
    >>> cov = mn.cov()
    - size : matrix dimension; non-positive means "all parameters"
    Returns a symmetric Ostap matrix.
    """
    ## clamp the requested size to the number of defined parameters
    if size <= 0 : size = len ( self )
    size = min ( size , len ( self ) )
    ## flat buffer filled by TMinuit::mnemat (size x size doubles)
    from array import array
    matrix = array ( 'd' , [ 0 for i in range ( 0 , size * size ) ] )
    self.mnemat ( matrix , size )
    ## repack into a symmetric Ostap matrix (upper triangle suffices)
    ## NOTE(review): the `root` argument is never used here, although
    ## _mn_cor_ below handles a possible ROOT.TMatrix result -- confirm
    import ostap.math.linalg
    from ostap.core.core import Ostap
    mtrx = Ostap.Math.SymMatrix ( size )()
    for i in range ( 0 , size ) :
        for j in range ( i , size ) :
            mtrx [ i , j ] = matrix [ i * size + j ]
    return mtrx
# =============================================================================
## get the correlation matrix from TMinuit
def _mn_cor_ ( self , size = -1 , root = False ) :
    """Get the correlation matrix from TMinuit
    >>> mn = ... # TMinuit object
    >>> cor = mn.cor()
    - size : matrix dimension; non-positive means "all parameters"
    - root : when true, build the result as a ROOT.TMatrix
    """
    ## start from the covariance matrix
    cov = self.cov ( size , root )
    #
    from math import sqrt
    ## deduce the actual size; force `root` when a ROOT matrix came back
    if isinstance ( cov , ROOT.TMatrix ) :
        size = cov.GetNrows()
        root = True
    else : size = cov.kRows
    ## use ROOT matrices
    if root : cor = ROOT.TMatrix ( size , size )
    else    : cor = cov.__class__ ()
    for i in range ( 0 , size ) :
        d_i = cov ( i , i )
        ## unit diagonal; 0 for a non-positive (degenerate) variance
        cor [ i , i ] = 1 if 0 < d_i else 0
        for j in range ( i + 1 , size ) :
            d_j = cov ( j , j )
            if 0 != cov ( i , j ) and 0 < d_i and 0 < d_j :
                ## NOTE: TMatrix in ROOT<6 needs [i][j]-style indexing
                if root and root_major < 6 : cor [ i ] [ j ] = cov ( i , j ) / sqrt ( d_i * d_j )
                else : cor [ i , j ] = cov ( i , j ) / sqrt ( d_i * d_j )
            else :
                if root and root_major < 6 : cor [ i ] [ j ] = 0
                else : cor [ i , j ] = 0
    return cor
_mn_cor_ . __doc__ += '\n' + ROOT.TMinuit.mnemat . __doc__
ROOT.TMinuit . cov  = _mn_cov_
ROOT.TMinuit . cor  = _mn_cor_
ROOT.TMinuit . corr = _mn_cor_
# =============================================================================
## Build parameter table from (T)Minuit
#  @code
#  minuit = ...
#  print ( minuit.table() )
#  @endcode
def _mn_table_ ( self , title = '' , prefix = '' ) :
    """Build a parameter table from (T)Minuit
    - title  : table title
    - prefix : prefix for each line of the table
    Returns the formatted table as a string.
    """
    header = ( 'Parameter' , '' , 'Value' )
    rows   = []
    from ostap.fitting.utils import fit_status , cov_qual
    from ostap.logger.utils  import pretty_float , pretty_ve , pretty_2ve
    ## overall fit status (only shown when non-trivial)
    status = self.GetStatus()
    if status :
        status = fit_status ( status )
        row = ' Status' , '' , status
        rows.append ( row )
    stat  = _mn_stat_ ( self )
    istat = stat.pop ( 'ISTAT' , None )
    if not istat is None :
        ## colorize the covariance-matrix quality: green for a full
        ## accurate matrix, "attention" for approximate/forced ones
        ## FIX: was `attentiont` -- an undefined name (NameError);
        ## the function imported at the top of the module is `attention`
        cq = ''
        if   -1 == istat            : cq = cov_qual ( istat )
        elif  3 == istat            : cq = allright  ( cov_qual ( istat ) )
        elif istat in ( 0 , 1 , 2 ) : cq = attention ( cov_qual ( istat ) )
        else                        : cq = cov_qual ( istat )
        row = 'Covariance matrix quality' , '' , cq
        rows.append ( row )
    ## best FCN value found so far
    fmin = stat.pop ( 'FMIN' , None )
    if not fmin is None :
        s , n = pretty_float ( fmin )
        if n : n = '[10^%+d]' % n
        else : n = ''
        row = 'Minimized FCN value' , n , s
        rows.append ( row )
    ## estimated distance to the minimum
    fedm = stat.pop ( 'FEDM' , None )
    if not fedm is None :
        s , n = pretty_float ( fedm )
        if n : n = '[10^%+d]' % n
        else : n = ''
        row = 'Estimated distance to minimum' , n , s
        rows.append ( row )
    errdef = stat.pop ( 'ERRDEF' , None )
    ## needed ?
    has_limits = False
    has_minos  = False
    ## C-style output arguments for TMinuit::mnpout
    val  = ctypes.c_double ( 0 )
    err  = ctypes.c_double ( 0 )
    low  = ctypes.c_double ( 0 )
    high = ctypes.c_double ( 0 )
    idx  = ctypes.c_int    ( 0 )
    dct_pars = {}
    ## loop over all parameters: collect value/error/limits/MINOS errors
    for i in self :
        name = ROOT.TString()
        self.mnpout ( i , name , val , err , low , high , idx )
        if not 0 <= idx.value : continue
        dct = {}
        dct [ 'name' ] = '%-2d: %s' % ( i , str ( name ).strip() )
        dct [ 'value'] = val.value
        if low.value < high.value :
            dct [ 'low'  ] = low .value
            dct [ 'high' ] = high.value
            has_limits     = True
        if 0 <= err.value :
            dct [ 'error' ] = err.value
            mn_plus , mn_minus = _mn_minerr_ ( self , str ( name ) )
            if 0 < mn_plus or mn_minus < 0 :
                dct [ 'minos+' ] = mn_plus
                dct [ 'minos-' ] = mn_minus
                has_minos = True
        dct_pars [ i ] = dct
    if has_minos :
        ## some parameters have MINOS errors, add columns
        header = ( header ) + ( 'neg-minos' , 'pos-minos' )
        rows   = [ r + ( '' , '' ) for r in rows ]
    if has_limits :
        ## some parameters have LIMITS, add columns
        header = ( header ) + ( 'low limit' , 'high limit' )
        rows   = [ r + ( '' , '' ) for r in rows ]
    ## format one table row per parameter
    for p in dct_pars :
        pdict = dct_pars [ p ]
        row   = []
        row.append ( pdict.pop ( 'name' ) )
        value = pdict.pop ( 'value' )
        error = pdict.pop ( 'error' , None )
        if error is None :
            s , n = pretty_float ( value )
            s     = "%s(fixed)" % s
        else :
            s , n = pretty_ve ( VE ( value , error * error ) )
        ## the common power-of-ten factor for this row
        if n : row.append ( '[10^%+d]' % n )
        else : row.append ( '' )
        row.append ( s )
        if has_minos :
            mn_plus  = pdict.pop ( 'minos+' , None )
            mn_minus = pdict.pop ( 'minos-' , None )
            if mn_plus  is None : mn_plus  = ''
            else                : mn_plus  = '%8f' % ( mn_plus  * 10 ** n )
            if mn_minus is None : mn_minus = ''
            else                : mn_minus = '%8f' % ( mn_minus * 10 ** n )
            row.append ( mn_minus )
            row.append ( mn_plus  )
        if has_limits :
            low  = pdict.pop ( 'low'  , None )
            high = pdict.pop ( 'high' , None )
            if low  is None : low  = ''
            else            : low  = '%8f' % ( low  * 10 ** n )
            if high is None : high = ''
            else            : high = '%8f' % ( high * 10 ** n )
            row.append ( low  )
            row.append ( high )
        row = tuple ( row )
        rows.append ( row )
    rows = [ header ] + rows
    from ostap.logger.table import table
    return table ( rows , title = title , prefix = prefix )
ROOT.TMinuit .table = _mn_table_
# =============================================================================
if '__main__' == __name__ :
    ## print a short module summary when executed directly
    from ostap.utils.docme import docme
    docme ( __name__ , logger = logger )
# =============================================================================
## The END
# =============================================================================
| |
"""
This module contains an C{L{OpenIDStore}} implementation backed by
flat files.
"""
import string
import os
import time
from errno import EEXIST, ENOENT
try:
    from tempfile import mkstemp
except ImportError:
    # Python < 2.3
    import warnings
    warnings.filterwarnings("ignore",
                            "tempnam is a potential security risk",
                            RuntimeWarning,
                            "openid.store.filestore")

    def mkstemp(dir):
        # Fallback implementation: os.tempnam alone is racy, so the
        # O_CREAT|O_EXCL open makes the file creation atomic; retry a
        # few times in case another process grabs the same name first.
        for _ in range(5):
            name = os.tempnam(dir)
            try:
                fd = os.open(name, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0600)
            except OSError, why:
                if why.errno != EEXIST:
                    raise
            else:
                return fd, name

        raise RuntimeError('Failed to get temp file after 5 attempts')
from openid.association import Association
from openid.store.interface import OpenIDStore
from openid.store import nonce
from openid import cryptutil, oidutil
# Characters that may appear unescaped in generated filenames.
_filename_allowed = string.ascii_letters + string.digits + '.'

# Build _isFilenameSafe, a char -> bool membership test, using the best
# facility available on this Python version.
try:
    # 2.4
    set
except NameError:
    try:
        # 2.3
        import sets
    except ImportError:
        # Python < 2.2: fall back to a dict keyed by the allowed chars
        d = {}
        for c in _filename_allowed:
            d[c] = None
        _isFilenameSafe = d.has_key
        del d
    else:
        _isFilenameSafe = sets.Set(_filename_allowed).__contains__
else:
    _isFilenameSafe = set(_filename_allowed).__contains__
def _safe64(s):
    """Hash s and return the digest as a filename-safe base64 string."""
    encoded = oidutil.toBase64(cryptutil.sha1(s))
    # replace base64 characters that are unsafe in filenames
    for unsafe, safe in (('+', '_'), ('/', '.'), ('=', '')):
        encoded = encoded.replace(unsafe, safe)
    return encoded
def _filenameEscape(s):
    """Escape s so that every character is safe in a filename.

    Unsafe characters are replaced by '_' followed by their two-digit
    uppercase hex code.
    """
    escaped = []
    for ch in s:
        if _isFilenameSafe(ch):
            escaped.append(ch)
        else:
            escaped.append('_%02X' % ord(ch))
    return ''.join(escaped)
def _removeIfPresent(filename):
    """Attempt to remove a file, returning whether the file existed at
    the time of the call.

    str -> bool
    """
    try:
        os.unlink(filename)
    except OSError, why:
        if why.errno == ENOENT:
            # Someone beat us to it, but it's gone, so that's OK
            return 0
        else:
            raise
    else:
        # File was present
        return 1
def _ensureDir(dir_name):
    """Create dir_name as a directory if it does not exist. If it
    exists, make sure that it is, in fact, a directory.

    Can raise OSError

    str -> NoneType
    """
    try:
        os.makedirs(dir_name)
    except OSError, why:
        # EEXIST for an existing *directory* is fine; anything else
        # (including an existing non-directory path) is re-raised.
        if why.errno != EEXIST or not os.path.isdir(dir_name):
            raise
class FileOpenIDStore(OpenIDStore):
"""
This is a filesystem-based store for OpenID associations and
nonces. This store should be safe for use in concurrent systems
on both windows and unix (excluding NFS filesystems). There are a
couple race conditions in the system, but those failure cases have
been set up in such a way that the worst-case behavior is someone
having to try to log in a second time.
Most of the methods of this class are implementation details.
People wishing to just use this store need only pay attention to
the C{L{__init__}} method.
Methods of this object can raise OSError if unexpected filesystem
conditions, such as bad permissions or missing directories, occur.
"""
    def __init__(self, directory):
        """
        Initializes a new FileOpenIDStore. This initializes the
        nonce and association directories, which are subdirectories of
        the directory passed in.

        @param directory: This is the directory to put the store
            directories in.

        @type directory: C{str}
        """
        # Make absolute
        directory = os.path.normpath(os.path.abspath(directory))
        self.nonce_dir = os.path.join(directory, 'nonces')
        self.association_dir = os.path.join(directory, 'associations')
        # Temp dir must be on the same filesystem as the associations
        # directory (so os.rename stays atomic)
        self.temp_dir = os.path.join(directory, 'temp')
        self.max_nonce_age = 6 * 60 * 60 # Six hours, in seconds
        self._setup()
def _setup(self):
"""Make sure that the directories in which we store our data
exist.
() -> NoneType
"""
_ensureDir(self.nonce_dir)
_ensureDir(self.association_dir)
_ensureDir(self.temp_dir)
    def _mktemp(self):
        """Create a temporary file on the same filesystem as
        self.association_dir.

        The temporary directory should not be cleaned if there are any
        processes using the store. If there is no active process using
        the store, it is safe to remove all of the files in the
        temporary directory.

        () -> (file, str)
        """
        fd, name = mkstemp(dir=self.temp_dir)
        try:
            file_obj = os.fdopen(fd, 'wb')
            return file_obj, name
        except:
            # Wrapping the descriptor failed: remove the temp file and
            # propagate whatever went wrong.
            _removeIfPresent(name)
            raise
def getAssociationFilename(self, server_url, handle):
"""Create a unique filename for a given server url and
handle. This implementation does not assume anything about the
format of the handle. The filename that is returned will
contain the domain name from the server URL for ease of human
inspection of the data directory.
(str, str) -> str
"""
if server_url.find('://') == -1:
raise ValueError('Bad server URL: %r' % server_url)
proto, rest = server_url.split('://', 1)
domain = _filenameEscape(rest.split('/', 1)[0])
url_hash = _safe64(server_url)
if handle:
handle_hash = _safe64(handle)
else:
handle_hash = ''
filename = '%s-%s-%s-%s' % (proto, domain, url_hash, handle_hash)
return os.path.join(self.association_dir, filename)
    def storeAssociation(self, server_url, association):
        """Store an association in the association directory.

        Writes to a temporary file first and then renames it into
        place, so a reader never sees a partially-written association.

        (str, Association) -> NoneType
        """
        association_s = association.serialize()
        filename = self.getAssociationFilename(server_url, association.handle)
        tmp_file, tmp = self._mktemp()

        try:
            try:
                tmp_file.write(association_s)
                os.fsync(tmp_file.fileno())
            finally:
                tmp_file.close()

            try:
                os.rename(tmp, filename)
            except OSError, why:
                if why.errno != EEXIST:
                    raise

                # We only expect EEXIST to happen only on Windows. It's
                # possible that we will succeed in unlinking the existing
                # file, but not in putting the temporary file in place.
                try:
                    os.unlink(filename)
                except OSError, why:
                    if why.errno == ENOENT:
                        pass
                    else:
                        raise

                # Now the target should not exist. Try renaming again,
                # giving up if it fails.
                os.rename(tmp, filename)
        except:
            # If there was an error, don't leave the temporary file
            # around.
            _removeIfPresent(tmp)
            raise
    def getAssociation(self, server_url, handle=None):
        """Retrieve an association. If no handle is specified, return
        the association with the latest expiration.

        (str, str or NoneType) -> Association or NoneType
        """
        if handle is None:
            handle = ''

        # The filename with the empty handle is a prefix of all other
        # associations for the given server URL.
        filename = self.getAssociationFilename(server_url, handle)

        if handle:
            return self._getAssociation(filename)
        else:
            # No handle: collect every association for this server URL
            # and pick the most recently issued one.
            association_files = os.listdir(self.association_dir)
            matching_files = []
            # strip off the path to do the comparison
            name = os.path.basename(filename)
            for association_file in association_files:
                if association_file.startswith(name):
                    matching_files.append(association_file)

            matching_associations = []
            # read the matching files and sort by time issued
            for name in matching_files:
                full_name = os.path.join(self.association_dir, name)
                association = self._getAssociation(full_name)
                if association is not None:
                    matching_associations.append(
                        (association.issued, association))

            matching_associations.sort()

            # return the most recently issued one.
            if matching_associations:
                (_, assoc) = matching_associations[-1]
                return assoc
            else:
                return None
    def _getAssociation(self, filename):
        """Read and deserialize the association stored in filename.

        Returns None -- removing the file -- when the file is corrupt
        or the association has expired; returns None (without touching
        anything) when the file does not exist.

        str -> Association or NoneType
        """
        try:
            assoc_file = file(filename, 'rb')
        except IOError, why:
            if why.errno == ENOENT:
                # No association exists for that URL and handle
                return None
            else:
                raise
        else:
            try:
                assoc_s = assoc_file.read()
            finally:
                assoc_file.close()

            try:
                association = Association.deserialize(assoc_s)
            except ValueError:
                # Corrupt file: discard it
                _removeIfPresent(filename)
                return None

        # Clean up expired associations
        if association.getExpiresIn() == 0:
            _removeIfPresent(filename)
            return None
        else:
            return association
def removeAssociation(self, server_url, handle):
"""Remove an association if it exists. Do nothing if it does not.
(str, str) -> bool
"""
assoc = self.getAssociation(server_url, handle)
if assoc is None:
return 0
else:
filename = self.getAssociationFilename(server_url, handle)
return _removeIfPresent(filename)
    def useNonce(self, server_url, timestamp, salt):
        """Return whether this nonce is valid.

        A nonce is consumed by atomically creating a file named after
        it; if the file already exists the nonce was seen before.

        str -> bool
        """
        # Reject nonces outside the allowed clock-skew window outright.
        if abs(timestamp - time.time()) > nonce.SKEW:
            return False
        if server_url:
            proto, rest = server_url.split('://', 1)
        else:
            # Create empty proto / rest values for empty server_url,
            # which is part of a consumer-generated nonce.
            proto, rest = '', ''
        domain = _filenameEscape(rest.split('/', 1)[0])
        url_hash = _safe64(server_url)
        salt_hash = _safe64(salt)
        # Leading hex timestamp lets cleanupNonces() judge expiry from
        # the filename alone.
        filename = '%08x-%s-%s-%s-%s' % (timestamp, proto, domain,
                                         url_hash, salt_hash)
        filename = os.path.join(self.nonce_dir, filename)
        try:
            # O_CREAT | O_EXCL makes creation atomic: the call fails if
            # the nonce file already exists (nonce already used).
            fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0200)
        except OSError, why:
            if why.errno == EEXIST:
                return False
            else:
                raise
        else:
            os.close(fd)
            return True
    def _allAssocs(self):
        """Load every readable association in the store.

        Corrupt files are removed; files that disappear mid-scan are
        logged and skipped.

        () -> [(filename, Association)]
        """
        all_associations = []
        association_filenames = map(
            lambda filename: os.path.join(self.association_dir, filename),
            os.listdir(self.association_dir))
        for association_filename in association_filenames:
            try:
                association_file = file(association_filename, 'rb')
            except IOError, why:
                if why.errno == ENOENT:
                    # Another process removed the file between listdir()
                    # and open(); just log it and move on.
                    oidutil.log("%s disappeared during %s._allAssocs" % (
                        association_filename, self.__class__.__name__))
                else:
                    raise
            else:
                try:
                    assoc_s = association_file.read()
                finally:
                    association_file.close()
                # Remove expired or corrupted associations
                try:
                    association = Association.deserialize(assoc_s)
                except ValueError:
                    _removeIfPresent(association_filename)
                else:
                    all_associations.append(
                        (association_filename, association))
        return all_associations
    def cleanup(self):
        """Remove expired entries from the database. This is
        potentially expensive, so only run when it is acceptable to
        take time.

        () -> NoneType
        """
        # Sweep both stores; the removal counts the helpers return are
        # deliberately ignored here.
        self.cleanupAssociations()
        self.cleanupNonces()
def cleanupAssociations(self):
removed = 0
for assoc_filename, assoc in self._allAssocs():
if assoc.getExpiresIn() == 0:
_removeIfPresent(assoc_filename)
removed += 1
return removed
def cleanupNonces(self):
nonces = os.listdir(self.nonce_dir)
now = time.time()
removed = 0
# Check all nonces for expiry
for nonce_fname in nonces:
timestamp = nonce_fname.split('-', 1)[0]
timestamp = int(timestamp, 16)
if abs(timestamp - now) > nonce.SKEW:
filename = os.path.join(self.nonce_dir, nonce_fname)
_removeIfPresent(filename)
removed += 1
return removed
| |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_BoolFilter(unittest.TestCase):
    """Unit tests for the private ``_BoolFilter`` base class."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import _BoolFilter

        return _BoolFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_constructor(self):
        # The flag is stored untouched.
        sentinel = object()
        self.assertIs(self._make_one(sentinel).flag, sentinel)

    def test___eq__type_differ(self):
        # A _BoolFilter never equals an unrelated type.
        sentinel = object()
        self.assertNotEqual(self._make_one(sentinel), object())

    def test___eq__same_value(self):
        sentinel = object()
        self.assertEqual(self._make_one(sentinel), self._make_one(sentinel))

    def test___ne__same_value(self):
        # Filters built from the same flag must not compare unequal.
        sentinel = object()
        left = self._make_one(sentinel)
        right = self._make_one(sentinel)
        self.assertFalse(left != right)
class TestSinkFilter(unittest.TestCase):
    """Unit tests for ``SinkFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import SinkFilter

        return SinkFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The flag is copied straight into the ``sink`` pb field.
        sink_flag = True
        produced = self._make_one(sink_flag).to_pb()
        self.assertEqual(produced, _RowFilterPB(sink=sink_flag))
class TestPassAllFilter(unittest.TestCase):
    """Unit tests for ``PassAllFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import PassAllFilter

        return PassAllFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The flag is copied straight into ``pass_all_filter``.
        pass_flag = True
        produced = self._make_one(pass_flag).to_pb()
        self.assertEqual(produced, _RowFilterPB(pass_all_filter=pass_flag))
class TestBlockAllFilter(unittest.TestCase):
    """Unit tests for ``BlockAllFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import BlockAllFilter

        return BlockAllFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The flag is copied straight into ``block_all_filter``.
        block_flag = True
        produced = self._make_one(block_flag).to_pb()
        self.assertEqual(produced, _RowFilterPB(block_all_filter=block_flag))
class Test_RegexFilter(unittest.TestCase):
    """Unit tests for the private ``_RegexFilter`` base class."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import _RegexFilter

        return _RegexFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_constructor(self):
        # Bytes regexes are stored as-is (same object).
        pattern = b"abc"
        self.assertIs(self._make_one(pattern).regex, pattern)

    def test_constructor_non_bytes(self):
        # Text regexes are encoded to bytes.
        self.assertEqual(self._make_one(u"abc").regex, b"abc")

    def test___eq__type_differ(self):
        self.assertNotEqual(self._make_one(b"def-rgx"), object())

    def test___eq__same_value(self):
        pattern = b"trex-regex"
        self.assertEqual(self._make_one(pattern), self._make_one(pattern))

    def test___ne__same_value(self):
        pattern = b"abc"
        left = self._make_one(pattern)
        right = self._make_one(pattern)
        self.assertFalse(left != right)
class TestRowKeyRegexFilter(unittest.TestCase):
    """Unit tests for ``RowKeyRegexFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import RowKeyRegexFilter

        return RowKeyRegexFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The regex lands in ``row_key_regex_filter``.
        pattern = b"row-key-regex"
        produced = self._make_one(pattern).to_pb()
        self.assertEqual(produced, _RowFilterPB(row_key_regex_filter=pattern))
class TestRowSampleFilter(unittest.TestCase):
    """Unit tests for ``RowSampleFilter``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import RowSampleFilter

        return RowSampleFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_constructor(self):
        # The sample value is stored untouched.
        sentinel = object()
        self.assertIs(self._make_one(sentinel).sample, sentinel)

    def test___eq__type_differ(self):
        sentinel = object()
        self.assertNotEqual(self._make_one(sentinel), object())

    def test___eq__same_value(self):
        sentinel = object()
        self.assertEqual(self._make_one(sentinel), self._make_one(sentinel))

    def test_to_pb(self):
        # The probability lands in ``row_sample_filter``.
        probability = 0.25
        produced = self._make_one(probability).to_pb()
        self.assertEqual(produced, _RowFilterPB(row_sample_filter=probability))
class TestFamilyNameRegexFilter(unittest.TestCase):
    """Unit tests for ``FamilyNameRegexFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import FamilyNameRegexFilter

        return FamilyNameRegexFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The regex lands in ``family_name_regex_filter``.
        pattern = u"family-regex"
        produced = self._make_one(pattern).to_pb()
        self.assertEqual(
            produced, _RowFilterPB(family_name_regex_filter=pattern)
        )
class TestColumnQualifierRegexFilter(unittest.TestCase):
    """Unit tests for ``ColumnQualifierRegexFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter

        return ColumnQualifierRegexFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The regex lands in ``column_qualifier_regex_filter``.
        pattern = b"column-regex"
        produced = self._make_one(pattern).to_pb()
        self.assertEqual(
            produced, _RowFilterPB(column_qualifier_regex_filter=pattern)
        )
class TestTimestampRange(unittest.TestCase):
    """Unit tests for ``TimestampRange``."""
    @staticmethod
    def _get_target_class():
        # Deferred import keeps module import free of the dependency.
        from google.cloud.bigtable.row_filters import TimestampRange
        return TimestampRange
    def _make_one(self, *args, **kwargs):
        # Construct an instance of the class under test.
        return self._get_target_class()(*args, **kwargs)
    def test_constructor(self):
        start = object()
        end = object()
        time_range = self._make_one(start=start, end=end)
        self.assertIs(time_range.start, start)
        self.assertIs(time_range.end, end)
    def test___eq__(self):
        start = object()
        end = object()
        time_range1 = self._make_one(start=start, end=end)
        time_range2 = self._make_one(start=start, end=end)
        self.assertEqual(time_range1, time_range2)
    def test___eq__type_differ(self):
        start = object()
        end = object()
        time_range1 = self._make_one(start=start, end=end)
        time_range2 = object()
        self.assertNotEqual(time_range1, time_range2)
    def test___ne__same_value(self):
        start = object()
        end = object()
        time_range1 = self._make_one(start=start, end=end)
        time_range2 = self._make_one(start=start, end=end)
        comparison_val = time_range1 != time_range2
        self.assertFalse(comparison_val)
    def _to_pb_helper(self, pb_kwargs, start=None, end=None):
        """Build a range from microsecond offsets past the epoch and
        compare its serialization to a pb built from ``pb_kwargs``."""
        import datetime
        from google.cloud._helpers import _EPOCH
        if start is not None:
            start = _EPOCH + datetime.timedelta(microseconds=start)
        if end is not None:
            end = _EPOCH + datetime.timedelta(microseconds=end)
        time_range = self._make_one(start=start, end=end)
        expected_pb = _TimestampRangePB(**pb_kwargs)
        time_pb = time_range.to_pb()
        self.assertEqual(
            time_pb.start_timestamp_micros, expected_pb.start_timestamp_micros
        )
        self.assertEqual(time_pb.end_timestamp_micros, expected_pb.end_timestamp_micros)
        self.assertEqual(time_pb, expected_pb)
    def test_to_pb(self):
        # Expected values: the start truncates down to a whole
        # millisecond, the end rounds up to the next one.
        start_micros = 30871234
        end_micros = 12939371234
        start_millis = start_micros // 1000 * 1000
        self.assertEqual(start_millis, 30871000)
        end_millis = end_micros // 1000 * 1000 + 1000
        self.assertEqual(end_millis, 12939372000)
        pb_kwargs = {}
        pb_kwargs["start_timestamp_micros"] = start_millis
        pb_kwargs["end_timestamp_micros"] = end_millis
        self._to_pb_helper(pb_kwargs, start=start_micros, end=end_micros)
    def test_to_pb_start_only(self):
        # Makes sure already milliseconds granularity
        start_micros = 30871000
        start_millis = start_micros // 1000 * 1000
        self.assertEqual(start_millis, 30871000)
        pb_kwargs = {}
        pb_kwargs["start_timestamp_micros"] = start_millis
        self._to_pb_helper(pb_kwargs, start=start_micros, end=None)
    def test_to_pb_end_only(self):
        # Makes sure already milliseconds granularity
        end_micros = 12939371000
        end_millis = end_micros // 1000 * 1000
        self.assertEqual(end_millis, 12939371000)
        pb_kwargs = {}
        pb_kwargs["end_timestamp_micros"] = end_millis
        self._to_pb_helper(pb_kwargs, start=None, end=end_micros)
class TestTimestampRangeFilter(unittest.TestCase):
    """Unit tests for ``TimestampRangeFilter``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import TimestampRangeFilter

        return TimestampRangeFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_constructor(self):
        sentinel_range = object()
        self.assertIs(self._make_one(sentinel_range).range_, sentinel_range)

    def test___eq__type_differ(self):
        sentinel_range = object()
        self.assertNotEqual(self._make_one(sentinel_range), object())

    def test___eq__same_value(self):
        sentinel_range = object()
        self.assertEqual(
            self._make_one(sentinel_range), self._make_one(sentinel_range)
        )

    def test_to_pb(self):
        from google.cloud.bigtable.row_filters import TimestampRange

        # An empty TimestampRange serializes to an empty pb message.
        produced = self._make_one(TimestampRange()).to_pb()
        self.assertEqual(
            produced, _RowFilterPB(timestamp_range_filter=_TimestampRangePB())
        )
class TestColumnRangeFilter(unittest.TestCase):
    """Unit tests for ``ColumnRangeFilter``."""
    @staticmethod
    def _get_target_class():
        # Deferred import keeps module import free of the dependency.
        from google.cloud.bigtable.row_filters import ColumnRangeFilter
        return ColumnRangeFilter
    def _make_one(self, *args, **kwargs):
        # Construct an instance of the class under test.
        return self._get_target_class()(*args, **kwargs)
    def test_constructor_defaults(self):
        # Only the column family is required; bounds default to
        # open-ended and both ends default to inclusive.
        column_family_id = object()
        row_filter = self._make_one(column_family_id)
        self.assertIs(row_filter.column_family_id, column_family_id)
        self.assertIsNone(row_filter.start_column)
        self.assertIsNone(row_filter.end_column)
        self.assertTrue(row_filter.inclusive_start)
        self.assertTrue(row_filter.inclusive_end)
    def test_constructor_explicit(self):
        column_family_id = object()
        start_column = object()
        end_column = object()
        inclusive_start = object()
        inclusive_end = object()
        row_filter = self._make_one(
            column_family_id,
            start_column=start_column,
            end_column=end_column,
            inclusive_start=inclusive_start,
            inclusive_end=inclusive_end,
        )
        self.assertIs(row_filter.column_family_id, column_family_id)
        self.assertIs(row_filter.start_column, start_column)
        self.assertIs(row_filter.end_column, end_column)
        self.assertIs(row_filter.inclusive_start, inclusive_start)
        self.assertIs(row_filter.inclusive_end, inclusive_end)
    def test_constructor_bad_start(self):
        # inclusive_start without a start_column must be rejected.
        column_family_id = object()
        self.assertRaises(
            ValueError, self._make_one, column_family_id, inclusive_start=True
        )
    def test_constructor_bad_end(self):
        # inclusive_end without an end_column must be rejected.
        column_family_id = object()
        self.assertRaises(
            ValueError, self._make_one, column_family_id, inclusive_end=True
        )
    def test___eq__(self):
        column_family_id = object()
        start_column = object()
        end_column = object()
        inclusive_start = object()
        inclusive_end = object()
        row_filter1 = self._make_one(
            column_family_id,
            start_column=start_column,
            end_column=end_column,
            inclusive_start=inclusive_start,
            inclusive_end=inclusive_end,
        )
        row_filter2 = self._make_one(
            column_family_id,
            start_column=start_column,
            end_column=end_column,
            inclusive_start=inclusive_start,
            inclusive_end=inclusive_end,
        )
        self.assertEqual(row_filter1, row_filter2)
    def test___eq__type_differ(self):
        column_family_id = object()
        row_filter1 = self._make_one(column_family_id)
        row_filter2 = object()
        self.assertNotEqual(row_filter1, row_filter2)
    def test_to_pb(self):
        column_family_id = u"column-family-id"
        row_filter = self._make_one(column_family_id)
        col_range_pb = _ColumnRangePB(family_name=column_family_id)
        expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
    def test_to_pb_inclusive_start(self):
        # Inclusive start maps to ``start_qualifier_closed``.
        column_family_id = u"column-family-id"
        column = b"column"
        row_filter = self._make_one(column_family_id, start_column=column)
        col_range_pb = _ColumnRangePB(
            family_name=column_family_id, start_qualifier_closed=column
        )
        expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
    def test_to_pb_exclusive_start(self):
        # Exclusive start maps to ``start_qualifier_open``.
        column_family_id = u"column-family-id"
        column = b"column"
        row_filter = self._make_one(
            column_family_id, start_column=column, inclusive_start=False
        )
        col_range_pb = _ColumnRangePB(
            family_name=column_family_id, start_qualifier_open=column
        )
        expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
    def test_to_pb_inclusive_end(self):
        # Inclusive end maps to ``end_qualifier_closed``.
        column_family_id = u"column-family-id"
        column = b"column"
        row_filter = self._make_one(column_family_id, end_column=column)
        col_range_pb = _ColumnRangePB(
            family_name=column_family_id, end_qualifier_closed=column
        )
        expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
    def test_to_pb_exclusive_end(self):
        # Exclusive end maps to ``end_qualifier_open``.
        column_family_id = u"column-family-id"
        column = b"column"
        row_filter = self._make_one(
            column_family_id, end_column=column, inclusive_end=False
        )
        col_range_pb = _ColumnRangePB(
            family_name=column_family_id, end_qualifier_open=column
        )
        expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
class TestValueRegexFilter(unittest.TestCase):
    """Unit tests for ``ValueRegexFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import ValueRegexFilter

        return ValueRegexFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The regex lands in ``value_regex_filter``.
        pattern = b"value-regex"
        produced = self._make_one(pattern).to_pb()
        self.assertEqual(produced, _RowFilterPB(value_regex_filter=pattern))
class TestValueRangeFilter(unittest.TestCase):
    """Unit tests for ``ValueRangeFilter``."""
    @staticmethod
    def _get_target_class():
        # Deferred import keeps module import free of the dependency.
        from google.cloud.bigtable.row_filters import ValueRangeFilter
        return ValueRangeFilter
    def _make_one(self, *args, **kwargs):
        # Construct an instance of the class under test.
        return self._get_target_class()(*args, **kwargs)
    def test_constructor_defaults(self):
        # With no arguments the range is open-ended and inclusive.
        row_filter = self._make_one()
        self.assertIsNone(row_filter.start_value)
        self.assertIsNone(row_filter.end_value)
        self.assertTrue(row_filter.inclusive_start)
        self.assertTrue(row_filter.inclusive_end)
    def test_constructor_explicit(self):
        start_value = object()
        end_value = object()
        inclusive_start = object()
        inclusive_end = object()
        row_filter = self._make_one(
            start_value=start_value,
            end_value=end_value,
            inclusive_start=inclusive_start,
            inclusive_end=inclusive_end,
        )
        self.assertIs(row_filter.start_value, start_value)
        self.assertIs(row_filter.end_value, end_value)
        self.assertIs(row_filter.inclusive_start, inclusive_start)
        self.assertIs(row_filter.inclusive_end, inclusive_end)
    def test_constructor_bad_start(self):
        # inclusive_start without a start_value must be rejected.
        self.assertRaises(ValueError, self._make_one, inclusive_start=True)
    def test_constructor_bad_end(self):
        # inclusive_end without an end_value must be rejected.
        self.assertRaises(ValueError, self._make_one, inclusive_end=True)
    def test___eq__(self):
        start_value = object()
        end_value = object()
        inclusive_start = object()
        inclusive_end = object()
        row_filter1 = self._make_one(
            start_value=start_value,
            end_value=end_value,
            inclusive_start=inclusive_start,
            inclusive_end=inclusive_end,
        )
        row_filter2 = self._make_one(
            start_value=start_value,
            end_value=end_value,
            inclusive_start=inclusive_start,
            inclusive_end=inclusive_end,
        )
        self.assertEqual(row_filter1, row_filter2)
    def test___eq__type_differ(self):
        row_filter1 = self._make_one()
        row_filter2 = object()
        self.assertNotEqual(row_filter1, row_filter2)
    def test_to_pb(self):
        row_filter = self._make_one()
        expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB())
        self.assertEqual(row_filter.to_pb(), expected_pb)
    def test_to_pb_inclusive_start(self):
        # Inclusive start maps to ``start_value_closed``.
        value = b"some-value"
        row_filter = self._make_one(start_value=value)
        val_range_pb = _ValueRangePB(start_value_closed=value)
        expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
    def test_to_pb_exclusive_start(self):
        # Exclusive start maps to ``start_value_open``.
        value = b"some-value"
        row_filter = self._make_one(start_value=value, inclusive_start=False)
        val_range_pb = _ValueRangePB(start_value_open=value)
        expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
    def test_to_pb_inclusive_end(self):
        # Inclusive end maps to ``end_value_closed``.
        value = b"some-value"
        row_filter = self._make_one(end_value=value)
        val_range_pb = _ValueRangePB(end_value_closed=value)
        expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
    def test_to_pb_exclusive_end(self):
        # Exclusive end maps to ``end_value_open``.
        value = b"some-value"
        row_filter = self._make_one(end_value=value, inclusive_end=False)
        val_range_pb = _ValueRangePB(end_value_open=value)
        expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
        self.assertEqual(row_filter.to_pb(), expected_pb)
class Test_CellCountFilter(unittest.TestCase):
    """Unit tests for the private ``_CellCountFilter`` base class."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import _CellCountFilter

        return _CellCountFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_constructor(self):
        # The cell count is stored untouched.
        sentinel = object()
        self.assertIs(self._make_one(sentinel).num_cells, sentinel)

    def test___eq__type_differ(self):
        sentinel = object()
        self.assertNotEqual(self._make_one(sentinel), object())

    def test___eq__same_value(self):
        sentinel = object()
        self.assertEqual(self._make_one(sentinel), self._make_one(sentinel))

    def test___ne__same_value(self):
        sentinel = object()
        left = self._make_one(sentinel)
        right = self._make_one(sentinel)
        self.assertFalse(left != right)
class TestCellsRowOffsetFilter(unittest.TestCase):
    """Unit tests for ``CellsRowOffsetFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import CellsRowOffsetFilter

        return CellsRowOffsetFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The count lands in ``cells_per_row_offset_filter``.
        count = 76
        produced = self._make_one(count).to_pb()
        self.assertEqual(
            produced, _RowFilterPB(cells_per_row_offset_filter=count)
        )
class TestCellsRowLimitFilter(unittest.TestCase):
    """Unit tests for ``CellsRowLimitFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import CellsRowLimitFilter

        return CellsRowLimitFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The count lands in ``cells_per_row_limit_filter``.
        count = 189
        produced = self._make_one(count).to_pb()
        self.assertEqual(
            produced, _RowFilterPB(cells_per_row_limit_filter=count)
        )
class TestCellsColumnLimitFilter(unittest.TestCase):
    """Unit tests for ``CellsColumnLimitFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import CellsColumnLimitFilter

        return CellsColumnLimitFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The count lands in ``cells_per_column_limit_filter``.
        count = 10
        produced = self._make_one(count).to_pb()
        self.assertEqual(
            produced, _RowFilterPB(cells_per_column_limit_filter=count)
        )
class TestStripValueTransformerFilter(unittest.TestCase):
    """Unit tests for ``StripValueTransformerFilter.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import StripValueTransformerFilter

        return StripValueTransformerFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        # The flag lands in ``strip_value_transformer``.
        strip_flag = True
        produced = self._make_one(strip_flag).to_pb()
        self.assertEqual(
            produced, _RowFilterPB(strip_value_transformer=strip_flag)
        )
class TestApplyLabelFilter(unittest.TestCase):
    """Unit tests for ``ApplyLabelFilter``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import ApplyLabelFilter

        return ApplyLabelFilter

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_constructor(self):
        # The label is stored untouched.
        sentinel = object()
        self.assertIs(self._make_one(sentinel).label, sentinel)

    def test___eq__type_differ(self):
        sentinel = object()
        self.assertNotEqual(self._make_one(sentinel), object())

    def test___eq__same_value(self):
        sentinel = object()
        self.assertEqual(self._make_one(sentinel), self._make_one(sentinel))

    def test_to_pb(self):
        # The label lands in ``apply_label_transformer``.
        label = u"label"
        produced = self._make_one(label).to_pb()
        self.assertEqual(produced, _RowFilterPB(apply_label_transformer=label))
class Test_FilterCombination(unittest.TestCase):
    """Unit tests for the private ``_FilterCombination`` base class."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import _FilterCombination

        return _FilterCombination

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_constructor_defaults(self):
        # Omitting ``filters`` yields an empty list.
        self.assertEqual(self._make_one().filters, [])

    def test_constructor_explicit(self):
        sentinel = object()
        self.assertIs(self._make_one(filters=sentinel).filters, sentinel)

    def test___eq__(self):
        sentinel = object()
        self.assertEqual(
            self._make_one(filters=sentinel), self._make_one(filters=sentinel)
        )

    def test___eq__type_differ(self):
        sentinel = object()
        self.assertNotEqual(self._make_one(filters=sentinel), object())
class TestRowFilterChain(unittest.TestCase):
    """Unit tests for ``RowFilterChain.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import RowFilterChain

        return RowFilterChain

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        from google.cloud.bigtable.row_filters import RowSampleFilter
        from google.cloud.bigtable.row_filters import StripValueTransformerFilter

        strip = StripValueTransformerFilter(True)
        strip_pb = strip.to_pb()
        sample = RowSampleFilter(0.25)
        sample_pb = sample.to_pb()
        chain = self._make_one(filters=[strip, sample])
        expected = _RowFilterPB(
            chain=_RowFilterChainPB(filters=[strip_pb, sample_pb])
        )
        self.assertEqual(chain.to_pb(), expected)

    def test_to_pb_nested(self):
        from google.cloud.bigtable.row_filters import CellsRowLimitFilter
        from google.cloud.bigtable.row_filters import RowSampleFilter
        from google.cloud.bigtable.row_filters import StripValueTransformerFilter

        # A chain may itself contain another chain.
        inner = self._make_one(
            filters=[StripValueTransformerFilter(True), RowSampleFilter(0.25)]
        )
        inner_pb = inner.to_pb()
        limit = CellsRowLimitFilter(11)
        limit_pb = limit.to_pb()
        outer = self._make_one(filters=[inner, limit])
        expected = _RowFilterPB(
            chain=_RowFilterChainPB(filters=[inner_pb, limit_pb])
        )
        self.assertEqual(outer.to_pb(), expected)
class TestRowFilterUnion(unittest.TestCase):
    """Unit tests for ``RowFilterUnion.to_pb``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row_filters import RowFilterUnion

        return RowFilterUnion

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_to_pb(self):
        from google.cloud.bigtable.row_filters import RowSampleFilter
        from google.cloud.bigtable.row_filters import StripValueTransformerFilter

        strip = StripValueTransformerFilter(True)
        strip_pb = strip.to_pb()
        sample = RowSampleFilter(0.25)
        sample_pb = sample.to_pb()
        union = self._make_one(filters=[strip, sample])
        expected = _RowFilterPB(
            interleave=_RowFilterInterleavePB(filters=[strip_pb, sample_pb])
        )
        self.assertEqual(union.to_pb(), expected)

    def test_to_pb_nested(self):
        from google.cloud.bigtable.row_filters import CellsRowLimitFilter
        from google.cloud.bigtable.row_filters import RowSampleFilter
        from google.cloud.bigtable.row_filters import StripValueTransformerFilter

        # A union may itself contain another union.
        inner = self._make_one(
            filters=[StripValueTransformerFilter(True), RowSampleFilter(0.25)]
        )
        inner_pb = inner.to_pb()
        limit = CellsRowLimitFilter(11)
        limit_pb = limit.to_pb()
        outer = self._make_one(filters=[inner, limit])
        expected = _RowFilterPB(
            interleave=_RowFilterInterleavePB(filters=[inner_pb, limit_pb])
        )
        self.assertEqual(outer.to_pb(), expected)
class TestConditionalRowFilter(unittest.TestCase):
    """Unit tests for ``ConditionalRowFilter``."""
    @staticmethod
    def _get_target_class():
        # Deferred import keeps module import free of the dependency.
        from google.cloud.bigtable.row_filters import ConditionalRowFilter
        return ConditionalRowFilter
    def _make_one(self, *args, **kwargs):
        # Construct an instance of the class under test.
        return self._get_target_class()(*args, **kwargs)
    def test_constructor(self):
        base_filter = object()
        true_filter = object()
        false_filter = object()
        cond_filter = self._make_one(
            base_filter, true_filter=true_filter, false_filter=false_filter
        )
        self.assertIs(cond_filter.base_filter, base_filter)
        self.assertIs(cond_filter.true_filter, true_filter)
        self.assertIs(cond_filter.false_filter, false_filter)
    def test___eq__(self):
        base_filter = object()
        true_filter = object()
        false_filter = object()
        cond_filter1 = self._make_one(
            base_filter, true_filter=true_filter, false_filter=false_filter
        )
        cond_filter2 = self._make_one(
            base_filter, true_filter=true_filter, false_filter=false_filter
        )
        self.assertEqual(cond_filter1, cond_filter2)
    def test___eq__type_differ(self):
        base_filter = object()
        true_filter = object()
        false_filter = object()
        cond_filter1 = self._make_one(
            base_filter, true_filter=true_filter, false_filter=false_filter
        )
        cond_filter2 = object()
        self.assertNotEqual(cond_filter1, cond_filter2)
    def test_to_pb(self):
        # base/true/false filters map to the predicate/true/false
        # fields of the pb Condition message.
        from google.cloud.bigtable.row_filters import CellsRowOffsetFilter
        from google.cloud.bigtable.row_filters import RowSampleFilter
        from google.cloud.bigtable.row_filters import StripValueTransformerFilter
        row_filter1 = StripValueTransformerFilter(True)
        row_filter1_pb = row_filter1.to_pb()
        row_filter2 = RowSampleFilter(0.25)
        row_filter2_pb = row_filter2.to_pb()
        row_filter3 = CellsRowOffsetFilter(11)
        row_filter3_pb = row_filter3.to_pb()
        row_filter4 = self._make_one(
            row_filter1, true_filter=row_filter2, false_filter=row_filter3
        )
        filter_pb = row_filter4.to_pb()
        expected_pb = _RowFilterPB(
            condition=_RowFilterConditionPB(
                predicate_filter=row_filter1_pb,
                true_filter=row_filter2_pb,
                false_filter=row_filter3_pb,
            )
        )
        self.assertEqual(filter_pb, expected_pb)
    def test_to_pb_true_only(self):
        # Omitting false_filter leaves that pb field unset.
        from google.cloud.bigtable.row_filters import RowSampleFilter
        from google.cloud.bigtable.row_filters import StripValueTransformerFilter
        row_filter1 = StripValueTransformerFilter(True)
        row_filter1_pb = row_filter1.to_pb()
        row_filter2 = RowSampleFilter(0.25)
        row_filter2_pb = row_filter2.to_pb()
        row_filter3 = self._make_one(row_filter1, true_filter=row_filter2)
        filter_pb = row_filter3.to_pb()
        expected_pb = _RowFilterPB(
            condition=_RowFilterConditionPB(
                predicate_filter=row_filter1_pb, true_filter=row_filter2_pb
            )
        )
        self.assertEqual(filter_pb, expected_pb)
    def test_to_pb_false_only(self):
        # Omitting true_filter leaves that pb field unset.
        from google.cloud.bigtable.row_filters import RowSampleFilter
        from google.cloud.bigtable.row_filters import StripValueTransformerFilter
        row_filter1 = StripValueTransformerFilter(True)
        row_filter1_pb = row_filter1.to_pb()
        row_filter2 = RowSampleFilter(0.25)
        row_filter2_pb = row_filter2.to_pb()
        row_filter3 = self._make_one(row_filter1, false_filter=row_filter2)
        filter_pb = row_filter3.to_pb()
        expected_pb = _RowFilterPB(
            condition=_RowFilterConditionPB(
                predicate_filter=row_filter1_pb, false_filter=row_filter2_pb
            )
        )
        self.assertEqual(filter_pb, expected_pb)
def _ColumnRangePB(*pb_args, **pb_kwargs):
    """Test helper: build a ``data_pb2.ColumnRange`` protobuf."""
    from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2

    return data_v2_pb2.ColumnRange(*pb_args, **pb_kwargs)
def _RowFilterPB(*pb_args, **pb_kwargs):
    """Test helper: build a ``data_pb2.RowFilter`` protobuf."""
    from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2

    return data_v2_pb2.RowFilter(*pb_args, **pb_kwargs)
def _RowFilterChainPB(*pb_args, **pb_kwargs):
    """Test helper: build a ``data_pb2.RowFilter.Chain`` protobuf."""
    from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2

    return data_v2_pb2.RowFilter.Chain(*pb_args, **pb_kwargs)
def _RowFilterConditionPB(*pb_args, **pb_kwargs):
    """Test helper: build a ``data_pb2.RowFilter.Condition`` protobuf."""
    from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2

    return data_v2_pb2.RowFilter.Condition(*pb_args, **pb_kwargs)
def _RowFilterInterleavePB(*pb_args, **pb_kwargs):
    """Test helper: build a ``data_pb2.RowFilter.Interleave`` protobuf."""
    from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2

    return data_v2_pb2.RowFilter.Interleave(*pb_args, **pb_kwargs)
def _TimestampRangePB(*pb_args, **pb_kwargs):
    """Test helper: build a ``data_pb2.TimestampRange`` protobuf."""
    from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2

    return data_v2_pb2.TimestampRange(*pb_args, **pb_kwargs)
def _ValueRangePB(*pb_args, **pb_kwargs):
    """Test helper: build a ``data_pb2.ValueRange`` protobuf."""
    from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2

    return data_v2_pb2.ValueRange(*pb_args, **pb_kwargs)
| |
import os
import time
import csv
import datetime
import math
from fractions import *
try:
import urllib.parse as urllib
except ImportError:
import urllib
from _login import Downloader
def collectTrends(username, password, terms, startDt, endDt, granularity='d',
                  geo='', cat='', gprops='', tz='', sum=False, savePath=None):
    """
    Downloads normalized Google trend data between [startDt, endDt).

    Args:
        username: A string representing a Google username.
        password: A string representing the corresponding Google password.
        terms: A tuple of strings whose query volume is to be searched.
        startDt: A datetime object for the start of the period (inclusive).
            Only the month and year are considered.
        endDt: A datetime object for the end of the period (exclusive).
            Only the month and year are considered.
        granularity: The frequency with which the data should be spread.
            This can be: 'd'-> daily, or 'w'-> weekly.
        geo: A string representing a specific country to query.
            Ex: US, UK, DE, FR, etc.
        cat: A string representing the specific category code desired.
        gprops: A string representing the type of search to be included.
            Ex: images, news, froogle, and youtube
        tz: A string representing the desired timezone.
        sum: Sum values of multiple terms by day/week before normalizing.
            (This parameter shadows the builtin ``sum``; the name is kept
            for backward compatibility with existing callers.)
        savePath: A string for the file path where the data can be saved.

    Returns:
        A list where each line is a list of format:
        [datetime, value1, value2, value3, etc.], where "datetime" is the
        datetime of the query volume, and "value" is a float of the normalized
        query volume (between [0.0, 100.0]), where the largest value is
        set to 100. There is a header on the first line, with format:
        "date,term1,term2,term3, ... termN" where N is the total number of
        terms. Returns empty list if error.
    """
    # --- validation: report the problem and bail out with an empty list ---
    if granularity != 'd' and granularity != 'w':
        print("Error: Granularity must be 'd' or 'w,' not "+granularity)
        return []
    if startDt > endDt:
        print("Error: startDt must come earlier in time than endDt")
        return []
    if startDt < datetime.datetime(month=1, day=1, year=2004):
        print("Error: Google Trends does not provide data before 2004, your start date was: "+str(startDt))
        return []
    if endDt > datetime.datetime.today():
        print("Error: Google Trends cannot see the future, your end date is : "
              +str(endDt)+", which is later than today")
        return []
    if not terms:
        print("Error: terms tuple is empty, please provide a populated tuple")
        return []
    # --- all set to download files ---
    # Note: Always overlap by 1 month (which is why count = freq-1).
    if granularity == "d":
        countMonth = 1
        freq = "2m"
    else:  # granularity == 'w'
        countMonth = 5
        freq = "6m"
    numMonths = (endDt.year - startDt.year) * 12 + (endDt.month - startDt.month)
    numFiles = int(math.ceil(float(numMonths) / countMonth))
    # Package terms into groups of 4 plus a shared reference term
    # (5 per request, the most Google allows at once).
    segmentedTerms = _packTerms(terms)
    reportData = []
    for segTerms in segmentedTerms:
        # download each multi-month csv as a string
        rawReport = _downloadReport(username, password, segTerms,
                                    startDt, numFiles, countMonth, freq,
                                    geo, cat, gprops, tz)
        # format rawReport into list of each multi-month list.
        report = _prepTrends(rawReport, startDt, numFiles, countMonth, granularity)
        # if there is nothing in the report data, then return empty list.
        if not report:
            print("Error: at least one file was unable to be downloaded."
                  " Perhaps your search terms are invalid")
            return []
        reportData.append(report)
    # If, in the same period, between two sets, the added reference term
    # changes scale, then scale the later set to match the first one.
    scaleReports = _scaleRep(reportData)
    # when more than 4 terms, merge reports into single report.
    mergeTrend = _merge(scaleReports)
    initValues = mergeTrend[0][0]
    # calculate the percent change between subsequent data points
    # and merge monthly lists.
    percTrend = _calcPerc(numFiles, mergeTrend)
    # convert back into levels, all on same scale.
    reformTrend = _reformTrend(percTrend, initValues)
    if sum:
        # sum terms query volumes together.
        reformTrend = _calcSum(reformTrend)
    # normalized between [0.0,100.0].
    normTrend = _normalize(reformTrend)
    # trim off extra days, which only occur with granularity='w'
    trimTrend = _trim(normTrend, endDt)
    # add header.
    finalTrend = _addHeader(trimTrend, terms)
    if savePath is not None:
        _save(savePath, finalTrend)
    return finalTrend
def collectRawTrends(username, password, terms, startDt, endDt, geo='', cat='', gprops='', tz='', savePath=None):
    """
    Downloads raw Google Trends data.

    The most basic download function. Simply downloads the raw csv file from
    Google Trends as a string. No transformations are performed on any of the
    data.

    Args:
        username: A string representing a Google username.
        password: A string representing the corresponding Google password.
        terms: A tuple of strings whose query volume is to be searched. Google
            only accepts 5 terms per query.
        startDt: A datetime object for the start of the period (inclusive).
            Only the month and year are considered.
        endDt: A datetime object for the end of the period (exclusive).
            Only the month and year are considered.
        geo: A string representing a specific country to query (e.g. 'US').
        cat: A string representing the specific category code desired.
        gprops: A string representing the type of search to be included.
        tz: A string representing the desired timezone.
        savePath: A string for the file path where the data can be saved.

    Returns:
        A list of 1 string representing the entire downloaded csv, or an
        empty list on error.
    """
    # --- validation: report the problem and bail out with an empty list ---
    if startDt > endDt:
        print("Error: startDt must come earlier in time than endDt")
        return []
    if startDt < datetime.datetime(month=1, day=1, year=2004):
        print("Error: Google Trends does not provide data before 2004, your start date was: "+str(startDt))
        return []
    if endDt > datetime.datetime.today():
        print("Error: Google Trends cannot see the future, your end date is : "
              +str(endDt)+", which is later than today")
        return []
    if not terms:
        print("Error: terms tuple is empty, please provide a populated tuple")
        return []
    if len(terms) > 5:
        print("Error: Google Trends only accepts 5 terms at a time")
        return []
    # --- all set to download: one request covering the whole span ---
    numMonths = (endDt.year - startDt.year) * 12 + (endDt.month - startDt.month)
    report = _downloadReport(username, password, terms, startDt, 1, 0,
                             str(numMonths)+"m", geo, cat, gprops, tz)
    if not report:
        print("Error: file was unable to be downloaded.")
        return []
    if savePath is not None:
        # NOTE(review): _save expects parsed rows; passing the raw csv string
        # list matches the original behavior but may not round-trip cleanly.
        _save(savePath, report)
    return report
def _packTerms(terms):
"""
Packages terms into lists of 4.
If the length of terms is not divisible by 4, then the final
list is short a bit (but this is okay).
"""
segmentedTerms = []
index = 0
while True:
endIndex = index + 4
if endIndex >= len(terms):
endIndex = len(terms)
segmentedTerms.append(terms[index : endIndex])
break
else:
segmentedTerms.append(terms[index : endIndex])
index += 4
continue
#this make sure that all data in different segments
#have the same scaling factor. One of the terms is
#used in all the segments for scaling, and then removed
#later
for seg in segmentedTerms:
seg.append(terms[0])
return segmentedTerms
def _downloadReport(username, password, terms, startDt, numFiles,
                    countMonth, freq, geo, cat, gprops, tz):
    """Fetch one Google Trends CSV per time window and return them as strings.

    Must have a maximum of FIVE terms. Each window starts ``countMonth``
    months after the previous one and spans ``freq`` (e.g. '2m').
    """
    downloads = []
    client = Downloader(username, password)
    for fileNum in range(numFiles):
        # Advance the window start, rolling December over into January.
        month = startDt.month + fileNum * countMonth
        year = startDt.year
        while month > 12:
            month -= 12
            year += 1
        # Build the export query URL; terms are joined with an encoded comma.
        quoted = [urllib.quote(t) for t in terms]
        query = "http://www.google.com/trends/trendsReport?&q=" + "%2C".join(quoted)
        query += "&geo="+urllib.quote(geo)+"&cat="+urllib.quote(cat)+"&gprop="+urllib.quote(gprops)
        query += "&cmpt=q&content=1&export=1&date="+str(month)+"%2F"+str(year)+"%20"+urllib.quote(freq)
        print(query)
        downloads.append(client.downloadReport(query))
    return downloads
def _prepTrends(rawReport, startDt, numFiles, countMonth, granularity):
    """
    Helper function which reformats data into list of lists with correct data
    types. If anything is empty or has incorrect data, then an empty list is
    returned.

    Args:
        rawReport: List of raw csv strings, one per downloaded window.
        startDt: Datetime marking the start of the first window.
        numFiles: Number of windows expected in rawReport.
        countMonth: Number of non-overlapping months per window.
        granularity: 'd' for daily rows, 'w' for weekly rows; silently
            switched if the downloaded file disagrees.

    Returns:
        A list with one entry per window; each entry is a list of
        [datetime, int, ...] rows (the trailing reference-term column is
        dropped). Empty list on any parse problem.
    """
    #load each rawReport into separate list
    reportData = []
    for i in range(numFiles):
        #convert string to 2d list
        raw = rawReport[i]
        rawLines = raw.split("\n")
        lines = []
        for rawLine in rawLines:
            line = rawLine.split(",")
            lines.append(line)
        #check if the actual granularity matches the desired granularity. If
        #no, then alter to match and continue
        # Row 4, column 0 of a Trends csv holds "Day" or "Week" -- assumes
        # the fixed Trends export layout; TODO confirm against a real file.
        trueGran = lines[4][0]
        if granularity == "d" and trueGran == "Week":
            print("Error: The file returned from Google Trends doesn't match your desired granularity."
                  " Altering your desired granularity to match.")
            # NOTE: mutating granularity here affects all later windows too.
            granularity = 'w'
        if granularity == "w" and trueGran == "Day":
            print("Error: The file returned from Google Trends doesn't match your desired granularity."
                  " Altering your desired granularity to match.")
            granularity = 'd'
        #prep data
        #remove header
        lines = lines[5:]
        #remove country data
        for j, line in enumerate(lines):
            if line[0] == "": #checks if line is empty
                # The first blank row separates the time series from the
                # regional-interest section that follows it.
                lines = lines[:j]
                break
            else:
                continue
        #remove 2nd month data (except 1st day)
        for j, line in enumerate(lines):
            try:
                if granularity == 'd':
                    dt = datetime.datetime.strptime(line[0], "%Y-%m-%d")
                else: #granularity == 'w':
                    # Weekly rows look like "YYYY-MM-DD - YYYY-MM-DD"; keep
                    # only the first date by dropping the last 13 characters.
                    dt = line[0][:-13]
                    dt = datetime.datetime.strptime(dt, "%Y-%m-%d")
                finalMonth = startDt.month + (i+1)*countMonth #would just use % operator for this
                while finalMonth > 12: #however it doesn't work bc the range
                    finalMonth -= 12 #runs from 1-12, not 0-11
                if dt.month == finalMonth:
                    lines = lines[:j+1] #+1 bc we want to keep this first day/week/month
                    break
                else:
                    continue
            #If there is a ValueError, then there is incorrectly week data,
            #and so we should just return an empty array, bc the data is not
            #correct to begin with.
            except ValueError:
                print("Value Error: Unable to format datetime correctly from file, returning empty list.")
                return []
        #Checks that there is data. If not, then returns empty list.
        if len(lines) == 0:
            return []
        #Saves data to list, which is element of larger list.
        report = []
        for line in lines:
            try:
                newLine = []
                if granularity == 'd':
                    dt = datetime.datetime.strptime(line[0], "%Y-%m-%d")
                else: #granularity == 'w':
                    dt = line[0][:-13]
                    dt = datetime.datetime.strptime(dt, "%Y-%m-%d")
                newLine.append(dt)
                #Removes the final item in the line, which is the constant term
                #This makes sure that there is the same scaling.
                for j in range(1, len(line)-1):
                    value = int(line[j])
                    newLine.append(value)
                report.append(newLine)
            except ValueError: #issue with data, return empty list
                print("Value Error: Unable to format datetime correctly from file, returning empty list.")
                return []
        reportData.append(report)
    return reportData
def _scaleRep(reportData):
"""
Scales reports of different sets of terms.
Using the percent change with the 1 month overlap should take care of the
variation in time of a single report. However, if, at the same moment in
time, a secondary report contains a term which is larger than the constant
term and so causes the constant to have different values, then the scale is
off. To fix this, we select a value for the constant term at the same time
across the new and old reports. factor = old / new, and multiply factor
across the new report to have the same scale as the old one.
"""
baseMonth = reportData[0][0]
for i in range(1, len(reportData)):
testMonth = reportData[i][0]
factor = 0.0
for j in range(len(baseMonth)):
old = baseMonth[j][len(baseMonth[j])-1] #last term in line
new = testMonth[j][len(testMonth[j])-1] #ditto
if abs(new - old) > 3:
#^means that there is a large difference and we need to scale
old = 1.0 if old == 0.0 else old
new = 1.0 if new == 0.0 else new
factor = old / float(new)
break
if abs(factor) > 0.0003: #in case floating point error
for j in range(len(reportData[i])):
for k in range(len(reportData[i][j])):
for l in range(1, len(reportData[i][j][k])):
reportData[i][j][k][l] = factor*reportData[i][j][k][l]
return reportData
def _merge(reportData):
"""Merges the separate reports into one large report."""
merged = reportData[0]
for i in range(1, len(reportData)): #the report
for j in range(len(reportData[i])): #the month(s)
for k in range(len(reportData[i][j])): #the line
for l in range(1, len(reportData[i][j][k])): #the values in the line
merged[j][k].append(reportData[i][j][k][l])
return merged
def _calcPerc(numFiles, report):
"""Calculates the percent change between subsequent data points."""
percTrend = []
#calculate the percent change of each datapoint
for i in range(numFiles):
#for first day of first month, percent change is set to 1.
#for each subsequent month, the percent change on the first
#day is calculated at the very end of the previous month.
if len(percTrend) == 0:
#percent change is set to 1.0 for first day of first month
newLine = []
dt = report[0][0][0]
newLine.append(dt)
for k in range(1, len(report[0][0])):
newLine.append(1.0)
percTrend.append(newLine)
#go through each day and calculate percent change over prev day
for j in range(1, len(report[i])):
newLine = []
line = report[i][j]
prevLine = report[i][j-1]
dt = line[0]
newLine.append(dt)
for k in range(1, len(report[i][j])):
#to avoid divide-by-zero error, set all 0's in data to 1's
line[k] = 1 if line[k] == 0 else line[k]
prevLine[k] = 1 if prevLine[k] == 0 else prevLine[k]
perc = Fraction(line[k], prevLine[k])
newLine.append(perc)
percTrend.append(newLine)
return percTrend
def _reformTrend(percs, inits):
"""
Helper function to recreate original trend based on percent change data.
"""
trend = []
trend.append(percs[0])
for i in range(1, len(percs)):
newLine = []
newLine.append(percs[i][0]) #append the date
for j in range(1, len(percs[i])): #for each term on date
level = float(trend[i-1][j]) * percs[i][j].numerator / percs[i][j].denominator #level is the prev level * %change
newLine.append(level)
trend.append(newLine)
return trend
def _calcSum(data):
"""
Sums the values of the reports.
"""
trend = []
for line in data:
sum = 0.0
for i in range(1, len(line)):
sum += line[i]
dt = line[0]
trend.append([dt, sum])
return trend
def _normalize(data):
"""
Helper function to normalize data between [0.0, 100.0].
This is based on largest value found. If the values for individual terms
are not summed across terms, then this is the largest value among all terms
as well.
"""
maxVal = 0.0
for i, line in enumerate(data):
for j in range(1, len(line)):
if line[j] > maxVal:
maxVal = line[j]
trend = []
for line in data:
newLine = []
newLine.append(line[0])
for j in range(1, len(line)):
norm = line[j]*100
norm /= maxVal
norm = round(norm, 3)
newLine.append(norm)
trend.append(newLine)
return trend
def _trim(data, endDt):
"""Removes datetime-value pairs >= endDt to the precision of one month."""
index = 0
for i,line in enumerate(data):
if line[0].month == endDt.month and line[0].year == endDt.year:
index = i
break
return data[0:i]
def _addHeader(data, terms):
header = ["date"]
if sum == True:
other = ""
for term in terms:
other += " "+term
header.append(other)
else:
for term in terms:
header.append(term)
trend = []
trend.append(header)
for line in data:
trend.append(line)
return trend
def _deleteFiles(path, numFiles):
"""Delete unnecissary Google trend report files."""
for i in range(numFiles):
if i == 0:
name = path+"report.csv"
else:
name = path+"report ("+str(i)+").csv"
os.remove(name)
def _save(path, data):
"""Writes data to file."""
file = open(path, "wb")
writer = csv.writer(file, delimiter=",")
#header line
writer.writerow(data[0])
#other lines
for i in range(1, len(data)):
newLine = []
newLine.append(data[i][0].strftime("%Y-%m-%d"))
for j in range(1, len(data[i])):
newLine.append(data[i][j])
writer.writerow(newLine)
file.close()
def _read(path):
"""Reads data into list."""
file = open(path)
reader = csv.reader(file, delimiter=",")
#header line
lines = []
for line in reader:
lines.append(line)
data = []
data.append(lines[0])
#other lines
for i in range(1, len(lines)):
newLine = []
dt = datetime.datetime.strptime(lines[i][0], "%Y-%m-%d")
newLine.append(dt)
for j in range(1, len(lines[i])):
newLine.append(float(lines[i][j]))
data.append(newLine)
file.close()
return data
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes a generator of values, and accumulates them for a frontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import threading
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf.config_pb2 import RunMetadata
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import event_file_loader
from tensorflow.python.summary.impl import reservoir
from tensorflow.python.util import compat
# Shorthand used by all of the event-record definitions below.
namedtuple = collections.namedtuple
# One record type per kind of summary datum; each carries the wall time and
# global step at which it was written.
ScalarEvent = namedtuple('ScalarEvent', ['wall_time', 'step', 'value'])
HealthPillEvent = namedtuple(
    'HealthPillEvent',
    ['wall_time', 'step', 'node_name', 'output_slot', 'value'])
CompressedHistogramEvent = namedtuple('CompressedHistogramEvent',
                                      ['wall_time', 'step',
                                       'compressed_histogram_values'])
CompressedHistogramValue = namedtuple('CompressedHistogramValue',
                                      ['basis_point', 'value'])
HistogramEvent = namedtuple('HistogramEvent',
                            ['wall_time', 'step', 'histogram_value'])
HistogramValue = namedtuple('HistogramValue', ['min', 'max', 'num', 'sum',
                                               'sum_squares', 'bucket_limit',
                                               'bucket'])
ImageEvent = namedtuple('ImageEvent', ['wall_time', 'step',
                                       'encoded_image_string', 'width',
                                       'height'])
AudioEvent = namedtuple('AudioEvent', ['wall_time', 'step',
                                       'encoded_audio_string', 'content_type',
                                       'sample_rate', 'length_frames'])
## Different types of summary events handled by the event_accumulator
# Maps a Summary.Value field name to the EventAccumulator handler-method name
# invoked (via getattr) when that field is set.
SUMMARY_TYPES = {'simple_value': '_ProcessScalar',
                 'histo': '_ProcessHistogram',
                 'image': '_ProcessImage',
                 'audio': '_ProcessAudio'}
## The tagTypes below are just arbitrary strings chosen to pass the type
## information of the tag from the backend to the frontend
COMPRESSED_HISTOGRAMS = 'compressedHistograms'
HISTOGRAMS = 'histograms'
IMAGES = 'images'
AUDIO = 'audio'
SCALARS = 'scalars'
HEALTH_PILLS = 'health_pills'
GRAPH = 'graph'
META_GRAPH = 'meta_graph'
RUN_METADATA = 'run_metadata'
## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
## and then the long tail.
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
# Default per-tag retention caps for each reservoir (0 means keep everything).
DEFAULT_SIZE_GUIDANCE = {
    COMPRESSED_HISTOGRAMS: 500,
    IMAGES: 4,
    AUDIO: 4,
    SCALARS: 10000,
    # We store this many health pills per op.
    HEALTH_PILLS: 100,
    HISTOGRAMS: 1,
}
# Size guidance that disables sampling entirely (see Reservoir semantics).
STORE_EVERYTHING_SIZE_GUIDANCE = {
    COMPRESSED_HISTOGRAMS: 0,
    IMAGES: 0,
    AUDIO: 0,
    SCALARS: 0,
    HEALTH_PILLS: 0,
    HISTOGRAMS: 0,
}
# The tag that values containing health pills have. Health pill data is stored
# in tensors. In order to distinguish health pill values from scalar values, we
# rely on how health pill values have this special tag value.
_HEALTH_PILL_EVENT_TAG = '__health_pill__'
def IsTensorFlowEventsFile(path):
  """Heuristically decide whether `path` names a TF events file.

  Args:
    path: A file path to check if it is an event file.

  Raises:
    ValueError: If the path is an empty string.

  Returns:
    True when the basename contains 'tfevents'.
  """
  if not path:
    raise ValueError('Path must be a nonempty string')
  basename = os.path.basename(path)
  return 'tfevents' in compat.as_str_any(basename)
class EventAccumulator(object):
"""An `EventAccumulator` takes an event generator, and accumulates the values.
The `EventAccumulator` is intended to provide a convenient Python interface
for loading Event data written during a TensorFlow run. TensorFlow writes out
`Event` protobuf objects, which have a timestamp and step number, and often
contain a `Summary`. Summaries can have different kinds of data like an image,
a scalar value, or a histogram. The Summaries also have a tag, which we use to
organize logically related data. The `EventAccumulator` supports retrieving
the `Event` and `Summary` data by its tag.
Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
`'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
data types. Then, various functional endpoints (eg
`Accumulator.Scalars(tag)`) allow for the retrieval of all data
associated with that tag.
The `Reload()` method synchronously loads all of the data written so far.
Histograms, audio, and images are very large, so storing all of them is not
recommended.
@@Reload
@@Tags
@@Scalars
@@HealthPills
@@Graph
@@MetaGraph
@@RunMetadata
@@Histograms
@@CompressedHistograms
@@Images
@@Audio
"""
def __init__(self,
             path,
             size_guidance=DEFAULT_SIZE_GUIDANCE,
             compression_bps=NORMAL_HISTOGRAM_BPS,
             purge_orphaned_data=True):
  """Construct the `EventAccumulator`.

  Args:
    path: A file path to a directory containing tf events files, or a single
      tf events file. The accumulator will load events from this path.
    size_guidance: Map from `tagType` string to the number of items to keep
      in memory per tag of that type (0 keeps everything). Tag types missing
      from the map fall back to DEFAULT_SIZE_GUIDANCE.
    compression_bps: Basis points at which histogram data is compressed for
      the `CompressedHistograms` tag (see `ProcessCompressedHistogram`).
    purge_orphaned_data: Whether to discard any events that were "orphaned"
      by a TensorFlow restart.
  """
  # Fill in defaults for any tag type the caller did not size explicitly.
  sizes = {key: size_guidance.get(key, default)
           for key, default in DEFAULT_SIZE_GUIDANCE.items()}
  self._first_event_timestamp = None
  self._scalars = reservoir.Reservoir(size=sizes[SCALARS])
  # Unlike the other reservoirs, the health-pill reservoir is keyed by op
  # name rather than tag, so per-node lookups are efficient.
  self._health_pills = reservoir.Reservoir(size=sizes[HEALTH_PILLS])
  self._graph = None
  self._graph_from_metagraph = False
  self._meta_graph = None
  self._tagged_metadata = {}
  self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
  self._compressed_histograms = reservoir.Reservoir(
      size=sizes[COMPRESSED_HISTOGRAMS], always_keep_last=False)
  self._images = reservoir.Reservoir(size=sizes[IMAGES])
  self._audio = reservoir.Reservoir(size=sizes[AUDIO])
  self._generator_mutex = threading.Lock()
  self._generator = _GeneratorFromPath(path)
  self._compression_bps = compression_bps
  self.purge_orphaned_data = purge_orphaned_data
  self.most_recent_step = -1
  self.most_recent_wall_time = -1
  self.file_version = None
  # The reservoir attributes that accumulate summary data.
  self.accumulated_attrs = ('_scalars', '_histograms',
                            '_compressed_histograms', '_images', '_audio')
  self._tensor_summaries = {}
def Reload(self):
  """Synchronously ingest every event written since the previous call.

  On the first call, all events currently in the file are loaded.

  Returns:
    This `EventAccumulator`, for call chaining.
  """
  with self._generator_mutex:
    for event in self._generator.Load():
      self._ProcessEvent(event)
  return self
def FirstEventTimestamp(self):
  """Return the wall-time (seconds) of the earliest event seen.

  If the first event has already been loaded (by this method or `Reload`)
  the cached value is returned immediately. Otherwise one event is pulled
  from the generator; note this blocks until any in-flight `Reload` releases
  the mutex.

  Returns:
    The timestamp in seconds of the first event that was loaded.

  Raises:
    ValueError: If no events have been loaded and there were no events found
      on disk.
  """
  if self._first_event_timestamp is not None:
    return self._first_event_timestamp
  with self._generator_mutex:
    try:
      # Processing any event records the first timestamp as a side effect.
      self._ProcessEvent(next(self._generator.Load()))
      return self._first_event_timestamp
    except StopIteration:
      raise ValueError('No event timestamp could be found')
def _ProcessEvent(self, event):
  """Called whenever an event is loaded.

  Dispatches on the event's payload: file_version records, graph defs,
  metagraphs, tagged run metadata, and summary values (tensor, health-pill,
  and the classic scalar/histogram/image/audio kinds).
  """
  # The first event processed establishes the accumulator's start timestamp.
  if self._first_event_timestamp is None:
    self._first_event_timestamp = event.wall_time
  if event.HasField('file_version'):
    new_file_version = _ParseFileVersion(event.file_version)
    if self.file_version and self.file_version != new_file_version:
      ## This should not happen.
      logging.warn(('Found new file_version for event.proto. This will '
                    'affect purging logic for TensorFlow restarts. '
                    'Old: {0} New: {1}').format(self.file_version,
                                                new_file_version))
    self.file_version = new_file_version
  # Drop any data orphaned by a TensorFlow restart before accepting this
  # event's payload.
  self._MaybePurgeOrphanedData(event)
  ## Process the event.
  # GraphDef and MetaGraphDef are handled in a special way:
  # If no graph_def Event is available, but a meta_graph_def is, and it
  # contains a graph_def, then use the meta_graph_def.graph_def as our graph.
  # If a graph_def Event is available, always prefer it to the graph_def
  # inside the meta_graph_def.
  if event.HasField('graph_def'):
    if self._graph is not None:
      logging.warn(('Found more than one graph event per run, or there was '
                    'a metagraph containing a graph_def, as well as one or '
                    'more graph events. Overwriting the graph with the '
                    'newest event.'))
    self._graph = event.graph_def
    self._graph_from_metagraph = False
    # A new graph may define new TensorSummary ops; refresh the cache.
    self._UpdateTensorSummaries()
  elif event.HasField('meta_graph_def'):
    if self._meta_graph is not None:
      logging.warn(('Found more than one metagraph event per run. '
                    'Overwriting the metagraph with the newest event.'))
    self._meta_graph = event.meta_graph_def
    if self._graph is None or self._graph_from_metagraph:
      # We may have a graph_def in the metagraph. If so, and no
      # graph_def is directly available, use this one instead.
      meta_graph = meta_graph_pb2.MetaGraphDef()
      meta_graph.ParseFromString(self._meta_graph)
      if meta_graph.graph_def:
        if self._graph is not None:
          logging.warn(('Found multiple metagraphs containing graph_defs,'
                        'but did not find any graph events. Overwriting the '
                        'graph with the newest metagraph version.'))
        self._graph_from_metagraph = True
        self._graph = meta_graph.graph_def.SerializeToString()
        self._UpdateTensorSummaries()
  elif event.HasField('tagged_run_metadata'):
    tag = event.tagged_run_metadata.tag
    if tag in self._tagged_metadata:
      logging.warn('Found more than one "run metadata" event with tag ' +
                   tag + '. Overwriting it with the newest event.')
    self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata
  elif event.HasField('summary'):
    for value in event.summary.value:
      if value.HasField('tensor'):
        # Health pills share the tensor field but carry a sentinel tag.
        if value.tag == _HEALTH_PILL_EVENT_TAG:
          self._ProcessHealthPillSummary(value, event)
        else:
          self._ProcessTensorSummary(value, event)
      else:
        # Classic summaries: dispatch on whichever typed field is set.
        for summary_type, summary_func in SUMMARY_TYPES.items():
          if value.HasField(summary_type):
            datum = getattr(value, summary_type)
            getattr(self, summary_func)(value.tag, event.wall_time,
                                        event.step, datum)
def _ProcessTensorSummary(self, value, event):
  """Process summaries generated by the TensorSummary op.

  Tensor summaries carry their payload in a Tensor field and are not
  self-descriptive: the NodeDef of the originating TensorSummary op (cached
  in self._tensor_summaries) supplies the summary_type and tag. Anything
  that cannot be interpreted is reported once at ERROR level and dropped.

  Args:
    value: A summary_pb2.Summary.Value with a Tensor field.
    event: The event_pb2.Event containing that value.
  """
  def LogErrorOnce(msg):
    logging.log_first_n(logging.ERROR, msg, 1)

  name = value.node_name
  # Without a graph there is no NodeDef to interpret the tensor with.
  if self._graph is None:
    LogErrorOnce('Attempting to process TensorSummary output, but '
                 'no graph is present, so processing is impossible. '
                 'All TensorSummary output will be ignored.')
    return
  if name not in self._tensor_summaries:
    LogErrorOnce('No node_def for TensorSummary {}; skipping this sequence.'.
                 format(name))
    return
  type_hint = self._tensor_summaries[name].type_hint
  if not type_hint:
    LogErrorOnce('No type_hint for TensorSummary {}; skipping this sequence.'.
                 format(name))
    return
  # Scalars are the only tensor-summary type understood so far.
  if type_hint != 'scalar':
    LogErrorOnce(
        'Unsupported type {} for TensorSummary {}; skipping this sequence.'.
        format(type_hint, name))
    return
  scalar = float(tensor_util.MakeNdarray(value.tensor))
  self._ProcessScalar(name, event.wall_time, event.step, scalar)
def _ProcessHealthPillSummary(self, value, event):
  """Process summaries containing health pills.

  These summaries are distinguished by the fact that they have a Tensor field
  and have a special tag value.

  This method emits ERROR-level messages to the logs if it encounters Tensor
  summaries that it cannot process.

  Args:
    value: A summary_pb2.Summary.Value with a Tensor field.
    event: The event_pb2.Event containing that value.
  """
  # np.frombuffer replaces the deprecated np.fromstring for decoding binary
  # payloads; the decoded values are identical. (It returns a read-only view
  # over the bytes, which is fine since the elements are only read.)
  elements = np.frombuffer(value.tensor.tensor_content, dtype=np.float64)
  # The node_name property of the value object is actually a watch key: a
  # combination of node name, output slot, and a suffix. We capture the
  # actual node name and the output slot with a regular expression.
  match = re.match(r'^(.*):(\d+):DebugNumericSummary$', value.node_name)
  if not match:
    logging.log_first_n(
        logging.ERROR,
        'Unsupported watch key %s for health pills; skipping this sequence.',
        1,
        value.node_name)
    return
  node_name = match.group(1)
  output_slot = int(match.group(2))
  self._ProcessHealthPill(
      event.wall_time, event.step, node_name, output_slot, elements)
def _UpdateTensorSummaries(self):
  """Re-scan the current graph for TensorSummary ops and cache their descriptions."""
  graph = self.Graph()
  for node in graph.node:
    if node.op == 'TensorSummary':
      self._tensor_summaries[node.name] = summary.get_summary_description(node)
def Tags(self):
  """Return all tags found in the value stream.

  Returns:
    A `{tagType: ['list', 'of', 'tags']}` dictionary. The GRAPH and
    META_GRAPH entries are booleans indicating presence rather than lists.
  """
  return {
      IMAGES: self._images.Keys(),
      AUDIO: self._audio.Keys(),
      HISTOGRAMS: self._histograms.Keys(),
      SCALARS: self._scalars.Keys(),
      COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),
      # Use a heuristic: if the metagraph is available, but
      # graph is not, then we assume the metagraph contains the graph.
      GRAPH: self._graph is not None,
      META_GRAPH: self._meta_graph is not None,
      RUN_METADATA: list(self._tagged_metadata.keys()),
  }
def Scalars(self, tag):
  """Return every `ScalarEvent` recorded under `tag`.

  Args:
    tag: A string tag associated with the events.

  Raises:
    KeyError: If the tag is not found.

  Returns:
    An array of `ScalarEvent`s.
  """
  return self._scalars.Items(tag)
def HealthPills(self, node_name):
  """Return every health pill recorded for the op named `node_name`.

  Args:
    node_name: The name of the node to obtain health pills for.

  Raises:
    KeyError: If the node name is not found.

  Returns:
    An array of `HealthPillEvent`s.
  """
  return self._health_pills.Items(node_name)
def Graph(self):
  """Return the graph definition, if there is one.

  If the graph is stored directly, return that. If no graph is stored
  directly but a metagraph is stored containing a graph, return that.

  Raises:
    ValueError: If there is no graph for this run.

  Returns:
    The `graph_def` proto.
  """
  # Guard first, then parse -- equivalent to the original's early return.
  if self._graph is None:
    raise ValueError('There is no graph in this EventAccumulator')
  graph = graph_pb2.GraphDef()
  graph.ParseFromString(self._graph)
  return graph
def MetaGraph(self):
  """Return the metagraph definition, if one was recorded.

  Raises:
    ValueError: If there is no metagraph for this run.

  Returns:
    The `meta_graph_def` proto.
  """
  if self._meta_graph is None:
    raise ValueError('There is no metagraph in this EventAccumulator')
  parsed = meta_graph_pb2.MetaGraphDef()
  parsed.ParseFromString(self._meta_graph)
  return parsed
def RunMetadata(self, tag):
    """Given a tag, return the associated session.run() metadata.

    Args:
      tag: A string tag associated with the event.

    Raises:
      ValueError: If the tag is not found.

    Returns:
      The metadata in form of `RunMetadata` proto.
    """
    if tag not in self._tagged_metadata:
        raise ValueError('There is no run metadata with this tag name')
    result = RunMetadata()
    result.ParseFromString(self._tagged_metadata[tag])
    return result
def Histograms(self, tag):
    """Given a summary tag, return all associated histograms.

    Args:
      tag: A string tag associated with the events.

    Raises:
      KeyError: If the tag is not found.

    Returns:
      An array of `HistogramEvent`s.
    """
    events = self._histograms.Items(tag)
    return events
def CompressedHistograms(self, tag):
    """Given a summary tag, return all associated compressed histograms.

    Args:
      tag: A string tag associated with the events.

    Raises:
      KeyError: If the tag is not found.

    Returns:
      An array of `CompressedHistogramEvent`s.
    """
    events = self._compressed_histograms.Items(tag)
    return events
def Images(self, tag):
    """Given a summary tag, return all associated images.

    Args:
      tag: A string tag associated with the events.

    Raises:
      KeyError: If the tag is not found.

    Returns:
      An array of `ImageEvent`s.
    """
    events = self._images.Items(tag)
    return events
def Audio(self, tag):
    """Given a summary tag, return all associated audio events.

    Args:
      tag: A string tag associated with the events.

    Raises:
      KeyError: If the tag is not found.

    Returns:
      An array of `AudioEvent`s.
    """
    events = self._audio.Items(tag)
    return events
def _MaybePurgeOrphanedData(self, event):
"""Maybe purge orphaned data due to a TensorFlow crash.
When TensorFlow crashes at step T+O and restarts at step T, any events
written after step T are now "orphaned" and will be at best misleading if
they are included in TensorBoard.
This logic attempts to determine if there is orphaned data, and purge it
if it is found.
Args:
event: The event to use as a reference, to determine if a purge is needed.
"""
if not self.purge_orphaned_data:
return
## Check if the event happened after a crash, and purge expired tags.
if self.file_version and self.file_version >= 2:
## If the file_version is recent enough, use the SessionLog enum
## to check for restarts.
self._CheckForRestartAndMaybePurge(event)
else:
## If there is no file version, default to old logic of checking for
## out of order steps.
self._CheckForOutOfOrderStepAndMaybePurge(event)
def _CheckForRestartAndMaybePurge(self, event):
"""Check and discard expired events using SessionLog.START.
Check for a SessionLog.START event and purge all previously seen events
with larger steps, because they are out of date. Because of supervisor
threading, it is possible that this logic will cause the first few event
messages to be discarded since supervisor threading does not guarantee
that the START message is deterministically written first.
This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
can inadvertently discard events due to supervisor threading.
Args:
event: The event to use as reference. If the event is a START event, all
previously seen events with a greater event.step will be purged.
"""
if event.HasField(
'session_log') and event.session_log.status == SessionLog.START:
self._Purge(event, by_tags=False)
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
"""Check for out-of-order event.step and discard expired events for tags.
Check if the event is out of order relative to the global most recent step.
If it is, purge outdated summaries for tags that the event contains.
Args:
event: The event to use as reference. If the event is out-of-order, all
events with the same tags, but with a greater event.step will be purged.
"""
if event.step < self.most_recent_step and event.HasField('summary'):
self._Purge(event, by_tags=True)
else:
self.most_recent_step = event.step
self.most_recent_wall_time = event.wall_time
def _ConvertHistogramProtoToTuple(self, histo):
    """Repack a histogram proto into a `HistogramValue` namedtuple."""
    # The repeated proto fields are copied into plain lists so the result
    # no longer references the proto message.
    return HistogramValue(
        min=histo.min,
        max=histo.max,
        num=histo.num,
        sum=histo.sum,
        sum_squares=histo.sum_squares,
        bucket_limit=list(histo.bucket_limit),
        bucket=list(histo.bucket))
def _ProcessHistogram(self, tag, wall_time, step, histo):
    """Processes a proto histogram by adding it to accumulated state."""
    value = self._ConvertHistogramProtoToTuple(histo)
    event = HistogramEvent(wall_time, step, value)
    self._histograms.AddItem(tag, event)
    # The compressed reservoir stores a downsampled form of the same event.
    self._compressed_histograms.AddItem(
        tag, event, lambda ev: _CompressHistogram(ev, self._compression_bps))
def _ProcessImage(self, tag, wall_time, step, image):
    """Processes an image by adding it to accumulated state."""
    image_event = ImageEvent(
        wall_time=wall_time,
        step=step,
        encoded_image_string=image.encoded_image_string,
        width=image.width,
        height=image.height)
    self._images.AddItem(tag, image_event)
def _ProcessAudio(self, tag, wall_time, step, audio):
    """Processes an audio clip by adding it to accumulated state."""
    audio_event = AudioEvent(
        wall_time=wall_time,
        step=step,
        encoded_audio_string=audio.encoded_audio_string,
        content_type=audio.content_type,
        sample_rate=audio.sample_rate,
        length_frames=audio.length_frames)
    self._audio.AddItem(tag, audio_event)
def _ProcessScalar(self, tag, wall_time, step, scalar):
    """Processes a simple value by adding it to accumulated state."""
    event = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
    self._scalars.AddItem(tag, event)
def _ProcessHealthPill(self, wall_time, step, node_name, output_slot,
                       elements):
    """Processes a health pill value by adding it to accumulated state.

    Args:
      wall_time: The time at which the health pill was created. Provided by
        the debugger.
      step: The step at which the health pill was created. Provided by the
        debugger.
      node_name: The name of the node for this health pill.
      output_slot: The output slot for this health pill.
      elements: An ND array of 12 floats. The elements of the health pill.
    """
    pill = HealthPillEvent(
        wall_time=wall_time,
        step=step,
        node_name=node_name,
        output_slot=output_slot,
        value=elements)
    # Key by node name so all pills for a node can be fetched quickly.
    self._health_pills.AddItem(node_name, pill)
def _Purge(self, event, by_tags):
    """Purge all events that have occurred after the given event.step.

    If by_tags is True, purge all events that occurred after the given
    event.step, but only for the tags that the event has. Non-sequential
    event.steps suggest that a TensorFlow restart occurred, and we discard
    the out-of-order events to display a consistent view in TensorBoard.

    Discarding by tags is the safer method, when we are unsure whether a
    restart has occurred, given that threading in supervisor can cause
    events of different tags to arrive with unsynchronized step values.

    If by_tags is False, then purge all events with event.step greater than
    the given event.step. This can be used when we are certain that a
    TensorFlow restart has occurred and these events can be discarded.

    Args:
      event: The event to use as reference for the purge. All events with
        the same tags, but with a greater event.step will be purged.
      by_tags: Bool to dictate whether to discard all out-of-order events or
        only those that are associated with the given reference event.
    """
    ## Keep data in reservoirs that has a step less than event.step
    _NotExpired = lambda x: x.step < event.step
    if by_tags:
        # Filter every accumulated reservoir, but only for the tags that
        # appear in the reference event's summary.
        def _ExpiredPerTag(value):
            return [getattr(self, x).FilterItems(_NotExpired, value.tag)
                    for x in self.accumulated_attrs]
        expired_per_tags = [_ExpiredPerTag(value)
                            for value in event.summary.value]
        # Transpose the tag-major expiry counts into per-reservoir totals.
        expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
    else:
        # Unconditional purge: filter every reservoir across all tags.
        expired_per_type = [getattr(self, x).FilterItems(_NotExpired)
                            for x in self.accumulated_attrs]
    if sum(expired_per_type) > 0:
        purge_msg = _GetPurgeMessage(self.most_recent_step,
                                     self.most_recent_wall_time, event.step,
                                     event.wall_time, *expired_per_type)
        # NOTE(review): `logging.warn` is a deprecated alias of
        # `logging.warning` in the stdlib — confirm which logging module
        # this file imports before changing the call.
        logging.warn(purge_msg)
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,
event_wall_time, num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio):
"""Return the string message associated with TensorBoard purges."""
return ('Detected out of order event.step likely caused by '
'a TensorFlow restart. Purging expired events from Tensorboard'
' display between the previous step: {} (timestamp: {}) and '
'current step: {} (timestamp: {}). Removing {} scalars, {} '
'histograms, {} compressed histograms, {} images, '
'and {} audio.').format(most_recent_step, most_recent_wall_time,
event_step, event_wall_time,
num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio)
def _GeneratorFromPath(path):
"""Create an event generator for file or directory at given path string."""
if not path:
raise ValueError('path must be a valid string')
if IsTensorFlowEventsFile(path):
return event_file_loader.EventFileLoader(path)
else:
return directory_watcher.DirectoryWatcher(
path, event_file_loader.EventFileLoader, IsTensorFlowEventsFile)
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
tokens = file_version.split('brain.Event:')
try:
return float(tokens[-1])
except ValueError:
## This should never happen according to the definition of file_version
## specified in event.proto.
logging.warn(('Invalid event.proto file_version. Defaulting to use of '
'out-of-order event.step logic for purging expired events.'))
return -1
def _CompressHistogram(histo_ev, bps):
    """Creates fixed size histogram by adding compression to accumulated state.

    This routine transforms a histogram at a particular step by linearly
    interpolating its variable number of buckets to represent their cumulative
    weight at a constant number of compression points. This significantly
    reduces the size of the histogram and makes it suitable for a
    two-dimensional area plot where the output of this routine constitutes
    the ranges for a single x coordinate.

    Args:
      histo_ev: A HistogramEvent namedtuple.
      bps: Compression points represented in basis points, 1/100ths of a
        percent.

    Returns:
      CompressedHistogramEvent namedtuple.
    """
    # See also: Histogram::Percentile() in core/lib/histogram/histogram.cc
    histo = histo_ev.histogram_value
    if not histo.num:
        # Empty histogram: every compression point maps to weight 0.
        return CompressedHistogramEvent(
            histo_ev.wall_time,
            histo_ev.step,
            [CompressedHistogramValue(b, 0.0) for b in bps])
    bucket = np.array(histo.bucket)
    # Cumulative bucket weights rescaled so the total equals bps[-1];
    # the `or 1.0` guards against a zero bucket sum.
    weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum()
    values = []
    j = 0
    while j < len(bps):
        # First bucket whose cumulative weight exceeds the current point.
        i = np.searchsorted(weights, bps[j], side='right')
        while i < len(weights):
            cumsum = weights[i]
            cumsum_prev = weights[i - 1] if i > 0 else 0.0
            if cumsum == cumsum_prev:  # prevent remap divide by zero
                i += 1
                continue
            if not i or not cumsum_prev:
                lhs = histo.min
            else:
                lhs = max(histo.bucket_limit[i - 1], histo.min)
            rhs = min(histo.bucket_limit[i], histo.max)
            # Interpolate the value at bps[j] inside the bucket [lhs, rhs].
            weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs)
            values.append(CompressedHistogramValue(bps[j], weight))
            j += 1
            break
        else:
            # Ran past the last bucket; remaining points saturate below.
            break
    # Any compression points not yet emitted take the histogram maximum.
    while j < len(bps):
        values.append(CompressedHistogramValue(bps[j], histo.max))
        j += 1
    return CompressedHistogramEvent(histo_ev.wall_time, histo_ev.step, values)
def _Remap(x, x0, x1, y0, y1):
"""Linearly map from [x0, x1] unto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from configparser import NoOptionError, NoSectionError
from configparser_extended import ExtendedConfigParser, SectionProxyExtended
try:
from backports.configparser.helpers import OrderedDict
except ImportError:
from collections import OrderedDict
from six import u
class BasicTestCase(unittest.TestCase):
    """Core ExtendedConfigParser behaviour against the ./test_cfg.ini fixture.

    Covers plain and typed gets, fallbacks, defaults, option/section
    introspection and dict-style access. Fixes applied in review: the
    deprecated ``assertEquals`` alias is replaced by ``assertEqual``, the
    file handle in ``test_read_file`` is closed via a context manager, and
    ``test_options_config_ind`` now performs a real comparison.
    """

    def setUp(self):
        self.x = ExtendedConfigParser()
        self.x.read('./test_cfg.ini')

    def test_basic(self):
        self.assertTrue(self.x is not None)

    def test_get_basic(self):
        self.assertEqual(self.x.get('sect2', 'key2'), 'val2')

    def test_get_basic_fail(self):
        self.assertRaises(NoOptionError, self.x.get, 'sect2', 'key4')

    def test_get_basic_fail2(self):
        self.assertRaises(NoSectionError, self.x.get, 'sect404', 'key3')

    def test_get_int(self):
        self.assertEqual(self.x.getint('sect1', 'key_int'), 1)

    def test_get_float(self):
        self.assertEqual(self.x.getfloat('sect1', 'key_float'), 1.24)

    def test_get_boolean_true(self):
        self.assertTrue(self.x.getboolean('sect1', 'key_bool1'))

    def test_get_boolean_on(self):
        self.assertTrue(self.x.getboolean('sect1', 'key_bool2'))

    def test_get_boolean_1(self):
        self.assertTrue(self.x.getboolean('sect1', 'key_bool3'))

    def test_get_boolean_yes(self):
        self.assertTrue(self.x.getboolean('sect1', 'key_bool4'))

    def test_get_boolean_false(self):
        self.assertFalse(self.x.getboolean('sect1', 'key_bool5'))

    # Wrong type
    def test_get_boolean_random(self):
        self.assertRaises(ValueError, self.x.getboolean, 'sect1', 'key_bool6')

    def test_get_list_int_wrong_type(self):
        self.assertRaises(ValueError, self.x.getint, 'sect1', 'key_bool6')

    def test_get_list_float_wrong_type(self):
        self.assertRaises(ValueError, self.x.getfloat, 'sect1', 'key_bool6')

    def test_default(self):
        self.assertEqual(self.x.get('sect3', 'key2'), 'default2')

    def test_default_section(self):
        default = OrderedDict([('key1[dev_plop_toto_stuff]',
                                'dev_plop_toto1_default'),
                               ('key2', 'default2'), ('key3', 'default3'),
                               ('key049', 'DEFAULT'),
                               ('key049[dev]', 'DEFAULT_dev')])
        self.assertEqual(self.x.default_section, default)

    def test_get_vars(self):
        self.assertEqual(self.x.get('sect1', 'key1', vars={'key1': 'deez'}),
                         'deez')

    def test_get_fallback(self):
        self.assertEqual(self.x.get('sect1', 'key173', fallback='deez'),
                         'deez')

    def test_get_fallback_None(self):
        self.assertEqual(self.x.get('sect1', 'key173', fallback=None), None)

    def test_get_fallback_no_section(self):
        x = ExtendedConfigParser(config="FOO", strict=False)
        x.read('./test_cfg.ini')
        self.assertEqual(x.get('sect_does_not_exsist', 'key173',
                               fallback='deez'), 'deez')

    def test_get_fallback_no_section_None(self):
        x = ExtendedConfigParser(config="FOO", strict=False)
        x.read('./test_cfg.ini')
        self.assertEqual(x.get('sect_does_not_exsist', 'key173',
                               fallback=None), None)

    def test_fallback_int_wrong_type(self):
        self.assertEqual(self.x.getint('sect1', 'key173',
                                       fallback="not_an_int"), "not_an_int")

    def test_fallback_float_wrong_type(self):
        self.assertEqual(self.x.getfloat('sect1', 'key173',
                                         fallback="not_a_float"),
                         "not_a_float")

    def test_fallback_bool_wrong_type(self):
        self.assertEqual(self.x.getboolean('sect1', 'key173',
                                           fallback="not_a_bool"),
                         "not_a_bool")

    def test_has_section(self):
        self.assertTrue(self.x.has_section('sect2'))

    def test_has_section_advanced(self):
        self.assertTrue(self.x.has_section('sect1'))

    def test_has_section_fail(self):
        self.assertFalse(self.x.has_section('sect404'))

    def test_has_option(self):
        self.assertTrue(self.x.has_option('sect3', 'key3'))

    def test_has_option_config(self):
        self.assertTrue(self.x.has_option('sect3', 'key2', 'dev'))

    def test_has_option_config_fail(self):
        self.assertFalse(self.x.has_option('sect3', 'key2', 'scp'))

    def test_has_option_inheritance(self):
        self.assertTrue(self.x.has_option('sect1', 'key3'))

    def test_has_option_specificaction_fail(self):
        # Because config dependant
        self.assertFalse(self.x.has_option('sect3', 'key2'))

    def test_has_option_fail(self):
        self.assertFalse(self.x.has_option('sect3', 'key4'))

    def test_has_option_strict(self):
        self.assertTrue(self.x.has_option('sect3', 'key3', strict=True))

    def test_has_option_strict_config(self):
        self.assertTrue(self.x.has_option('sect3', 'key2', 'dev',
                                          strict=True))

    def test_has_option_strict_config_fail(self):
        self.assertFalse(self.x.has_option('sect3', 'key2', 'scp',
                                           strict=True))

    def test_has_option_strict_inheritance(self):
        # Because strict
        self.assertFalse(self.x.has_option('sect1', 'key3', strict=True))

    def test_has_option_strict_specificaction_fail(self):
        # Because config dependant
        self.assertFalse(self.x.has_option('sect3', 'key2', strict=True))

    def test_has_option_strict_fail(self):
        self.assertFalse(self.x.has_option('sect3', 'key4', strict=True))

    def test_has_option_config_ind(self):
        self.assertTrue(self.x.has_option('sect3', 'key3', cfg_ind=True))

    def test_has_option_config_ind_inheritance(self):
        self.assertTrue(self.x.has_option('sect1', 'key3', cfg_ind=True))

    def test_has_option_config_ind_specificaction(self):
        self.assertTrue(self.x.has_option('sect3', 'key2', cfg_ind=True))

    def test_has_option_config_ind_fail(self):
        self.assertFalse(self.x.has_option('sect3', 'key4', cfg_ind=True))

    def test_has_option_strict_config_ind(self):
        self.assertTrue(self.x.has_option('sect3', 'key3', cfg_ind=True,
                                          strict=True))

    def test_has_option_strict_config_ind_inheritance(self):
        self.assertFalse(self.x.has_option('sect1', 'key3', cfg_ind=True,
                                           strict=True))

    def test_has_option_strict_config_ind_specificaction(self):
        self.assertTrue(self.x.has_option('sect3', 'key2', cfg_ind=True,
                                          strict=True))

    def test_has_option_strict_config_ind_fail(self):
        self.assertFalse(self.x.has_option('sect3', 'key4', cfg_ind=True,
                                           strict=True))

    def test_options_basic(self):
        res = ['key1', 'key2', 'key2[dev]', 'key2[dev_plop]',
               'key1[dev_plop_toto_stuff]', 'key2', 'key3', 'key049',
               'key049[dev]']
        self.assertEqual(self.x.options('sect2'), res)

    def test_options_inheritance(self):
        res = ['key1', 'key_int', 'key_bool1', 'key_bool2', 'key_bool3',
               'key_bool4', 'key_bool5', 'key_bool6', 'key_float', 'key_list',
               'key_list_int', 'key_list_bool', 'key_list_float', 'key1[dev]',
               'key1', 'key2', 'key2[dev]', 'key2[dev_plop]',
               'key1[dev_plop_toto]', 'key2[dev]', 'key3', 'key3[dev]',
               'key3[toto]', 'key3[dev_plop]', 'key3[dev_plop_toto]',
               'key1[dev_plop_toto_stuff]', 'key2', 'key3', 'key049',
               'key049[dev]']
        self.assertEqual(self.x.options('sect1'), res)

    def test_options_strict(self):
        res = ['key1', 'key_int', 'key_bool1', 'key_bool2', 'key_bool3',
               'key_bool4', 'key_bool5', 'key_bool6', 'key_float', 'key_list',
               'key_list_int', 'key_list_bool', 'key_list_float', 'key1[dev]']
        self.assertEqual(self.x.options('sect1', strict=True), res)

    def test_options_strict_defaults(self):
        res = ['key1', 'key_int', 'key_bool1', 'key_bool2', 'key_bool3',
               'key_bool4', 'key_bool5', 'key_bool6', 'key_float', 'key_list',
               'key_list_int', 'key_list_bool', 'key_list_float', 'key1[dev]',
               'key1[dev_plop_toto_stuff]', 'key2', 'key3', 'key049',
               'key049[dev]']
        self.assertEqual(self.x.options('sect1', strict=True, defaults=True),
                         res)

    def test_options_strict_config_ind(self):
        res = ['key1', 'key_int', 'key_bool1', 'key_bool2', 'key_bool3',
               'key_bool4', 'key_bool5', 'key_bool6', 'key_float', 'key_list',
               'key_list_int', 'key_list_bool', 'key_list_float']
        self.assertEqual(self.x.options('sect1', strict=True, cfg_ind=True),
                         res)

    def test_options_strict_config_ind_defaults(self):
        res = ['key1', 'key_int', 'key_bool1', 'key_bool2', 'key_bool3',
               'key_bool4', 'key_bool5', 'key_bool6', 'key_float', 'key_list',
               'key_list_int', 'key_list_bool', 'key_list_float', 'key2',
               'key3', 'key049']
        self.assertEqual(self.x.options('sect1', strict=True, defaults=True,
                                        cfg_ind=True), res)

    def test_options_config_ind(self):
        res = ['key1', 'key_int', 'key_bool1', 'key_bool2', 'key_bool3',
               'key_bool4', 'key_bool5', 'key_bool6', 'key_float', 'key_list',
               'key_list_int', 'key_list_bool', 'key_list_float',
               'key2', 'key3', 'key049']
        # BUG FIX: the original compared the results of in-place list.sort()
        # calls, i.e. None == None, which passes regardless of content.
        # Compare sorted copies so the assertion is meaningful.
        self.assertEqual(sorted(self.x.options('sect1', cfg_ind=True)),
                         sorted(res))

    def test_items_basic(self):
        res = [('key1', 'val1_sect2'),
               ('key2', 'val2'),
               ('key2[dev]', 'dev2'),
               ('key2[dev_plop]', 'dev_plop2'),
               ('key1[dev_plop_toto_stuff]', 'dev_plop_toto1_default'),
               ('key2', 'default2'),
               ('key3', 'default3'),
               ('key049', 'DEFAULT'),
               ('key049[dev]', 'DEFAULT_dev')]
        res.sort()
        test = self.x.items('sect2')
        test.sort()
        self.assertEqual(test, res)

    def test_items_inheritance(self):
        res = [('key1', 'val1'),
               ('key_int', '1'),
               ('key_bool1', 'true'),
               ('key_bool2', 'on'),
               ('key_bool3', '1'),
               ('key_bool4', 'yes'),
               ('key_bool5', 'false'),
               ('key_bool6', 'random'),
               ('key_float', '1.24'),
               ('key_list', 'damn;dang;nabbit'),
               ('key_list_int', '1;7;3'),
               ('key_list_bool', 'true;false;true'),
               ('key_list_float', '0.96;1.73;6.82'),
               ('key1[dev]', 'dev1'),
               ('key1', 'val1_sect2'),
               ('key2', 'val2'),
               ('key2[dev]', 'dev2'),
               ('key2[dev_plop]', 'dev_plop2'),
               ('key1[dev_plop_toto]', 'dev_plop_toto1_sect3'),
               ('key2[dev]', 'dev2_sect3'),
               ('key3', 'val3'),
               ('key3[dev]', 'dev3'),
               ('key3[toto]', 'toto3'),
               ('key3[dev_plop]', 'dev_plop3'),
               ('key3[dev_plop_toto]', 'dev_plop_toto3'),
               ('key1[dev_plop_toto_stuff]', 'dev_plop_toto1_default'),
               ('key2', 'default2'),
               ('key3', 'default3'),
               ('key049', 'DEFAULT'),
               ('key049[dev]', 'DEFAULT_dev')]
        res.sort()
        test = self.x.items('sect1')
        test.sort()
        self.assertEqual(test, res)

    def test_items_strict_basic(self):
        res = [('key1', 'val1'),
               ('key_int', '1'),
               ('key_bool1', 'true'),
               ('key_bool2', 'on'),
               ('key_bool3', '1'),
               ('key_bool4', 'yes'),
               ('key_bool5', 'false'),
               ('key_bool6', 'random'),
               ('key_float', '1.24'),
               ('key_list', 'damn;dang;nabbit'),
               ('key_list_int', '1;7;3'),
               ('key_list_bool', 'true;false;true'),
               ('key_list_float', '0.96;1.73;6.82'),
               ('key1[dev]', 'dev1')]
        res.sort()
        test = self.x.items('sect1', strict=True)
        test.sort()
        self.assertEqual(test, res)

    def test_items_strict_defaults(self):
        res = [('key1', 'val1'),
               ('key_int', '1'),
               ('key_bool1', 'true'),
               ('key_bool2', 'on'),
               ('key_bool3', '1'),
               ('key_bool4', 'yes'),
               ('key_bool5', 'false'),
               ('key_bool6', 'random'),
               ('key_float', '1.24'),
               ('key_list', 'damn;dang;nabbit'),
               ('key_list_int', '1;7;3'),
               ('key_list_bool', 'true;false;true'),
               ('key_list_float', '0.96;1.73;6.82'),
               ('key1[dev]', 'dev1'),
               ('key1[dev_plop_toto_stuff]', 'dev_plop_toto1_default'),
               ('key2', 'default2'),
               ('key3', 'default3'),
               ('key049', 'DEFAULT'),
               ('key049[dev]', 'DEFAULT_dev')]
        res.sort()
        test = self.x.items('sect1', defaults=True, strict=True)
        test.sort()
        self.assertEqual(test, res)

    def test_items_all(self):
        res = []
        for key in self.x._sections:
            res.append((key, SectionProxyExtended(self.x, key)))
        res.sort()
        test = self.x.items()
        test.sort()
        self.assertEqual(test, res)

    def test_items_strict_all(self):
        res = []
        for key in self.x._sections:
            res.append((key, SectionProxyExtended(self.x, key)))
        res.sort()
        test = self.x.items(strict=True)
        test.sort()
        self.assertEqual(test, res)

    def test_items_vars(self):
        res = [('william', 'Overbeck'),
               ('key1', 'val1_sect2'),
               ('key2', 'val2'),
               ('key2[dev]', 'dev2'),
               ('key2[dev_plop]', 'dev_plop2'),
               ('key1[dev_plop_toto_stuff]', 'dev_plop_toto1_default'),
               ('key2', 'default2'),
               ('key3', 'default3'),
               ('key049', 'DEFAULT'),
               ('key049[dev]', 'DEFAULT_dev')]
        res.sort()
        test = self.x.items('sect2', vars={'William': 'Overbeck'})
        test.sort()
        self.assertEqual(test, res)

    def test_items_vars_strict(self):
        res = [('william', 'Overbeck'),
               ('key1', 'val1_sect2'),
               ('key2', 'val2'),
               ('key2[dev]', 'dev2'),
               ('key2[dev_plop]', 'dev_plop2')]
        res.sort()
        test = self.x.items('sect2', vars={'William': 'Overbeck'},
                            strict=True)
        test.sort()
        self.assertEqual(test, res)

    def test_get_key_dict(self):
        self.assertEqual(self.x['sect2']['key2'], 'val2')

    def test_get_sect_dict_fail(self):
        self.assertRaises(KeyError, lambda: self.x['sect404'])

    def test_get_key_dict_fail(self):
        self.assertRaises(KeyError, lambda: self.x['sect2']['key42'])

    def test_has_section_strict(self):
        self.assertTrue(self.x.has_section('sect2', strict=True))

    def test_has_section_strict_fail(self):
        self.assertFalse(self.x.has_section('sect1', strict=True))

    def test_defaults(self):
        self.x = ExtendedConfigParser(defaults={'william': 'Overbeck'})
        self.x.read('./test_cfg.ini')
        res = OrderedDict([('william', 'Overbeck')])
        self.assertEqual(self.x.defaults(), res)

    def test_add_section(self):
        self.x.add_section('Jim Hoxworth')
        self.assertTrue(self.x.has_section('Jim Hoxworth'))
        self.assertEqual(self.x.get('Jim Hoxworth', 'key2'), 'default2')
        self.assertEqual(self.x['Jim Hoxworth']['key2'], 'default2')

    def test_read_file(self):
        self.x = ExtendedConfigParser()
        # Close the handle deterministically instead of leaking it.
        with open('./test_cfg.ini', 'r') as data:
            self.x.read_file(data)
        self.assertEqual(self.x.get('sect2', 'key2'), 'val2')

    def test_read_string_unicode(self):
        self.x = ExtendedConfigParser()
        with open('./test_cfg.ini', 'r') as cfg_file:
            string = cfg_file.read()
        string = u(string)
        self.x.read_string(string)
        self.assertEqual(self.x.get('sect2', 'key2'), 'val2')
class AdvancedTestCase(unittest.TestCase):
    """Tests list-typed gets, config/section lookup order and separators.

    All expectations are tied to the fixture files ./test_cfg.ini and
    ./test_cfg_kwargs.ini; several tests rebuild self.x with alternative
    constructor arguments.
    """

    def setUp(self):
        self.x = ExtendedConfigParser()
        self.x.read('./test_cfg.ini')

    def test_get_not_list(self):
        res = 'damn;dang;nabbit'
        self.assertEqual(self.x.get('sect1', 'key_list'), res)

    def test_get_list(self):
        res = ['damn', 'dang', 'nabbit']
        self.assertEqual(self.x.get('sect1', 'key_list', isList=True), res)

    def test_get_list_int(self):
        res = [1, 7, 3]
        self.assertEqual(self.x.getintlist('sect1', 'key_list_int'), res)

    def test_get_list_bool(self):
        res = [True, False, True]
        self.assertEqual(self.x.getbooleanlist('sect1', 'key_list_bool'), res)

    def test_get_list_float(self):
        res = [0.96, 1.73, 6.82]
        self.assertEqual(self.x.getfloatlist('sect1', 'key_list_float'), res)

    def test_get_list_int_wrong_type(self):
        self.assertRaises(ValueError, self.x.getintlist, 'sect1',
                          'key_list')

    def test_get_list_bool_wrong_type(self):
        self.assertRaises(ValueError, self.x.getbooleanlist, 'sect1',
                          'key_list')

    def test_get_list_float_wrong_type(self):
        self.assertRaises(ValueError, self.x.getfloatlist, 'sect1',
                          'key_list')

    def test_get_fallback_list(self):
        # If not found in the section or parents (tests list for defaults,
        # fallback, father,...)
        self.assertEqual(self.x.get('sect1', 'key173', fallback='deez',
                                    isList=True), 'deez')

    def test_get_father(self):
        self.x = ExtendedConfigParser(defaults={'key4': 'father'})
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect1', 'key4'), 'father')

    def test_get_default_over_father(self):
        self.x = ExtendedConfigParser(defaults={'key049': 'father'})
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect1', 'key049'), 'DEFAULT')

    def test_get_config_plus(self):
        self.x = ExtendedConfigParser(config='mem_plop_toto')
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect3', 'key3', cfg_plus=True), 'toto3')

    def test_get_config_section_loop_basic(self):
        self.x = ExtendedConfigParser(config='dev_plop_toto')
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect1', 'key1', sect_first=False),
                         'dev_plop_toto1_sect3')

    def test_get_config_section_loop_default(self):
        self.x = ExtendedConfigParser(config='dev_plop_toto_stuff')
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect1', 'key1', sect_first=False),
                         'dev_plop_toto1_default')

    def test_get_config_section_loop_father(self):
        self.x = ExtendedConfigParser(config='dev_plop_toto_stuff',
                                      defaults={'key049[dev_plop]': 'father'})
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect1', 'key049', sect_first=False),
                         'father')

    def test_get_config_section_loop_vars_specified(self):
        self.x = ExtendedConfigParser(config='dev_plop_toto_stuff')
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect1', 'key1', sect_first=False,
                                    vars={'key1[dev_plop_toto_stuff]':
                                          'vars'}), 'vars')

    def test_get_config_section_loop_vars(self):
        self.x = ExtendedConfigParser(config='mem_plop_toto_stuff')
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect2', 'key1', sect_first=False,
                                    vars={'key1': 'vars'}), 'vars')

    def test_get_config_section_loop_vars_unspecified(self):
        self.x = ExtendedConfigParser()
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect2', 'key1', sect_first=False,
                                    vars={'key1': 'vars'}), 'vars')

    def test_get_config_section_loop_sect_unspecified(self):
        self.x = ExtendedConfigParser(config='dev_plop')
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect2', 'key1', sect_first=False),
                         'val1_sect2')

    def test_get_config_section_loop_default_unspecified(self):
        self.x = ExtendedConfigParser(config='mem_plop')
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect2', 'key049', sect_first=False),
                         'DEFAULT')

    def test_get_config_section_loop_father_unspecified(self):
        self.x = ExtendedConfigParser(config='mem_plop', defaults={'key096':
                                                                   'father'})
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect2', 'key096', sect_first=False),
                         'father')

    def test_get_config_section_loop_fallback(self):
        self.assertEqual(self.x.get('sect2', 'key173', sect_first=False,
                                    fallback='SCP-173'), 'SCP-173')

    def test_get_config_section_loop_fail(self):
        self.assertRaises(NoOptionError, self.x.get, 'sect1', 'key682',
                          sect_first=False)

    def test_get_config_section_loop_config_plus(self):
        self.x = ExtendedConfigParser(config='mem_plop_toto')
        self.x.read('./test_cfg.ini')
        self.assertEqual(self.x.get('sect3', 'key3', sect_first=False,
                                    cfg_plus=True), 'toto3')

    def test_get_config_section_loop_list(self):
        self.assertEqual(self.x.get('sect2', 'key173', sect_first=False,
                                    fallback='SCP-173', isList=True),
                         'SCP-173')

    def test_get_kwargs(self):
        # Exercises every constructor keyword at once against the kwargs
        # fixture file.
        self.x = ExtendedConfigParser(config='dev',
                                      config_separator='#',
                                      section_separator='%',
                                      list_separator='*',
                                      delimiters=':',
                                      comment_prefixes=('#', ';'),
                                      inline_comment_prefixes=None,
                                      strict=True,
                                      empty_lines_in_values=True,
                                      default_section='THINGY',
                                      interpolation=None)
        self.x.read('./test_cfg_kwargs.ini')
        self.assertEqual(self.x.get('sect1', 'key2', isList=True),
                         ['dev2', '2ved', '2vedev2'])

    def test_set_config_separator(self):
        self.x = ExtendedConfigParser(config='dev#dem#der',
                                      section_separator='%',
                                      list_separator='*')
        self.x.read('./test_cfg_kwargs.ini')
        self.x.set_config_separator('#')
        self.assertEqual(self.x.get('sect1', 'key1'), 'dev1')

    def test_set_config_separator_wrong(self):
        self.x = ExtendedConfigParser(config='dev_dem_der',
                                      section_separator='%',
                                      list_separator='*')
        self.x.read('./test_cfg_kwargs.ini')
        self.x.set_config_separator('#')
        self.x.set_config_name('dev_dem_der')
        self.assertEqual(self.x.get('sect1', 'key1'), 'val1')

    def test_set_section_separator(self):
        self.x = ExtendedConfigParser(config='',
                                      config_separator='#',
                                      list_separator='*')
        self.x.read('./test_cfg_kwargs.ini')
        self.x.set_section_separator('%')
        self.assertEqual(self.x.get('sect1', 'key3'), 'val3')

    def test_set_section_separator_fail(self):
        # NOTE(review): this body is identical to test_set_section_separator
        # and asserts the same success value, so despite its name it does not
        # exercise a failure path — worth confirming the original intent.
        self.x = ExtendedConfigParser(config='',
                                      config_separator='#',
                                      list_separator='*')
        self.x.read('./test_cfg_kwargs.ini')
        self.x.set_section_separator('%')
        self.assertEqual(self.x.get('sect1', 'key3'), 'val3')

    def test_set_list_separator(self):
        self.x = ExtendedConfigParser(config='dev',
                                      config_separator='#',
                                      section_separator='%')
        self.x.read('./test_cfg_kwargs.ini')
        self.x.set_list_separator('*')
        self.assertEqual(self.x.get('sect1', 'key2', isList=True),
                         ['dev2', '2ved', '2vedev2'])

    def test_set_list_separator_wrong(self):
        self.x = ExtendedConfigParser(config='dev',
                                      config_separator='#',
                                      section_separator='%')
        self.x.read('./test_cfg_kwargs.ini')
        self.x.set_list_separator('*')
        self.assertEqual(self.x.get('sect1', 'key_list'), 'damn;dang;nabbit')

    def test_set_inheritance(self):
        self.x.set_inheritance('test')
        self.assertEqual(self.x.inheritance, 'test')

    def test_get_section_name_compact_basic(self):
        self.assertEqual(self.x.get_section_name_compact('sect2'), 'sect2')

    def test_get_section_name_compact_parents(self):
        self.assertEqual(self.x.get_section_name_compact('sect1:sect2:sect3'),
                         'sect1')

    def test_get_first_section(self):
        self.assertEqual(self.x.get_first_section(), 'sect1')
class InheritanceTestCase(unittest.TestCase):
    """Tests section-inheritance lookups (sect1:sect2:sect3 chains).

    Expectations are tied to the ./test_cfg.ini fixture: sect1 inherits
    from sect2, which inherits from sect3, per the assertions below.
    """

    def setUp(self):
        self.x = ExtendedConfigParser()
        self.x.read('./test_cfg.ini')

    def test_get_section_name(self):
        self.assertEqual(self.x.get_section_name('sect1'),
                         'sect1:sect2:sect3')

    def test_get_section_name_fail(self):
        self.assertRaises(NoSectionError, self.x.get_section_name, 'sect404')

    def test_get_basic2(self):
        self.assertEqual(self.x.get('sect1', 'key1'), 'val1')

    def test_get_basic_fail2(self):
        self.assertRaises(NoOptionError, self.x.get, 'sect1', 'key412')

    def test_get_sections(self):
        sections = ['sect1:sect2:sect3', 'sect2', 'sect3']
        self.assertEqual(self.x._get_corresponding_sections('sect1'),
                         sections)

    def test_get_sections_return_name(self):
        sections = ['sect3']
        self.assertEqual(self.x._get_corresponding_sections('sect3'),
                         sections)

    def test_get_sections_fail(self):
        self.assertRaises(NoSectionError, lambda:
                          self.x._get_corresponding_sections('sect173'))

    def test_get_parent_key(self):
        self.assertEqual(self.x.get('sect1', 'key2'), 'val2')

    def test_get_grandparent_key(self):
        self.assertEqual(self.x.get('sect1', 'key3'), 'val3')

    def test_get_key_dict(self):
        self.assertEqual(self.x['sect1']['key1'], 'val1')

    def test_get_parent_key_dict(self):
        self.assertEqual(self.x['sect1']['key2'], 'val2')

    def test_get_grandparent_key_dict(self):
        self.assertEqual(self.x['sect1']['key3'], 'val3')

    def test_get_default_key_dict(self):
        self.assertEqual(self.x['sect1']['key049'], 'DEFAULT')

    def test_get_key_dict_fail(self):
        self.assertRaises(KeyError, lambda: self.x['sect2']['key4'])

    def test_get_sect_dict_fail(self):
        self.assertRaises(KeyError, lambda: self.x['sect42']['key1'])
class InheritModeSelectionTestCase(unittest.TestCase):
    """Check that the ``inheritance`` kwarg selects the section-lookup method."""

    def _load(self, **kwargs):
        # One place to build the parser and read the shared fixture file.
        self.x = ExtendedConfigParser(**kwargs)
        self.x.read('./test_cfg.ini')

    def _assert_inheritance_lookup(self):
        # The public lookup must delegate to the inheritance-mode variant.
        self.assertEqual(
            self.x.get_corresponding_sections('sect4'),
            self.x._get_corresponding_sections_inheritance('sect4'))

    def _assert_plain_lookup(self):
        # The public lookup must delegate to the plain variant.
        self.assertEqual(
            self.x.get_corresponding_sections('sect4'),
            self.x._get_corresponding_sections('sect4'))

    def test_get_sections_method_select_im(self):
        self._load(inheritance='im')
        self._assert_inheritance_lookup()

    def test_get_sections_method_select_impl(self):
        self._load(inheritance='impl')
        self._assert_inheritance_lookup()

    def test_get_sections_method_select_implicit(self):
        self._load(inheritance='implicit')
        self._assert_inheritance_lookup()

    def test_get_sections_method_select_random(self):
        """Any unrecognised mode string falls back to the plain lookup."""
        self._load(inheritance='Valkyr Prime')
        self._assert_plain_lookup()

    def test_get_sections_method_select_default(self):
        """No mode given: the plain lookup is the default."""
        self._load()
        self._assert_plain_lookup()
class InheritModeTestCase(unittest.TestCase):
    """Exercise section/option resolution under 'implicit' inheritance mode.

    Fix: the original used ``assertEquals``, a deprecated alias that was
    removed in Python 3.12; all assertions now use ``assertEqual``.
    """

    def setUp(self):
        self.x = ExtendedConfigParser(inheritance='implicit')
        self.x.read('./test_cfg.ini')

    def test_get_sections_inherit_mode(self):
        sections = ['sect1:sect2:sect3', 'sect2', 'sect3']
        self.assertEqual(
            self.x._get_corresponding_sections_inheritance('sect1'),
            sections)

    def test_get_sections_inherit_mode_return_name(self):
        sections = ['sect3']
        self.assertEqual(
            self.x._get_corresponding_sections_inheritance('sect3'),
            sections)

    def test_get_sections_inherit_mode_fail(self):
        self.assertRaises(
            NoSectionError,
            lambda: self.x._get_corresponding_sections_inheritance('sect173'))

    def test_get_sections_inherit_mode_simple_inherit(self):
        # [sect1:sect2] and [sect2:sect3] => [sect1:sect2:sect3]
        sections = ['sect8:sect5', 'sect5:sect51', 'sect51']
        self.assertEqual(
            self.x._get_corresponding_sections_inheritance('sect8'),
            sections)

    def test_get_sections_inherit_mode_multiple_inherit(self):
        # [sect1:sect2:sect3] => check [sect2], [sect3] and their parents
        sections = ['sect4:sect5:sect6', 'sect5:sect51',
                    'sect51', 'sect6:sect61', 'sect61']
        self.assertEqual(
            self.x._get_corresponding_sections_inheritance('sect4'),
            sections)

    def test_section_inherit_mode_basic(self):
        """options() must merge keys from the whole inheritance chain."""
        res = ['father', 'grandpa', 'key1[dev_plop_toto_stuff]', 'key2',
               'key3', 'key049', 'key049[dev]']
        res.sort()
        test = self.x.options('sect6')
        test.sort()
        self.assertEqual(test, res)

    def test_section_inherit_mode_basic_values(self):
        """items() must pair each inherited key with the nearest value."""
        res = [('father', '6'),
               ('grandpa', '61'),
               ('key1[dev_plop_toto_stuff]', 'dev_plop_toto1_default'),
               ('key2', 'default2'),
               ('key3', 'default3'),
               ('key049', 'DEFAULT'),
               ('key049[dev]', 'DEFAULT_dev')]
        res.sort()
        test = self.x.items('sect6')
        test.sort()
        self.assertEqual(test, res)

    def test_section_inherit_mode_lvl2(self):
        res = ['son', 'father', 'mother', 'grandpa', 'grandma',
               'key1[dev_plop_toto_stuff]', 'key2', 'key3', 'key049',
               'key049[dev]']
        res.sort()
        test = self.x.options('sect8')
        test.sort()
        self.assertEqual(test, res)

    def test_section_inherit_mode_lvl2_values(self):
        res = [('son', '8'),
               ('father', '5'),
               ('mother', '5'),
               ('grandpa', '51'),
               ('grandma', '51'),
               ('key1[dev_plop_toto_stuff]', 'dev_plop_toto1_default'),
               ('key2', 'default2'),
               ('key3', 'default3'),
               ('key049', 'DEFAULT'),
               ('key049[dev]', 'DEFAULT_dev')]
        res.sort()
        test = self.x.items('sect8')
        test.sort()
        self.assertEqual(test, res)

    def test_section_inherit_mode_multiple_inheritance_values_father(self):
        self.assertEqual(self.x.get('sect4', 'father'), '5')

    def test_section_inherit_mode_multiple_inheritance_values_grandpa(self):
        self.assertEqual(self.x.get('sect4', 'grandpa'), '51')
class SpecificationTestCase(unittest.TestCase):
    """Exercise config-name specification ('key[config]') value resolution."""

    def setUp(self):
        self.x = ExtendedConfigParser(config='dev')
        self.x.read('./test_cfg.ini')

    def test_get_config_name(self):
        self.assertEqual(self.x.get_config_name(), 'dev')

    def test_specification_basic_unspeced_key(self):
        """A key without a [config] suffix resolves to its plain value."""
        self.assertEqual(self.x.get('sect2', 'key1'), 'val1_sect2')

    def test_specification_basic(self):
        """A key with a matching [dev] specification wins over the plain one."""
        self.assertEqual(self.x.get('sect1', 'key1'), 'dev1')

    def test_specification_goto_parent_config(self):
        """An unmatched child config falls back to its parent config."""
        self.x.set_config_name('dev_stuff')
        self.assertEqual(self.x.get('sect1', 'key1'), 'dev1')

    def test_specification_goto_grandparent(self):
        self.x.set_config_name('dev_dem_der')
        self.assertEqual(self.x.get('sect1', 'key1'), 'dev1')

    def test_get_configs(self):
        self.x.set_config_name('dev_plop_toto')
        expected = ['dev_plop_toto', 'dev_plop', 'dev']
        self.assertEqual(self.x.get_configs(), expected)

    def test_get_configs_param(self):
        expected = ['dev_plop_toto', 'dev_plop', 'dev']
        self.assertEqual(self.x.get_configs('dev_plop_toto'), expected)

    def test_get_configs_plus(self):
        self.x.set_config_name('dev_plop_toto')
        expected = ['dev_plop_toto', 'plop_toto', 'toto', 'dev_plop', 'plop', 'dev']
        self.assertEqual(self.x.get_configs_plus(), expected)

    def test_get_configs_plus_param(self):
        expected = ['dev_plop_toto', 'plop_toto', 'toto', 'dev_plop', 'plop', 'dev']
        self.assertEqual(self.x.get_configs_plus('dev_plop_toto'), expected)

    def test_specification_advanced(self):
        self.x.set_config_name('dev_plop_toto')
        self.assertEqual(self.x.get('sect3', 'key3'), 'dev_plop_toto3')

    def test_get_specified_defaults(self):
        """Specified keys in DEFAULT are honoured too."""
        self.assertEqual(self.x.get('sect3', 'key049'), 'DEFAULT_dev')

    def test_get_specified_vars(self):
        """vars= overrides beat file values, including specified keys."""
        overrides = {'key1[dev]': 'deez'}
        self.assertEqual(self.x.get('sect1', 'key1', vars=overrides), 'deez')
| |
from django.shortcuts import render
from django.http import HttpResponse
from django.template import Context, loader, RequestContext
from django.template import loader, Context
from django.shortcuts import render_to_response, redirect
from django.views.decorators.csrf import csrf_exempt
from django import forms
import json
from django.http import HttpResponse
from django.core import serializers
import urllib2
from urllib2 import urlopen
import json
from bs4 import BeautifulSoup
import re
class News(object):
    """Plain value object for one scraped category-page news entry.

    All fields start as None and are filled in by CollectSubPageLinksMethod.
    """

    def __init__(self):
        self.subStory = self.additionalInfoTime = None
        self.additionalInfoSource = self.link = None
        self.title = self.imageLink = None

    class Meta:
        app_label = 'rest'
class DetailsNews(object):
    """Value object for a fully fetched article: story text, image, title."""

    def __init__(self):
        # Every field is empty until Details() populates it.
        for field in ('Story', 'Imagelink', 'title'):
            setattr(self, field, None)

    class Meta:
        app_label = 'rest'
class HeadLine(object):
    """Value object for one front-page headline teaser."""

    def __init__(self):
        # No data yet: every field is populated later by HeadLines().
        self.Title = self.SubStory = None
        self.Imagelink = self.Link = None
def FirstPage(request):
    """Scrape the prothom-alo front page and return its menu links as JSON.

    Returns an HttpResponse whose body is a JSON list of the href values
    found inside the dynamic main-menu block.
    """
    url = "http://www.prothom-alo.com/"
    print(url)
    sourceCode = urllib2.urlopen(url).read()
    # Fix: the old code passed from_encoding="sourceCode" -- the *name* of
    # the variable, not an encoding -- so let BeautifulSoup auto-detect.
    soup = BeautifulSoup(sourceCode)
    content = soup.select('#main-menu ul .menu_color_ .dynamic')
    links = re.findall(r'href="(.*?)"', str(content))
    return HttpResponse(json.dumps(links))
# CurrentLyVisitingLink = "http://www.prothom-alo.com/bangladesh/article/"
def CollectSubPageLinks(request):
    """Return the news items of a category page (given via ?url=) as JSON.

    Responds with a JSON array of News objects serialized through vars().
    Fix: the old code built the array by appending "[", "," and "]" to a
    *list* (relying on list += str splitting strings into characters) and
    raised NameError for non-GET requests; non-GET now yields "[]".
    """
    news_items = []
    if request.method == 'GET':
        print("true")
        CurrentLyVisitingLink = request.GET.get('url', '')
        print(CurrentLyVisitingLink)
        news_items = CollectSubPageLinksMethod(CurrentLyVisitingLink)
    # Same byte layout as before for the GET path: items joined by ",".
    body = "[" + ",".join(json.dumps(vars(item)) for item in news_items) + "]"
    return HttpResponse(body)
def CollectSubPageLinksMethod(url):
    """Scrape one category page and return a list of News value objects.

    Each News gets (best effort): imageLink, title, link, source, time and
    sub-story. Every field is extracted independently; a failed extraction
    only prints a message and leaves that field as None.
    Python 2 code: uses ``print`` statements and ``except Exception, e``.
    """
    AllSubPagesLink = []
    sourceCode = urllib2.urlopen(url).read()
    soup = BeautifulSoup(sourceCode)
    # One "oh mb10" div per news teaser on the category page.
    content = soup.findAll("div", attrs={"class":"oh mb10"})
    imageSoup = soup.findAll("img")  # debug only; not used below
    print "Image : " + str(imageSoup)
    for x in content:
        news = News()
        # Image: src attributes start with "//", so re-prefix with http://.
        try:
            Imagelink = x.select('img')
            image = re.findall(r'<img .*?src="//(.*?)"', str(Imagelink))
            news.imageLink = "http://"+str(image[0])
            print Imagelink
        except Exception, e:
            # IndexError when no image matched; leave news.imageLink = None.
            print "No Image"
        # Title: text of the first anchor in the teaser.
        title = x.select('a')
        title = re.findall(r'<a href=".*?">(.*?)</a>', str(title))
        try:
            title = title[0]
            news.title = title
        except Exception, e:
            print "Exception : " + str(e) + "\n"
        # Article link: first href, made absolute against *url*, then
        # trimmed by the r'^(.*/d?)' pattern (keeps text up to the last '/',
        # optionally followed by a literal 'd' -- presumably meant to strip
        # a trailing slug; TODO confirm the regex intent).
        link = x.select('a')
        link = re.findall(r'<a href="(.*?)"', str(link))
        try:
            link = url + link[0]
            shortLink = re.findall(r'^(.*/d?)', str(link))
            news.link = shortLink[0]
        except Exception, e:
            print link
        # Source and time both live in spans inside ".additional_info":
        # spans *with* attributes hold the source, bare spans the time.
        additionalInfo = x.select('.additional_info')
        additionalInfoSource = re.findall(r'<span .*?>(.*?)</span>', str(additionalInfo))
        try:
            additionalInfoSource = additionalInfoSource[0]
            news.additionalInfoSource = additionalInfoSource
        except Exception, e:
            print "Exception : " + str(e) + "\n"
        additionalInfoTime = re.findall(r'<span>(.*?)</span>', str(additionalInfo))
        try:
            additionalInfoTime = additionalInfoTime[0]
            news.additionalInfoTime = additionalInfoTime
        except Exception, e:
            print "Exception : " + str(e) + "\n"
        # Sub-story: first anchor text inside ".content".
        # NOTE(review): this inner loop rebinds the outer loop variable x;
        # the outer iteration is unaffected (it draws from the iterator),
        # but the shadowing is deliberate-looking only because of the break.
        subStory = x.select('.content')
        subStory = re.findall(r'<a href=".*?">(.*?)</a>', str(subStory))
        for x in subStory:
            subStory = x
            news.subStory = subStory
            break
        AllSubPagesLink.append(news)
    return AllSubPagesLink
def Details(request):
    """Fetch a full article (?url=) and return its title, image and story.

    Responds with a JSON object of the DetailsNews fields; for non-GET
    requests the fields are simply left as None.
    """
    details = DetailsNews()
    if request.method == 'GET':
        print("true")
        CurrentLyVisitingLink = request.GET.get('url', '')
        print(CurrentLyVisitingLink)
        sourceCode = urllib2.urlopen(CurrentLyVisitingLink).read()
        soup = BeautifulSoup(sourceCode)
        details.title = soup.h1.text.encode('utf-8')
        content = soup.article
        try:
            details.Imagelink = content.img['src']
        except Exception as e:
            # No <img> inside <article>; leave Imagelink = None.
            print("No Image")
        print("\n\n\n\n")
        articleLines = []
        # One list entry per <p> of the article body, with markup stripped.
        for paragraph in re.findall(r'<p.*?>(.*?)</p>', str(content)):
            paragraph = re.sub(r'<img .*?/>', '', paragraph)
            paragraph = re.sub(r'<br/>', '\n', paragraph)
            # Fix: strip the inline formatting tags in one pass instead of
            # six chained re.sub calls on the same literals.
            paragraph = re.sub(r'</?(?:span|strong|em)>', '', paragraph)
            articleLines.append(paragraph)
        details.Story = articleLines
    return HttpResponse(json.dumps(vars(details)))
def HeadLines(request):
    """Scrape the front page and return its headline teasers as JSON.

    Each "each_news mb20" div yields one HeadLine (link, image, title,
    sub-story, each best-effort). Fix: the JSON array is now assembled with
    str.join instead of appending "[", "," and "]" fragments to a list.
    """
    headlines = []
    url = "http://www.prothom-alo.com/"
    sourceCode = urllib2.urlopen(url).read()
    soup = BeautifulSoup(sourceCode)
    content = soup.findAll("div", attrs={"class":"each_news mb20"})
    for x in content:
        headline = HeadLine()
        try:
            # First href, made absolute, then trimmed by the r'^(.*/d?)'
            # pattern (keeps text up to the last '/').
            link = x.select('a')
            link = re.findall(r'<a href="(.*?)"', str(link))
            link = url + link[0]
            shortLink = re.findall(r'^(.*/d?)', str(link))
            headline.Link = shortLink[0]
        except Exception as e:
            print("No Link")
        try:
            # src values start with "//": re-prefix with http://.
            image = re.findall(r'<img .*?src="//(.*?)"', str(x))
            headline.Imagelink = "http://" + str(image[0])
            print(headline.Imagelink)
        except Exception as e:
            print("No Image")
        try:
            title = re.findall(r'<a.*?>(.*?)</a>', str(x.h2.a))
            headline.Title = title[0]
        except Exception as e:
            print("no Heading")
        try:
            subStory = re.findall(r'<a class="content_right".*?>(.*?)</a>', str(x))
            headline.SubStory = subStory[0]
        except Exception as e:
            print("No subStory")
        headlines.append(headline)
    # Same byte layout as the old hand-built list: items joined by ",".
    body = "[" + ",".join(json.dumps(vars(h)) for h in headlines) + "]"
    return HttpResponse(body)
def HelloFromViews(request):
    """Trivial liveness endpoint; always answers with the plain text "Hi"."""
    greeting = "Hi"
    return HttpResponse(greeting)
| |
#
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2011 Intel Corporation
#
# Authored by Joshua Lock <josh@linux.intel.com>
# Authored by Dongxiao Xu <dongxiao.xu@intel.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import ast
import logging

import gobject

# HobHandler.handle_event references bb.event, bb.command and bb.msg, but
# "from bb.ui..." does not bind the name "bb" in this module; import the
# submodules explicitly so those references resolve.
import bb
import bb.command
import bb.event
import bb.msg
from bb.ui.crumbs.runningbuild import RunningBuild
class HobHandler(gobject.GObject):
    """
    This object does BitBake event handling for the hob gui.

    It drives the BitBake server through a queue of sub-commands
    (self.commands_async) and translates server events into GObject
    signals that the GUI subscribes to.
    """
    __gsignals__ = {
         "package-formats-updated" : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_PYOBJECT,)),
         "config-updated"          : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_STRING, gobject.TYPE_PYOBJECT,)),
         "command-succeeded"       : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_INT,)),
         "command-failed"          : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_STRING,)),
         "parsing-warning"         : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_STRING,)),
         "sanity-failed"           : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_STRING, gobject.TYPE_INT)),
         "generating-data"         : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     ()),
         "data-generated"          : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     ()),
         "parsing-started"         : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_PYOBJECT,)),
         "parsing"                 : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_PYOBJECT,)),
         "parsing-completed"       : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     (gobject.TYPE_PYOBJECT,)),
         "recipe-populated"        : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     ()),
         "package-populated"       : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     ()),
         "network-passed"          : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     ()),
         "network-failed"          : (gobject.SIGNAL_RUN_LAST,
                                      gobject.TYPE_NONE,
                                     ()),
    }

    # Top-level commands (passed to run_next_command as initcmd and echoed
    # back via the "command-succeeded" signal when the queue drains).
    (GENERATE_CONFIGURATION, GENERATE_RECIPES, GENERATE_PACKAGES, GENERATE_IMAGE, POPULATE_PACKAGEINFO, SANITY_CHECK, NETWORK_TEST) = range(7)

    # Sub-commands queued in self.commands_async and consumed one at a time.
    (SUB_PATH_LAYERS, SUB_FILES_DISTRO, SUB_FILES_MACH, SUB_FILES_SDKMACH, SUB_MATCH_CLASS, SUB_PARSE_CONFIG, SUB_SANITY_CHECK,
     SUB_GNERATE_TGTS, SUB_GENERATE_PKGINFO, SUB_BUILD_RECIPES, SUB_BUILD_IMAGE, SUB_NETWORK_TEST) = range(12)

    def __init__(self, server, recipe_model, package_model):
        """Store the server proxy and the two data models the GUI shows."""
        super(HobHandler, self).__init__()

        self.build = RunningBuild(sequential=True)

        self.recipe_model = recipe_model
        self.package_model = package_model

        self.commands_async = []   # pending sub-commands, run in order
        self.generating = False    # True while busy between sub-commands
        self.current_phase = None
        self.building = False
        self.recipe_queue = []
        self.package_queue = []

        self.server = server
        self.error_msg = ""
        self.initcmd = None
        self.parsing = False

    def set_busy(self):
        """Emit "generating-data" once when background work starts."""
        if not self.generating:
            self.emit("generating-data")
            self.generating = True

    def clear_busy(self):
        """Emit "data-generated" once when background work finishes."""
        if self.generating:
            self.emit("data-generated")
            self.generating = False

    def runCommand(self, commandline):
        """Run one server command; on failure emit "command-failed" and
        return None after dropping any queued sub-commands."""
        try:
            result, error = self.server.runCommand(commandline)
            if error:
                raise Exception("Error running command '%s': %s" % (commandline, error))
            return result
        except Exception as e:
            self.commands_async = []
            self.clear_busy()
            self.emit("command-failed", "Hob Exception - %s" % (str(e)))
            return None

    def run_next_command(self, initcmd=None):
        """Pop and dispatch the next queued sub-command.

        When the queue is empty, signal success for the top-level command
        (self.initcmd) and stop.
        """
        if initcmd is not None:
            self.initcmd = initcmd

        if self.commands_async:
            self.set_busy()
            next_command = self.commands_async.pop(0)
        else:
            self.clear_busy()
            if self.initcmd is not None:
                self.emit("command-succeeded", self.initcmd)
            return

        if next_command == self.SUB_PATH_LAYERS:
            self.runCommand(["findConfigFilePath", "bblayers.conf"])
        elif next_command == self.SUB_FILES_DISTRO:
            self.runCommand(["findConfigFiles", "DISTRO"])
        elif next_command == self.SUB_FILES_MACH:
            self.runCommand(["findConfigFiles", "MACHINE"])
        elif next_command == self.SUB_FILES_SDKMACH:
            self.runCommand(["findConfigFiles", "MACHINE-SDK"])
        elif next_command == self.SUB_MATCH_CLASS:
            self.runCommand(["findFilesMatchingInDir", "rootfs_", "classes"])
        elif next_command == self.SUB_PARSE_CONFIG:
            self.runCommand(["resetCooker"])
        elif next_command == self.SUB_GNERATE_TGTS:
            self.runCommand(["generateTargetsTree", "classes/image.bbclass", []])
        elif next_command == self.SUB_GENERATE_PKGINFO:
            self.runCommand(["triggerEvent", "bb.event.RequestPackageInfo()"])
        elif next_command == self.SUB_SANITY_CHECK:
            self.runCommand(["triggerEvent", "bb.event.SanityCheck()"])
        elif next_command == self.SUB_NETWORK_TEST:
            self.runCommand(["triggerEvent", "bb.event.NetworkTest()"])
        elif next_command == self.SUB_BUILD_RECIPES:
            self.clear_busy()
            self.building = True
            self.runCommand(["buildTargets", self.recipe_queue, self.default_task])
            self.recipe_queue = []
        elif next_command == self.SUB_BUILD_IMAGE:
            self.clear_busy()
            self.building = True
            target = self.image
            if self.base_image:
                # Request the build of a custom image
                self.generate_hob_base_image(target)
                self.set_var_in_file("LINGUAS_INSTALL", "", "local.conf")
                hobImage = self.runCommand(["matchFile", target + ".bb"])
                if self.base_image != self.recipe_model.__custom_image__:
                    baseImage = self.runCommand(["matchFile", self.base_image + ".bb"])
                    version = self.runCommand(["generateNewImage", hobImage, baseImage, self.package_queue, True, ""])
                    target += version
                    self.recipe_model.set_custom_image_version(version)

            targets = [target]
            if self.toolchain_packages:
                self.set_var_in_file("TOOLCHAIN_TARGET_TASK", " ".join(self.toolchain_packages), "local.conf")
                targets.append(target + ":do_populate_sdk")
            self.runCommand(["buildTargets", targets, self.default_task])

    def display_error(self):
        """Forward the accumulated error text via "command-failed"."""
        self.clear_busy()
        self.emit("command-failed", self.error_msg)
        self.error_msg = ""
        if self.building:
            self.building = False

    def handle_event(self, event):
        """Translate one BitBake server event into the matching signal(s)."""
        if not event:
            return

        # If we're building, the RunningBuild object handles the event.
        if self.building:
            self.current_phase = "building"
            self.build.handle_event(event)

        if isinstance(event, bb.event.PackageInfo):
            self.package_model.populate(event._pkginfolist)
            self.emit("package-populated")
            self.run_next_command()

        elif isinstance(event, bb.event.SanityCheckPassed):
            # NOTE(review): runCommand may return the *string* "True" here,
            # in which case "reparse is True" never fires -- confirm what
            # getVariable returns for BB_INVALIDCONF.
            reparse = self.runCommand(["getVariable", "BB_INVALIDCONF"]) or None
            if reparse is True:
                self.set_var_in_file("BB_INVALIDCONF", False, "local.conf")
                self.runCommand(["setPrePostConfFiles", "conf/.hob.conf", ""])
                # Fix: lists have no .prepend(); the old code raised
                # AttributeError whenever a reparse was requested.
                self.commands_async.insert(0, self.SUB_PARSE_CONFIG)
            self.run_next_command()

        elif isinstance(event, bb.event.SanityCheckFailed):
            self.emit("sanity-failed", event._msg, event._network_error)

        elif isinstance(event, logging.LogRecord):
            # Outside a build, collect errors for display_error(); surface
            # warnings immediately while parsing.
            if not self.building:
                if event.levelno >= logging.ERROR:
                    formatter = bb.msg.BBLogFormatter()
                    msg = formatter.format(event)
                    self.error_msg += msg + '\n'
                elif event.levelno >= logging.WARNING and self.parsing == True:
                    formatter = bb.msg.BBLogFormatter()
                    msg = formatter.format(event)
                    warn_msg = msg + '\n'
                    self.emit("parsing-warning", warn_msg)

        elif isinstance(event, bb.event.TargetsTreeGenerated):
            self.current_phase = "data generation"
            if event._model:
                self.recipe_model.populate(event._model)
                self.emit("recipe-populated")

        elif isinstance(event, bb.event.ConfigFilesFound):
            self.current_phase = "configuration lookup"
            var = event._variable
            values = event._values
            values.sort()
            self.emit("config-updated", var, values)

        elif isinstance(event, bb.event.ConfigFilePathFound):
            self.current_phase = "configuration lookup"

        elif isinstance(event, bb.event.FilesMatchingFound):
            self.current_phase = "configuration lookup"
            # FIXME: hard coding, should at least be a variable shared between
            # here and the caller
            if event._pattern == "rootfs_":
                formats = []
                for match in event._matches:
                    classname, sep, cls = match.rpartition(".")
                    fs, sep, format = classname.rpartition("_")
                    formats.append(format)
                formats.sort()
                self.emit("package-formats-updated", formats)

        elif isinstance(event, bb.command.CommandCompleted):
            self.current_phase = None
            self.run_next_command()

        elif isinstance(event, bb.command.CommandFailed):
            if event.error not in ("Forced shutdown", "Stopped build"):
                self.error_msg += event.error
            self.commands_async = []
            self.display_error()

        elif isinstance(event, (bb.event.ParseStarted,
                                bb.event.CacheLoadStarted,
                                bb.event.TreeDataPreparationStarted,
                                )):
            message = {}
            message["eventname"] = bb.event.getName(event)
            message["current"] = 0
            message["total"] = None
            message["title"] = "Parsing recipes"
            self.emit("parsing-started", message)
            if isinstance(event, bb.event.ParseStarted):
                self.parsing = True

        elif isinstance(event, (bb.event.ParseProgress,
                                bb.event.CacheLoadProgress,
                                bb.event.TreeDataPreparationProgress)):
            message = {}
            message["eventname"] = bb.event.getName(event)
            message["current"] = event.current
            message["total"] = event.total
            message["title"] = "Parsing recipes"
            self.emit("parsing", message)

        elif isinstance(event, (bb.event.ParseCompleted,
                                bb.event.CacheLoadCompleted,
                                bb.event.TreeDataPreparationCompleted)):
            message = {}
            message["eventname"] = bb.event.getName(event)
            message["current"] = event.total
            message["total"] = event.total
            message["title"] = "Parsing recipes"
            self.emit("parsing-completed", message)
            if isinstance(event, bb.event.ParseCompleted):
                self.parsing = False

        elif isinstance(event, bb.event.NetworkTestFailed):
            self.emit("network-failed")
            self.run_next_command()

        elif isinstance(event, bb.event.NetworkTestPassed):
            self.emit("network-passed")
            self.run_next_command()

        if self.error_msg and not self.commands_async:
            self.display_error()

        return

    def init_cooker(self):
        self.runCommand(["createConfigFile", ".hob.conf"])

    # --- thin wrappers that write individual variables to config files ---

    def set_extra_inherit(self, bbclass):
        self.append_var_in_file("INHERIT", bbclass, ".hob.conf")

    def set_bblayers(self, bblayers):
        self.set_var_in_file("BBLAYERS", " ".join(bblayers), "bblayers.conf")

    def set_machine(self, machine):
        if machine:
            self.early_assign_var_in_file("MACHINE", machine, "local.conf")

    def set_sdk_machine(self, sdk_machine):
        self.set_var_in_file("SDKMACHINE", sdk_machine, "local.conf")

    def set_image_fstypes(self, image_fstypes):
        self.set_var_in_file("IMAGE_FSTYPES", image_fstypes, "local.conf")

    def set_distro(self, distro):
        self.set_var_in_file("DISTRO", distro, "local.conf")

    def set_package_format(self, format):
        """Translate e.g. "rpm deb" into "package_rpm package_deb "."""
        package_classes = ""
        for pkgfmt in format.split():
            package_classes += ("package_%s" % pkgfmt + " ")
        self.set_var_in_file("PACKAGE_CLASSES", package_classes, "local.conf")

    def set_bbthreads(self, threads):
        self.set_var_in_file("BB_NUMBER_THREADS", threads, "local.conf")

    def set_pmake(self, threads):
        pmake = "-j %s" % threads
        self.set_var_in_file("PARALLEL_MAKE", pmake, "local.conf")

    def set_dl_dir(self, directory):
        self.set_var_in_file("DL_DIR", directory, "local.conf")

    def set_sstate_dir(self, directory):
        self.set_var_in_file("SSTATE_DIR", directory, "local.conf")

    def set_sstate_mirrors(self, url):
        self.set_var_in_file("SSTATE_MIRRORS", url, "local.conf")

    def set_extra_size(self, image_extra_size):
        self.set_var_in_file("IMAGE_ROOTFS_EXTRA_SPACE", str(image_extra_size), "local.conf")

    def set_rootfs_size(self, image_rootfs_size):
        self.set_var_in_file("IMAGE_ROOTFS_SIZE", str(image_rootfs_size), "local.conf")

    def set_incompatible_license(self, incompat_license):
        self.set_var_in_file("INCOMPATIBLE_LICENSE", incompat_license, "local.conf")

    def set_extra_setting(self, extra_setting):
        self.set_var_in_file("EXTRA_SETTING", extra_setting, "local.conf")

    def set_extra_config(self, extra_setting):
        """Sync the EXTRA_SETTING dict into local.conf, removing stale keys."""
        old_extra_setting = self.runCommand(["getVariable", "EXTRA_SETTING"]) or {}
        old_extra_setting = str(old_extra_setting)
        old_extra_setting = ast.literal_eval(old_extra_setting)
        if not type(old_extra_setting) == dict:
            old_extra_setting = {}

        # settings not changed
        if old_extra_setting == extra_setting:
            return

        # remove the old EXTRA SETTING variable
        self.remove_var_from_file("EXTRA_SETTING")

        # remove old settings from conf
        for key in old_extra_setting.keys():
            if key not in extra_setting:
                self.remove_var_from_file(key)

        # add new settings
        for key, value in extra_setting.iteritems():
            self.set_var_in_file(key, value, "local.conf")

        if extra_setting:
            self.set_var_in_file("EXTRA_SETTING", extra_setting, "local.conf")

    def set_http_proxy(self, http_proxy):
        self.set_var_in_file("http_proxy", http_proxy, "local.conf")

    def set_https_proxy(self, https_proxy):
        self.set_var_in_file("https_proxy", https_proxy, "local.conf")

    def set_ftp_proxy(self, ftp_proxy):
        self.set_var_in_file("ftp_proxy", ftp_proxy, "local.conf")

    def set_socks_proxy(self, socks_proxy):
        self.set_var_in_file("all_proxy", socks_proxy, "local.conf")

    def set_cvs_proxy(self, host, port):
        self.set_var_in_file("CVS_PROXY_HOST", host, "local.conf")
        self.set_var_in_file("CVS_PROXY_PORT", port, "local.conf")

    # --- entry points that queue sub-commands and kick off processing ---

    def request_package_info(self):
        self.commands_async.append(self.SUB_GENERATE_PKGINFO)
        self.run_next_command(self.POPULATE_PACKAGEINFO)

    def trigger_sanity_check(self):
        self.commands_async.append(self.SUB_SANITY_CHECK)
        self.run_next_command(self.SANITY_CHECK)

    def trigger_network_test(self):
        self.commands_async.append(self.SUB_NETWORK_TEST)
        self.run_next_command(self.NETWORK_TEST)

    def generate_configuration(self):
        """Reparse config and look up layers/distros/machines/formats."""
        self.runCommand(["setPrePostConfFiles", "conf/.hob.conf", ""])
        self.commands_async.append(self.SUB_PARSE_CONFIG)
        self.commands_async.append(self.SUB_PATH_LAYERS)
        self.commands_async.append(self.SUB_FILES_DISTRO)
        self.commands_async.append(self.SUB_FILES_MACH)
        self.commands_async.append(self.SUB_FILES_SDKMACH)
        self.commands_async.append(self.SUB_MATCH_CLASS)
        self.run_next_command(self.GENERATE_CONFIGURATION)

    def generate_recipes(self):
        self.runCommand(["setPrePostConfFiles", "conf/.hob.conf", ""])
        self.commands_async.append(self.SUB_PARSE_CONFIG)
        self.commands_async.append(self.SUB_GNERATE_TGTS)
        self.run_next_command(self.GENERATE_RECIPES)

    def generate_packages(self, tgts, default_task="build"):
        targets = []
        targets.extend(tgts)
        self.recipe_queue = targets
        self.default_task = default_task
        self.runCommand(["setPrePostConfFiles", "conf/.hob.conf", ""])
        self.commands_async.append(self.SUB_PARSE_CONFIG)
        self.commands_async.append(self.SUB_BUILD_RECIPES)
        self.run_next_command(self.GENERATE_PACKAGES)

    def generate_image(self, image, base_image, image_packages=None, toolchain_packages=None, default_task="build"):
        """Queue an image build.

        Fix: the old signature used mutable list defaults ([]) which are
        shared between calls; None now stands in for "empty list", which is
        backward compatible for all callers.
        """
        self.image = image
        self.base_image = base_image
        self.package_queue = image_packages if image_packages is not None else []
        self.toolchain_packages = toolchain_packages if toolchain_packages is not None else []
        self.default_task = default_task
        self.runCommand(["setPrePostConfFiles", "conf/.hob.conf", ""])
        self.commands_async.append(self.SUB_PARSE_CONFIG)
        self.commands_async.append(self.SUB_BUILD_IMAGE)
        self.run_next_command(self.GENERATE_IMAGE)

    def generate_new_image(self, image, base_image, package_queue, description):
        if base_image:
            base_image = self.runCommand(["matchFile", self.base_image + ".bb"])
        self.runCommand(["generateNewImage", image, base_image, package_queue, False, description])

    def generate_hob_base_image(self, hob_image):
        """Write an empty custom-image recipe under TOPDIR/recipes/images/."""
        image_dir = self.get_topdir() + "/recipes/images/"
        recipe_name = hob_image + ".bb"
        self.ensure_dir(image_dir)
        self.generate_new_image(image_dir + recipe_name, None, [], "")

    def ensure_dir(self, directory):
        self.runCommand(["ensureDir", directory])

    def build_succeeded_async(self):
        self.building = False

    def build_failed_async(self):
        self.initcmd = None
        self.commands_async = []
        self.building = False

    def cancel_parse(self):
        self.runCommand(["stateForceShutdown"])

    def cancel_build(self, force=False):
        if force:
            # Force the cooker to stop as quickly as possible
            self.runCommand(["stateForceShutdown"])
        else:
            # Wait for tasks to complete before shutting down, this helps
            # leave the workdir in a usable state
            self.runCommand(["stateShutdown"])

    def reset_build(self):
        self.build.reset()

    def get_logfile(self):
        return self.server.runCommand(["getVariable", "BB_CONSOLELOG"])[0]

    def get_topdir(self):
        return self.runCommand(["getVariable", "TOPDIR"]) or ""

    def _remove_redundant(self, string):
        """De-duplicate whitespace-separated words, preserving first-seen order."""
        ret = []
        for i in string.split():
            if i not in ret:
                ret.append(i)
        return " ".join(ret)

    def set_var_in_file(self, var, val, default_file=None):
        self.runCommand(["enableDataTracking"])
        self.server.runCommand(["setVarFile", var, val, default_file, "set"])
        self.runCommand(["disableDataTracking"])

    def early_assign_var_in_file(self, var, val, default_file=None):
        self.runCommand(["enableDataTracking"])
        self.server.runCommand(["setVarFile", var, val, default_file, "earlyAssign"])
        self.runCommand(["disableDataTracking"])

    def remove_var_from_file(self, var):
        self.server.runCommand(["removeVarFile", var])

    def append_var_in_file(self, var, val, default_file=None):
        self.server.runCommand(["setVarFile", var, val, default_file, "append"])

    def append_to_bbfiles(self, val):
        bbfiles = self.runCommand(["getVariable", "BBFILES", "False"]) or ""
        bbfiles = bbfiles.split()
        if val not in bbfiles:
            self.append_var_in_file("BBFILES", val, "bblayers.conf")

    def get_parameters(self):
        """Collect the BitBake variables the GUI needs into one dict.

        Fix: the bare ``except:`` clauses around numeric conversions now
        catch only (ValueError, TypeError) so e.g. KeyboardInterrupt is no
        longer swallowed.
        """
        # retrieve the parameters from bitbake
        params = {}
        params["core_base"] = self.runCommand(["getVariable", "COREBASE"]) or ""
        params["layer"] = self.runCommand(["getVariable", "BBLAYERS"]) or ""
        params["layers_non_removable"] = self.runCommand(["getVariable", "BBLAYERS_NON_REMOVABLE"]) or ""
        params["dldir"] = self.runCommand(["getVariable", "DL_DIR"]) or ""
        params["machine"] = self.runCommand(["getVariable", "MACHINE"]) or ""
        params["distro"] = self.runCommand(["getVariable", "DISTRO"]) or "defaultsetup"
        params["pclass"] = self.runCommand(["getVariable", "PACKAGE_CLASSES"]) or ""
        params["sstatedir"] = self.runCommand(["getVariable", "SSTATE_DIR"]) or ""
        params["sstatemirror"] = self.runCommand(["getVariable", "SSTATE_MIRRORS"]) or ""

        num_threads = self.runCommand(["getCpuCount"])
        if not num_threads:
            num_threads = 1
            max_threads = 65536
        else:
            try:
                num_threads = int(num_threads)
                max_threads = 16 * num_threads
            except (ValueError, TypeError):
                num_threads = 1
                max_threads = 65536
        params["max_threads"] = max_threads

        bbthread = self.runCommand(["getVariable", "BB_NUMBER_THREADS"])
        if not bbthread:
            bbthread = num_threads
        else:
            try:
                bbthread = int(bbthread)
            except (ValueError, TypeError):
                bbthread = num_threads
        params["bbthread"] = bbthread

        pmake = self.runCommand(["getVariable", "PARALLEL_MAKE"])
        if not pmake:
            pmake = num_threads
        elif isinstance(pmake, int):
            pass
        else:
            try:
                pmake = int(pmake.lstrip("-j "))
            except (ValueError, TypeError):
                pmake = num_threads
        params["pmake"] = "-j %s" % pmake

        params["image_addr"] = self.runCommand(["getVariable", "DEPLOY_DIR_IMAGE"]) or ""

        image_extra_size = self.runCommand(["getVariable", "IMAGE_ROOTFS_EXTRA_SPACE"])
        if not image_extra_size:
            image_extra_size = 0
        else:
            try:
                image_extra_size = int(image_extra_size)
            except (ValueError, TypeError):
                image_extra_size = 0
        params["image_extra_size"] = image_extra_size

        image_rootfs_size = self.runCommand(["getVariable", "IMAGE_ROOTFS_SIZE"])
        if not image_rootfs_size:
            image_rootfs_size = 0
        else:
            try:
                image_rootfs_size = int(image_rootfs_size)
            except (ValueError, TypeError):
                image_rootfs_size = 0
        params["image_rootfs_size"] = image_rootfs_size

        image_overhead_factor = self.runCommand(["getVariable", "IMAGE_OVERHEAD_FACTOR"])
        if not image_overhead_factor:
            image_overhead_factor = 1
        else:
            try:
                image_overhead_factor = float(image_overhead_factor)
            except (ValueError, TypeError):
                image_overhead_factor = 1
        params['image_overhead_factor'] = image_overhead_factor

        params["incompat_license"] = self._remove_redundant(self.runCommand(["getVariable", "INCOMPATIBLE_LICENSE"]) or "")
        params["sdk_machine"] = self.runCommand(["getVariable", "SDKMACHINE"]) or self.runCommand(["getVariable", "SDK_ARCH"]) or ""

        params["image_fstypes"] = self._remove_redundant(self.runCommand(["getVariable", "IMAGE_FSTYPES"]) or "")

        params["image_types"] = self._remove_redundant(self.runCommand(["getVariable", "IMAGE_TYPES"]) or "")

        params["conf_version"] = self.runCommand(["getVariable", "CONF_VERSION"]) or ""
        params["lconf_version"] = self.runCommand(["getVariable", "LCONF_VERSION"]) or ""

        params["runnable_image_types"] = self._remove_redundant(self.runCommand(["getVariable", "RUNNABLE_IMAGE_TYPES"]) or "")
        params["runnable_machine_patterns"] = self._remove_redundant(self.runCommand(["getVariable", "RUNNABLE_MACHINE_PATTERNS"]) or "")
        params["deployable_image_types"] = self._remove_redundant(self.runCommand(["getVariable", "DEPLOYABLE_IMAGE_TYPES"]) or "")
        params["kernel_image_type"] = self.runCommand(["getVariable", "KERNEL_IMAGETYPE"]) or ""
        params["tmpdir"] = self.runCommand(["getVariable", "TMPDIR"]) or ""
        params["distro_version"] = self.runCommand(["getVariable", "DISTRO_VERSION"]) or ""
        params["target_os"] = self.runCommand(["getVariable", "TARGET_OS"]) or ""
        params["target_arch"] = self.runCommand(["getVariable", "TARGET_ARCH"]) or ""
        params["tune_pkgarch"] = self.runCommand(["getVariable", "TUNE_PKGARCH"]) or ""
        params["bb_version"] = self.runCommand(["getVariable", "BB_MIN_VERSION"]) or ""

        params["default_task"] = self.runCommand(["getVariable", "BB_DEFAULT_TASK"]) or "build"

        params["socks_proxy"] = self.runCommand(["getVariable", "all_proxy"]) or ""
        params["http_proxy"] = self.runCommand(["getVariable", "http_proxy"]) or ""
        params["ftp_proxy"] = self.runCommand(["getVariable", "ftp_proxy"]) or ""
        params["https_proxy"] = self.runCommand(["getVariable", "https_proxy"]) or ""

        params["cvs_proxy_host"] = self.runCommand(["getVariable", "CVS_PROXY_HOST"]) or ""
        params["cvs_proxy_port"] = self.runCommand(["getVariable", "CVS_PROXY_PORT"]) or ""

        params["image_white_pattern"] = self.runCommand(["getVariable", "BBUI_IMAGE_WHITE_PATTERN"]) or ""
        params["image_black_pattern"] = self.runCommand(["getVariable", "BBUI_IMAGE_BLACK_PATTERN"]) or ""
        return params
| |
# ccbb libraries
import ccbb_pyutils.pandas_utils as ns_pandas
# project-specific libraries
import dual_crispr.construct_file_extracter as ns_extracter
import dual_crispr.count_files_and_dataframes as ns_count
__author__ = "Amanda Birmingham"
__maintainer__ = "Amanda Birmingham"
__email__ = "abirmingham@ucsd.edu"
__status__ = "development"
def get_prepped_file_suffix():
    """Filename suffix used for prepped per-timepoint count files."""
    return "timepoint_counts.txt"
def get_sample_name_header():
    """Column header used for the sample name."""
    return "sampleName"
def get_abundance_thresh_header():
    """Column header used for the log2 abundance threshold."""
    return "log2CountsThresh"
def get_abundance_thresh_file_suffix():
    """Filename suffix used for abundance-threshold files."""
    return "abundance_thresholds.txt"
def read_timepoint_from_standardized_count_header(count_header, time_prefixes_list):
    """Extract and return the integer timepoint from a standardized count header
    (format: expt_id<divider>timepoint<divider>replicate); raises ValueError on
    a header with the wrong number of pieces or an unrecognizable timepoint."""
    pieces = count_header.split(ns_extracter.get_header_divider())
    # +1 because we expect a standardized header, which has the experiment id added to it
    expected_num_pieces = _get_num_header_pieces() + 1
    if len(pieces) != expected_num_pieces:
        raise ValueError("Column header '{0}' splits into an unexpected number of pieces ({1})".format(
            count_header, len(pieces)
        ))
    return _validate_and_standardize_timepoint(pieces[_get_timepoint_index()], time_prefixes_list)
def merge_and_annotate_counts(count_file_fps, constructs_fp, dataset_name, time_prefixes_list,
                              disregard_order=True):
    """Merge per-sample count files, standardize their count-column headers,
    join the construct annotation, and return a dataframe whose columns are the
    annotation columns followed by count columns sorted by timepoint/replicate."""
    id_col = ns_extracter.get_construct_header()
    # Merge all count files on the shared construct-id column.
    counts_df = ns_pandas.merge_files_by_shared_header(count_file_fps, id_col)
    # Validate every count header, then rename each to its standardized form.
    raw_headers = _get_orig_count_headers(counts_df)
    std_headers = _validate_and_standardize_count_headers(raw_headers,
                                                          dataset_name, time_prefixes_list)
    counts_df.rename(columns=dict(zip(raw_headers, std_headers)), inplace=True)
    # Load the construct definitions and reduce them to the columns scoring needs.
    annotation_df = ns_extracter.load_annotation_df(constructs_fp, disregard_order)
    minimal_annotation = _generate_scoring_friendly_annotation(annotation_df)
    # Join counts to annotation, then order columns: annotation first, counts sorted.
    joined = minimal_annotation.merge(counts_df, on=id_col)
    ordered_cols = list(minimal_annotation.columns.values)
    ordered_cols.extend(_sort_headers(std_headers, dataset_name, time_prefixes_list))
    return joined.loc[:, ordered_cols]
def _get_timepoint_index():
return -2
def _get_num_header_pieces():
return 2
def _get_preferred_timept_prefix(time_prefixes_list):
return time_prefixes_list[0]
def _get_orig_count_headers(combined_counts_df):
    """Return the dataframe's column headers minus the construct-id column.

    Raises ValueError (from list.remove) if the construct-id column is absent.
    """
    headers = list(combined_counts_df.columns.values)
    headers.remove(ns_extracter.get_construct_header())
    return headers
def _validate_and_standardize_count_headers(orig_count_headers, expt_id, time_prefixes_list):
    """Validate each count header and return the standardized header names.

    Every header is split into (expt_id, timepoint, replicate) and recomposed
    into a canonical form.  All per-header errors are accumulated and reported
    together in a single ValueError after the loop; duplicate standardized
    headers (two columns naming the same timepoint/replicate) are also errors.
    Finally the overall experiment structure is cross-validated.
    """
    error_msgs = []
    # {expt_id: {timepoint: {set of replicates}}}, built as headers are parsed.
    expt_structure_by_id = {}
    # Maps standardized header back to the original, for duplicate detection.
    orig_header_by_standardized = {}
    result = []
    for curr_count_header in orig_count_headers:
        # Required count header format: experiment_timept_rep
        try:
            valid_id, timept, replicate = _validate_and_standardize_header_pieces(curr_count_header, expt_id,
                                                                                  time_prefixes_list)
            standardized_header = _recompose_count_header(valid_id, timept, replicate, time_prefixes_list)
            if standardized_header in orig_header_by_standardized:
                raise ValueError("The following pair of column headers both appear to represent the same timepoint "
                                 "and replicate: '{0}', '{1}'. Please modify the inputs to remove this "
                                 "ambiguity.".format(curr_count_header, orig_header_by_standardized[
                    standardized_header]))
        except ValueError as ex:
            # Collect the message and keep parsing the remaining headers.
            error_msgs.append(str(ex))
            continue
        orig_header_by_standardized[standardized_header] = curr_count_header
        result.append(standardized_header)
        # fill out structure {some_id: {timept: {set of replicates}}} for use in
        # validation after all columns are examined
        if valid_id not in expt_structure_by_id: expt_structure_by_id[valid_id] = {}
        curr_expt_structure = expt_structure_by_id[valid_id]
        if timept not in curr_expt_structure: curr_expt_structure[timept] = set()
        curr_timept_replicates = curr_expt_structure[timept]
        curr_timept_replicates.add(replicate)
    if len(error_msgs) > 0:
        raise ValueError(
            'The following error(s) were detected during count file parsing:\n{0}'.format("\n".join(error_msgs)))
    _validate_expt_structure(expt_structure_by_id)
    return result
# this method is broken out from _validate_and_standardize_count_headers just to make unit testing easier
def _validate_and_standardize_header_pieces(curr_count_header, expt_id, time_prefixes_list):
    """Return (expt_id, timepoint, replicate) for one count header.

    Timepoint/replicate are validated first, then the experiment id, so a
    header problem is reported before an id problem.
    """
    timept, replicate = _validate_and_standardize_timept_and_replicate(
        curr_count_header, time_prefixes_list)
    return _validate_and_standardize_expt_id(expt_id), timept, replicate
def _validate_and_standardize_timept_and_replicate(count_header, time_prefixes_list):
    """Split a count-column header into its (timepoint, replicate) components.

    The header (after any count-file suffix recognized by ns_count is clipped)
    must contain at least the expected number of divider-delimited pieces:
    experiment_timept_rep.  Raises ValueError otherwise.
    """
    # Required count header format: experiment_timept_rep
    trimmed_count_header = ns_count.clip_count_header_suffix(count_header)
    count_header_pieces = trimmed_count_header.split(ns_extracter.get_header_divider())
    num_expected_pieces = _get_num_header_pieces()
    if len(count_header_pieces) < num_expected_pieces:
        # Bug fix: the trailing placeholder was "{3}" (repeating the expected
        # count) instead of "{4}", so the actual pieces passed as the fifth
        # format argument were never shown in the message.
        raise ValueError("Column header '{0}' separates on the '{1}' delimiter into the following {2} piece(s) instead of the expected {3}: "
                         "{4}.".format(count_header, ns_extracter.get_header_divider(), len(count_header_pieces),
                                       num_expected_pieces, count_header_pieces))
    timept = _validate_and_standardize_timepoint(count_header_pieces[_get_timepoint_index()],
                                                 time_prefixes_list)
    replicate = _validate_and_standardize_replicate(count_header_pieces[-1])
    return timept, replicate
def _validate_and_standardize_timepoint(timept, time_prefixes_list):
if isinstance(timept, str):
# ensure timepoint is "t" or "T" plus a non-negative integer number
expected_timepoint_prefixes = [x.upper() for x in time_prefixes_list]
timepoint_prefix = timept[:1]
if timepoint_prefix.upper() not in expected_timepoint_prefixes:
raise ValueError("Time point '{0}' does not start with upper or lower case versions of any of the "
"expected prefixes {1}.".format(
timept, ", ".join(time_prefixes_list)))
timept = timept[1:]
else:
timept = str(timept)
if not timept.isdigit():
raise ValueError("Time point value '{0}' is not recognizable as a positive integer.".format(
timept))
return int(timept)
def _validate_and_standardize_replicate(rep):
if not isinstance(rep, int):
rep = int(rep) if rep.isdigit() else rep
return rep
def _validate_and_standardize_expt_id(expt_id):
# experiment ids are used as a component of the sample names--and sample names are used as
# dataframe *column names* in both pandas (Python) and R. In R, column names must be valid (R) variable
# names, and thus may contain only alphanumerics, periods, and underscores. In pandas, column names
# must be valid Python variable names, which may contain only alphanumerics and underscores. Thus,
# the only *non*-alphanumeric character that is accepted by both is the underscore, and I use that
# to delimit the pieces of the sample name (i.e., exptid_timept_replicatenum). That means expt id
# can't contain any underscores itself--leaving only alphanumerics :(
if not expt_id.isalnum():
raise ValueError("Experiment id '{0}' is not strictly alphanumeric.".format(
expt_id))
return expt_id
def _recompose_count_header(expt_id, timept, replicate, time_prefixes_list):
    """Rebuild the canonical count header: exptid<div><prefix><timept><div><replicate>."""
    divider = ns_extracter.get_header_divider()
    prefix = _get_preferred_timept_prefix(time_prefixes_list)
    pieces = (expt_id, divider, prefix, timept, divider, replicate)
    return "{}{}{}{}{}{}".format(*pieces)
def _validate_expt_structure(expt_structure_by_id):
# expt_structure_by_id should have format {some_id: {timept: {set of replicates}}}
# There must be only one experiment represented in the data structure
# All timepoints in the experiment must have the exact same set of replicates:
# e.g., can't have sample1_T1_1; sample1_T2_1, sample1_T2_2
if len(expt_structure_by_id) != 1:
raise ValueError(("Count headers must describe one and only one experiment, "
"but {0} were detected: '{1}'.").format(len(expt_structure_by_id),
sorted(list(expt_structure_by_id.keys()))))
for curr_expt_id, curr_expt_structure in expt_structure_by_id.items():
# ensure all timepoints for current sample have the same number of replicates
is_first_timept = True
reference_reps_set = None
if len(curr_expt_structure) == 0:
raise ValueError("Count headers must describe at least one timepoint for experiment, "
"but 0 were detected.")
for curr_timept, curr_rep_set in curr_expt_structure.items():
if len(curr_rep_set) == 0:
raise ValueError(("Count headers must describe at least one replicate for each timepoint, "
"but 0 were detected for timepoint '{0}'.").format(curr_timept))
if is_first_timept:
reference_reps_set = curr_rep_set
is_first_timept = False
else:
if curr_rep_set != reference_reps_set:
raise ValueError("For sample '{0}', timepoint {1} has "
"replicates '{2}' instead of the expected '{3}'".format(
curr_expt_id, curr_timept, sorted(curr_rep_set),
sorted(reference_reps_set)))
def _generate_scoring_friendly_annotation(annotation_df):
    """Return a copy of *annotation_df* reduced to the five columns the scoring
    code needs: construct id plus the probe-id and target-id columns for the
    'a' and 'b' positions."""
    construct_id_header = ns_extracter.get_construct_header()
    result = annotation_df.loc[:, [construct_id_header,
                                   ns_extracter.get_probe_id_header("a"),
                                   ns_extracter.get_probe_id_header("b"),
                                   ns_extracter.get_target_id_header("a"),
                                   ns_extracter.get_target_id_header("b")
                                   ]]
    # Below is what I expect the output to be after scoring data prep code is refactored to accept more
    # detail (and generate less itself).
    # result = annotation_df.loc[:, (construct_id_header, ns_extracter.get_target_id_header("a"),
    #                                ns_extracter.get_probe_id_header("a"),
    #                                ns_extracter.get_target_id_header("b"),
    #                                ns_extracter.get_probe_id_header("b"))]
    # target_pair_id_header = ns_extracter.get_target_pair_id_header()
    # probe_pair_id_header = ns_extracter.get_probe_pair_id_header()
    # Note: the below column creations could be done without using apply (i.e., by
    # just writing "df[colA] + divider + df[colB]") but I used apply because I want
    # to centralize the code that creates these strings, and sometimes it needs to work
    # on a single pair of variables rather than columns of variables, so it needed to be
    # a non-vectorized function.
    # result[target_pair_id_header] = result.apply(_compose_target_pair_id, axis=1)
    # result[probe_pair_id_header] = result.apply(_compose_probe_pair_id, axis=1)
    return result
# def _compose_probe_pair_id(row):
# return ns_extracter.compose_probe_pair_id_from_probe_ids(row[ns_extracter.get_probe_id_header("a")],
# row[ns_extracter.get_probe_id_header("b")])
#
#
# def _compose_target_pair_id(row):
# return ns_extracter.compose_target_pair_id_from_target_ids(row[ns_extracter.get_target_id_header("a")],
# row[ns_extracter.get_target_id_header("b")])
def _sort_headers(headers_list, expt_id, time_prefixes_list):
    """Return the standardized headers sorted by numeric timepoint, then
    numeric replicate (the shared experiment id never affects the ordering)."""
    triplets = [_validate_and_standardize_header_pieces(h, expt_id, time_prefixes_list)
                for h in headers_list]
    triplets.sort()
    return [_recompose_count_header(*t, time_prefixes_list=time_prefixes_list)
            for t in triplets]
| |
#!/usr/bin/python
import pygame
import pygame.gfxdraw
from . import (
coord,
dungeon,
grid,
level,
monster,
timer,
tower,
tower_selector,
)
from . gui import (
button,
)
class Player(pygame.sprite.Sprite):
    """The player: a tiny rect used as a click cursor for hit-testing,
    plus health and experience points."""

    def __init__(self):
        # Bug fix: Sprite.__init__ must run so sprite/group bookkeeping
        # (Group membership, kill()) works on this object.
        super().__init__()
        self.rect = pygame.Rect(0, 0, 5, 5)
        self.health = 100
        self.xp = 0
class GameState:
    """Top-level game container: owns the sprite groups, grid, dungeon,
    player, money, and the main event/update/draw cycle."""
    # Frame-rate cap passed to Clock.tick in update().
    fps = 30
    def __init__(self):
        self.time = timer.Timer()
        self.clock = pygame.time.Clock()
        self.bullets = pygame.sprite.Group()
        self.monsters = pygame.sprite.Group()
        self.towers = pygame.sprite.Group()
        # Sprites the player can click to select (towers, walls, monsters).
        self.clickables = pygame.sprite.Group()
        self.grid = grid.Grid(self)
        self.running = True
        self.paused = False
        # Ghost tower being placed (follows the mouse until committed).
        self.placing_group = pygame.sprite.Group()
        self.placing_tower = None
        self.player = Player()
        lvl = level.get_level(self.player.xp)
        self.ts = tower_selector.TowerSelector(lvl.towers)
        self.selected = None
        # Initial money (will get 10 more when loading the first level below).
        self.money = 20
        # Load dungeon when grid gets set.
        self.dungeon = None
        self.dungeon_level = 1
        self._load_dungeon()
    @property
    def rect(self):
        # NOTE(review): GameState never assigns self.image or self.coord, so
        # calling this property would raise AttributeError — it looks vestigial
        # (copied from a sprite class); confirm it is unused before relying on it.
        r = self.image.get_rect()
        r.x = self.coord.x
        r.y = self.coord.y
        return r
    def status_message(self):
        """Returns strings containing user-facing game status."""
        msgs = []
        msgs.append(self.dungeon.status_message())
        msgs.append("${}, Health: {}, XP: {}".format(
            self.money, self.player.health, self.player.xp))
        return msgs
    def _load_dungeon(self):
        """Reset per-level state and load 'levels/<n>.dungeon'; a missing file
        marks the game as over.  Also grants level-scaled money and advances
        dungeon_level for the next call."""
        if self.dungeon_level < 0: # FIXME: handle game-over
            return
        # Clear all transient state from the previous level.
        self.placing_tower = None
        self.selected = None
        self.grid = grid.Grid(self)
        self.bullets.empty()
        self.monsters.empty()
        self.towers.empty()
        self.clickables.empty()
        self.placing_group.empty()
        filename = 'levels/{}.dungeon'.format(self.dungeon_level)
        try:
            self.dungeon = dungeon.Dungeon.load(self.grid, filename)
        except IOError:
            # No more level files: treat running off the end as game over.
            print("GAME OVER")
            self.dungeon_level = -1 # FIXME: handle game-over
            return
        # Pre-placed walls from the level become obstacles and clickables.
        for wall in self.dungeon.walls:
            ok = self.grid.add_obstacle(wall)
            assert ok
            self.towers.add(wall)
            self.clickables.add(wall)
        # Give player at least 20 + (10*dlevel)
        self.money = max(self.money, 20)
        self.money += self.dungeon_level * 10
        self.dungeon_level += 1
    def _reload_dungeon(self):
        # XXX - this is a hack, because _load_dungeon() increments the
        # level.
        self.money = 20
        self.dungeon_level -= 1
        self._load_dungeon()
    @property
    def seconds(self):
        # Elapsed game time in seconds (Timer tracks pause-aware ticks).
        return self.time.ticks / 1000.0
    def active(self):
        """True while the main loop should keep running."""
        return self.running
    def handle_events(self):
        """Drain the pygame event queue and apply keyboard/mouse actions."""
        for event in pygame.event.get():
            # GUI buttons get first crack at every event.
            if button.Button.handle_event(event):
                continue
            if event.type == pygame.QUIT:
                self.running = False
            elif event.type == pygame.KEYDOWN:
                if event.key in [pygame.K_q, pygame.K_ESCAPE]:
                    self.running = False
                elif event.key in [pygame.K_m]:
                    # Debug helper: spawn a single orc immediately.
                    m = self.spawn_monster(monster.Orc)
                    m.update_path()
                elif event.key in [pygame.K_p, pygame.K_SPACE]:
                    # Toggle pause (game timer pauses with it).
                    if self.paused:
                        self.paused = False
                        self.time.resume()
                    else:
                        self.paused = True
                        self.time.pause()
                elif event.key == pygame.K_d:
                    # Drop into the debugger.
                    import pdb
                    pdb.set_trace()
                elif event.key == pygame.K_r:
                    self._reload_dungeon()
                elif event.key in [pygame.K_t]:
                    # Toggle placement of the tower type chosen in the selector.
                    if self.placing_tower is None:
                        self.selected = None
                        if self.ts.selected_tower is not None:
                            self.placing_tower = self.ts.selected_tower()
                            pos = pygame.mouse.get_pos()
                            cc = coord.Coord(pos[0], pos[1])
                            aligned = self.grid.client_coord_aligned(cc)
                            self.placing_tower.rect.x = aligned.x
                            self.placing_tower.rect.y = aligned.y
                            self.placing_group.add(self.placing_tower)
                    else:
                        self.placing_tower.kill()
                        self.placing_tower = None
                elif event.key in [pygame.K_u]:
                    # Upgrade the selected tower, if affordable.
                    if (self.selected is None or
                            not isinstance(self.selected,
                                           tower.Tower)):
                        # NOTE(review): this returns from handle_events entirely,
                        # discarding any remaining queued events — confirm intended.
                        return
                    t = self.selected
                    uc = t.upgrade_cost()
                    print("Upgrade tower {} for {}".format(
                        t.level, uc))
                    if uc is None:
                        print("No upgrade available")
                        return
                    if uc > self.money:
                        print("Not enough money")
                        return
                    self.money -= uc
                    t.upgrade()
                elif event.key in [pygame.K_w]:
                    # Toggle wall placement (same flow as tower placement).
                    if self.placing_tower is None:
                        self.selected = None
                        self.placing_tower = tower.Wall()
                        pos = pygame.mouse.get_pos()
                        cc = coord.Coord(pos[0], pos[1])
                        aligned = self.grid.client_coord_aligned(cc)
                        self.placing_tower.rect.x = aligned.x
                        self.placing_tower.rect.y = aligned.y
                        self.placing_group.add(self.placing_tower)
                    else:
                        self.placing_tower.kill()
                        self.placing_tower = None
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if self.placing_tower is not None:
                    # Commit the pending tower at its current snapped position.
                    self.add_tower(self.placing_tower)
                    self.placing_group.empty()
                    self.placing_tower = None
                else:
                    # Move the tiny player rect to the click point and use it
                    # as a cursor for sprite hit-testing.
                    self.player.rect.x = event.pos[0]
                    self.player.rect.y = event.pos[1]
                    clicked = pygame.sprite.spritecollide(
                        self.player, self.clickables, False)
                    if clicked:
                        self.selected = clicked[0]
            elif event.type == pygame.MOUSEMOTION:
                if self.placing_tower is not None:
                    # Ghost tower follows the mouse, snapped to the grid.
                    cc = coord.Coord(event.pos[0], event.pos[1])
                    aligned = self.grid.client_coord_aligned(cc)
                    self.placing_tower.rect.x = aligned.x
                    self.placing_tower.rect.y = aligned.y
    def update(self):
        """Advance one frame: move sprites, resolve collisions, fire towers."""
        if self.paused:
            # XXX - still want to be able to place towers while paused.
            return
        # Limit FPS
        dt = self.clock.tick(self.fps)
        if dt > 100:
            # Don't jump forward too much.
            return
        # Level cleared: no more spawns and no monsters left -> next dungeon.
        if not self.dungeon.active and len(self.monsters.sprites()) == 0:
            self._load_dungeon()
            return
        self.bullets.update(dt)
        self.monsters.update(dt)
        self.towers.update(self)
        # XXX
        # Throwaway sprite used only for collision testing at the base tile.
        class Base(pygame.sprite.Sprite):
            def __init__(self, c):
                super().__init__()
                self.rect = pygame.Rect(c.x, c.y, 32, 32)
        base = Base(self.grid.grid_coord_to_client(self.dungeon.base))
        # Check for monsters at base.
        smashers = pygame.sprite.spritecollide(
            base, self.monsters, False)
        for m in smashers:
            self.player.health -= m.damage
            m.kill()
        if self.player.health <= 0:
            print("YOU ARE DEAD! GAME OVER")
            self.paused = True
            return
        # Injure monsters with bullets.
        hits = pygame.sprite.groupcollide(self.bullets,
                                          self.monsters,
                                          True, False)
        for b, ms in hits.items():
            for m in ms:
                # Note: bullet only one hits one monster, so break.
                m.injure(b.damage)
                if not m.alive():
                    # Award bounty/XP; new XP may unlock tower types.
                    self.money += m.money
                    self.player.xp += m.xp
                    lvl = level.get_level(self.player.xp)
                    self.ts.set_available(lvl.towers)
                break
        loaded = [t for t in self.towers.sprites() if t.loaded]
        # Fire the towers.
        if (self.selected is not None and
                isinstance(self.selected, monster.Monster)):
            selected_monster = self.selected
        else:
            selected_monster = None
        in_range = pygame.sprite.groupcollide(
            loaded, self.monsters, False, False,
            collided=pygame.sprite.collide_circle)
        for t, ms in in_range.items():
            if selected_monster in ms:
                # Force firing at the selected monster.
                ms = [selected_monster]
            b = t.fire(ms, self.seconds)
            if b:
                self.add_bullet(b)
        self.dungeon.update(self)
    def add_bullet(self, b):
        """Register a newly fired bullet."""
        self.bullets.add(b)
    def add_monster(self, m):
        """Register a monster; monsters are also clickable/selectable."""
        self.monsters.add(m)
        self.clickables.add(m)
    def add_tower(self, t):
        """Buy and place tower *t* if affordable and the grid cell is free."""
        if self.money < t.cost:
            print("Not enough money.")
            return
        if self.grid is not None:
            # add_obstacle fails (returns False) when placement would block paths.
            if self.grid.add_obstacle(t):
                self.money -= t.cost
                self.towers.add(t)
                self.clickables.add(t)
    def spawn_monster(self, cls):
        """Create a monster of type *cls* at the dungeon's spawn point."""
        m = cls(self.dungeon.spawn_origin, self.dungeon.base, self.grid)
        self.add_monster(m)
        return m
    def draw_tower_radius(self, screen, t):
        """Outline tower *t*'s firing radius (skipped for zero-radius walls)."""
        if t.radius > 0:
            RED = (200, 0, 0)
            center = t.center
            pygame.gfxdraw.circle(screen,
                                  int(center.x), int(center.y),
                                  int(t.radius), RED)
    def draw(self, screen):
        """Render the frame: status text, grid, sprites, selection overlays."""
        BLACK = (0, 0, 0)
        font = pygame.font.Font(None, 18)
        # Draw wave status.
        text_x = grid.GRID_WIDTH + 20 # XXX
        text_y = 20
        for msg in self.status_message():
            text = font.render(msg, True, BLACK)
            screen.blit(text, [text_x, text_y])
            text_y += 20
        self.grid.draw(screen)
        self.monsters.draw(screen)
        self.towers.draw(screen)
        self.bullets.draw(screen)
        self.placing_group.draw(screen)
        if self.placing_tower is not None:
            self.draw_tower_radius(screen, self.placing_tower)
        if self.selected is not None:
            text = font.render(self.selected.status_message(self),
                               True, BLACK)
            text_y = 100
            screen.blit(text, [text_x, text_y])
            if isinstance(self.selected, tower.Tower):
                self.draw_tower_radius(screen, self.selected)
        self.ts.draw(screen)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ib-downloader4.py
Created on Tue Jul 5 15:53:45 2016
@author: nwillemse
"""
import time
import argparse
import pandas as pd
from datetime import datetime
from ib.ext.Contract import Contract
from ib.opt import Connection
class Downloader:
    """Downloads historical OHLC bars from Interactive Brokers via the old
    ibPy Connection API and writes one CSV per ticker under data/."""
    def __init__(
        self, tickers, exchange, ticker_type, expiry, barsize,
        start_date, end_date, ib_client_id, ib_port
    ):
        self.tickers = tickers
        self.exchange = exchange
        self.ticker_type = ticker_type
        self.expiry = expiry
        self.barsize = barsize
        self.client_id = ib_client_id
        self.order_id = 1
        self.port = ib_port
        self.currency = 'USD'
        self.tws_conn = None
        # Accumulates one row per received bar, indexed by bar datetime.
        self.curr_ohlc = pd.DataFrame(
            columns=['open', 'high', 'low', 'close', 'volume', 'open_interest']
        )
        # Flags flipped by the IB callbacks; polled in request_historical_data.
        self.no_data_error = False
        self.got_hist_data = False
        self.dates_list = self._get_trade_dates(start_date, end_date)
        # FX (CASH) has no trades, so request midpoint data instead.
        self.what_to_show = 'MIDPOINT' if ticker_type=='CASH' else 'TRADES'
        self.end_date = end_date
    def _get_trade_dates(self, start_dt=None, end_dt=None):
        """Return the list of date strings between start_dt and end_dt, newest
        first: a plain calendar range for CASH/FUT, otherwise NYSE trading
        days read from nyse_dates.txt."""
        if self.ticker_type in ['CASH', 'FUT']:
            dates = pd.date_range(start_dt, end_dt).tolist()
            res = sorted(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), dates), reverse=True)
            # res.sort(reverse=True)
            print(res)
        else:
            fn = 'nyse_dates.txt'
            print("Loading trading days from %s..." % fn)
            a = pd.read_csv(fn, parse_dates=['trade_date'])
            sub = a[a.trade_date >= start_dt].trade_date
            sub = sub[sub <= end_dt]
            sub.sort_values(ascending=False, inplace=True)
            res = sub.apply(lambda x: x.strftime('%Y-%m-%d')).values.tolist()
            print("Loaded %s days from %s to %s" % (len(res), res[-1], res[0]))
            #print(res)
        return res
    def error_handler(self, msg):
        """IB error callback; error code 162 means no historical data."""
        if msg.typeName == "error": # and msg.id != -1:
            print("Server Error:", msg)
            if msg.errorCode == 162:
                self.no_data_error = True
    def server_handler(self, msg):
        """Dispatch IB server messages (ids, accounts, historical bars)."""
        if msg.typeName == "nextValidId":
            self.order_id = msg.orderId
        elif msg.typeName == "managedAccounts":
            self.account_code = msg.accountsList
            print(self.account_code)
        elif msg.typeName == "historicalData":
            self.historical_data_event(msg)
        elif msg.typeName == "error" and msg.id != -1:
            return
        # else:
        #     print msg.typeName, msg
    def create_contract(self, symbol, sec_type, exch, curr, expiry):
        """Build and return an IB Contract for the given instrument."""
        contract = Contract()
        contract.m_symbol = symbol
        contract.m_secType = sec_type
        contract.m_exchange = exch
        contract.m_currency = curr
        contract.m_expiry = expiry
        if sec_type=='FUT':
            # Needed so already-expired future contracts can still be queried.
            contract.m_includeExpired = 1
        print("symbol:%s secType:%s exchange:%s currency:%s expiry:%s" % (
            contract.m_symbol, contract.m_secType, contract.m_exchange,
            contract.m_currency, contract.m_expiry
            )
        )
        return contract
    def historical_data_event(self, msg):
        """Append one bar to curr_ohlc; the 'finished' sentinel ends the stream.

        NOTE(review): msg.count (the bar's trade count) is stored in the
        'open_interest' column — confirm that is intended.
        """
        if msg.date.find('finished') == -1:
            # Daily bars arrive without a time component; try both formats.
            try:
                date = datetime.strptime(msg.date, '%Y%m%d %H:%M:%S')
            except Exception:
                date = datetime.strptime(msg.date, '%Y%m%d')
            self.curr_ohlc.loc[date] = msg.open, msg.high, msg.low, msg.close, \
                msg.volume, msg.count
        else:
            self.got_hist_data = True
    def connect_to_tws(self):
        """Open the TWS/Gateway connection; raises if it does not come up."""
        self.tws_conn = Connection.create(host='localhost',
                                          port=self.port,
                                          clientId=self.client_id)
        self.tws_conn.connect()
        time.sleep(2)
        if not self.tws_conn.isConnected():
            raise Exception("Unable to connect to TWS. Make sure the Gateway or TWS has been started. Port=%s ClientId=%s" % (self.port, self.client_id))
    def disconnect_from_tws(self):
        """Disconnect from TWS if a connection was made."""
        if self.tws_conn is not None:
            self.tws_conn.disconnect()
    def register_callback_functions(self):
        """Hook server_handler and error_handler into the connection."""
        print("Registering callback functions...")
        # Assign server messages handling function.
        self.tws_conn.registerAll(self.server_handler)
        # Assign error handling function.
        self.tws_conn.register(self.error_handler, 'Error')
    def request_historical_data(self, symbol_id, symbol):
        """Request 250 days of bars ending at end_date and block until the
        stream finishes or IB reports error 162 (no data)."""
        contract = self.create_contract(symbol,
                                        self.ticker_type,
                                        self.exchange,
                                        self.currency,
                                        self.expiry)
        self.got_hist_data = False
        self.no_data_error = False
        end_dt = self.end_date.strftime('%Y%m%d %H:%M:%S')
        print("Requesting history for %s on %s..." % (symbol, self.end_date))
        self.tws_conn.reqHistoricalData(symbol_id,
                                        contract,
                                        endDateTime=end_dt,
                                        durationStr='250 D',
                                        barSizeSetting=self.barsize,
                                        whatToShow=self.what_to_show,
                                        useRTH=0,
                                        formatDate=1)
        # Busy-wait on the callback flags set by the IB message thread.
        while not self.got_hist_data and not self.no_data_error:
            time.sleep(1)
        if self.no_data_error:
            self.no_data_error = False
            print("no data found for this day, continuing...")
            return
        # Throttle between requests to respect IB pacing limits.
        time.sleep(8)
    def start(self):
        """Connect, download each ticker's bars, and write data/<ticker>.csv."""
        try:
            print("Connecing to tws...")
            self.connect_to_tws()
            self.register_callback_functions()
            for ticker in self.tickers:
                print("Request historical data for %s" % ticker)
                self.request_historical_data(1, ticker)
                self.curr_ohlc.sort_index(ascending=False, inplace=True)
                self.curr_ohlc.index.name = 'datetime'
                if self.ticker_type=='CASH':
                    filename = ticker + '.' + self.currency + '.csv'
                else:
                    filename = ticker + '.csv'
                self.curr_ohlc.to_csv('data/' + filename)
        except Exception:
            # NOTE(review): swallows the exception without any detail —
            # consider printing/logging the exception before continuing.
            print("Error:")
        finally:
            print("disconnected")
            self.disconnect_from_tws()
if __name__ == '__main__':
    def valid_date(s):
        """argparse type: parse a YYYY-MM-DD string into a datetime, or raise
        ArgumentTypeError so argparse reports a clean usage error."""
        try:
            return datetime.strptime(s, "%Y-%m-%d")
        except ValueError:
            msg = "Not a valid date: '{0}'.".format(s)
            raise argparse.ArgumentTypeError(msg)
    parser = argparse.ArgumentParser(description='Script to download data from Interactive Brokers')
    # -t may be repeated; each occurrence appends one ticker.
    parser.add_argument('-t', required=True, dest='tickers',
                        action='append',
                        default=[],
                        help='Ticker to download data'
                        )
    parser.add_argument('-T', dest='exchange', default='SMART',
                        choices=['SMART', 'GLOBEX'],
                        help='Exhcange of the ticker'
                        )
    parser.add_argument('-x', dest='ticker_type', default='STK',
                        choices=['STK', 'FUT', 'OPT', 'IDX'],
                        help='The type of the asset'
                        )
    parser.add_argument('-e', dest='expiry',
                        help='The expiry for future contract'
                        )
    parser.add_argument('-b', dest='barsize', default='D',
                        choices=['D', '30 mins', '60 mins'],
                        help='The bar size to be download'
                        )
    parser.add_argument('-s', required=True, dest='start_date',
                        help='Starting date for loading data (YYYY-MM-DD)',
                        type=valid_date
                        )
    parser.add_argument('-d', required=True, dest='end_date',
                        help='Ending date for loading data (YYYY-MM-DD)',
                        type=valid_date
                        )
    # NOTE(review): client id and port default to *strings*; the ibPy
    # Connection call appears to accept them as-is — confirm before adding
    # type=int, which would change what Downloader receives.
    parser.add_argument('-c', dest='ib_client_id', default='200',
                        help='The IB client id'
                        )
    parser.add_argument('-p', dest='ib_port', default='4001',
                        help='The IB client port to connect to'
                        )
    parser.add_argument('-v', action='version', version='%(prog)s 1.0')
    args = parser.parse_args()
    print(vars(args))
    # Argument dest names deliberately match Downloader.__init__ parameters.
    system = Downloader(**vars(args))
    system.start()
| |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import ctypes
import math
import numpy
from pyscf import lib
libfci = lib.load_library('libfci')
def make_strings(orb_list, nelec):
    '''Generate string from the given orbital list.

    Returns:
        list of int64. One int64 element represents one string in binary format.
        The binary format takes the convention that the one bit stands for one
        orbital, bit-1 means occupied and bit-0 means unoccupied. The lowest
        (right-most) bit corresponds to the lowest orbital in the orb_list.

    Examples:

    >>> [bin(x) for x in make_strings((0,1,2,3),2)]
    ['0b11', '0b101', '0b110', '0b1001', '0b1010', '0b1100']
    >>> [bin(x) for x in make_strings((3,1,0,2),2)]
    ['0b1010', '0b1001', '0b11', '0b1100', '0b110', '0b101']
    '''
    orb_list = list(orb_list)
    if len(orb_list) > 63:
        # Bit strings no longer fit in int64; fall back to occupation lists.
        return _gen_occslst(orb_list, nelec)
    assert(nelec >= 0)
    if nelec == 0:
        return numpy.asarray([0], dtype=numpy.int64)
    elif nelec > len(orb_list):
        return numpy.asarray([], dtype=numpy.int64)
    def gen_str_iter(orb_list, nelec):
        # Recursive enumeration: the highest orbital is either unoccupied
        # (choose nelec from the rest) or occupied (choose nelec-1 from the rest).
        if nelec == 1:
            res = [(1 << i) for i in orb_list]
        elif nelec >= len(orb_list):
            n = 0
            for i in orb_list:
                n = n | (1 << i)
            res = [n]
        else:
            restorb = orb_list[:-1]
            thisorb = 1 << orb_list[-1]
            res = gen_str_iter(restorb, nelec)
            for n in gen_str_iter(restorb, nelec-1):
                res.append(n | thisorb)
        return res
    strings = gen_str_iter(orb_list, nelec)
    # Sanity check: count must equal the binomial coefficient C(norb, nelec).
    assert(strings.__len__() == num_strings(len(orb_list),nelec))
    return numpy.asarray(strings, dtype=numpy.int64)
# Backward-compatible alias for the historical function name.
gen_strings4orblist = make_strings
def _gen_occslst(orb_list, nelec):
    '''Generate occupied orbital list for each string.

    Used instead of bit strings when there are more than 63 orbitals; the
    result is an (nstrings, nelec) int32 array viewed as OIndexList so callers
    can distinguish it from bit-string arrays via isinstance.
    '''
    orb_list = list(orb_list)
    assert(nelec >= 0)
    if nelec == 0:
        # NOTE(review): these two early returns yield a plain ndarray, not an
        # OIndexList view like the main path — confirm callers never hit them
        # with >63 orbitals, where the distinction matters for dispatch.
        return numpy.zeros((1,nelec), dtype=numpy.int32)
    elif nelec > len(orb_list):
        return numpy.zeros((0,nelec), dtype=numpy.int32)
    def gen_occs_iter(orb_list, nelec):
        # Same recursion as make_strings, but collecting occupied-orbital
        # lists instead of bit masks.
        if nelec == 1:
            res = [[i] for i in orb_list]
        elif nelec >= len(orb_list):
            res = [orb_list]
        else:
            restorb = orb_list[:-1]
            thisorb = orb_list[-1]
            res = gen_occs_iter(restorb, nelec)
            for n in gen_occs_iter(restorb, nelec-1):
                res.append(n + [thisorb])
        return res
    occslst = gen_occs_iter(orb_list, nelec)
    return numpy.asarray(occslst, dtype=numpy.int32).view(OIndexList)
def _strs2occslst(strs, norb):
    """Convert bit strings to an (nstrings, nelec) occupied-orbital list,
    returned as an int32 OIndexList view."""
    n_strings = len(strs)
    # One mask column per orbital; nonzero means that orbital is occupied.
    bit_per_orb = numpy.asarray([1 << i for i in range(norb)])
    occupied = (strs.reshape(-1, 1) & bit_per_orb) != 0
    # numpy.where scans row-major, so each row's orbitals come out ascending.
    occ_lists = numpy.where(occupied)[1].reshape(n_strings, -1)
    return numpy.asarray(occ_lists, dtype=numpy.int32).view(OIndexList)
def _occslst2strs(occslst):
na, nelec = occslst.shape
strs = numpy.zeros(na, dtype=numpy.int64)
for i in range(nelec):
strs ^= 1 << occslst[:,i]
return strs
# Marker ndarray subclass: an (nstrings, nelec) occupied-orbital list, used
# (via isinstance checks) to distinguish occupation lists from int64 bit
# strings when there are more than 63 orbitals.
class OIndexList(numpy.ndarray):
    pass
def num_strings(n, m):
    """Number of ways to occupy m of n orbitals: the binomial coefficient
    C(n, m), or 0 when m is out of range."""
    if 0 <= m <= n:
        return math.factorial(n) // (math.factorial(n-m)*math.factorial(m))
    return 0
def gen_linkstr_index_o0(orb_list, nelec, strs=None):
    '''Slow (pure Python) reference implementation of gen_linkstr_index.

    For each string, builds the table of [cre, des, target_address, parity]
    rows describing every single-excitation p+ q connecting it to another
    string of the same electron count.
    '''
    if strs is None:
        strs = make_strings(orb_list, nelec)
    # Map each string to its row index (address).
    strdic = dict(zip(strs,range(strs.__len__())))
    def propgate1e(str0):
        # Partition orbitals into occupied and virtual for this string.
        occ = []
        vir = []
        for i in orb_list:
            if str0 & (1 << i):
                occ.append(i)
            else:
                vir.append(i)
        linktab = []
        # Diagonal (i,i) entries: string maps to itself with parity +1.
        for i in occ:
            linktab.append((i, i, strdic[str0], 1))
        for i in occ:
            for a in vir:
                str1 = str0 ^ (1 << i) | (1 << a)
                # [cre, des, target_address, parity]
                # cre_des_sign (defined elsewhere in this module) supplies the
                # fermionic sign of the excitation.
                linktab.append((a, i, strdic[str1], cre_des_sign(a, i, str0)))
        return linktab
    t = [propgate1e(s) for s in strs.astype(numpy.int64)]
    return numpy.array(t, dtype=numpy.int32)
def gen_linkstr_index_o1(orb_list, nelec, strs=None, tril=False):
    '''gen_linkstr_index for occupation-list (OIndexList) strings, used when
    bit strings no longer fit in int64 (64+ orbitals).

    Returns an (na, nelec + nelec*nvir, 4) int32 array of
    [cre, des, target_address, parity] rows; with tril=True the (cre, des)
    pair is compressed to a lower-triangular index via reform_linkstr_index.
    '''
    if nelec == 0:
        return numpy.zeros((0,0,4), dtype=numpy.int32)
    if strs is None:
        strs = _gen_occslst(orb_list, nelec)
    occslst = strs
    orb_list = numpy.asarray(orb_list)
    norb = len(orb_list)
    # The parity bookkeeping below assumes orbitals are exactly 0..norb-1.
    assert(numpy.all(numpy.arange(norb) == orb_list))
    # Map each occupation tuple to its row index (address).
    strdic = dict((tuple(s), i) for i,s in enumerate(occslst))
    nvir = norb - nelec
    def propgate1e(str0):
        addr0 = strdic[tuple(str0)]
        # First nelec rows: diagonal (i,i) entries leaving the string unchanged.
        tab = numpy.empty((nelec,4), dtype=numpy.int32)
        tab[:,0] = tab[:,1] = str0
        tab[:,2] = addr0
        tab[:,3] = 1
        linktab = [tab]
        virmask = numpy.ones(norb, dtype=bool)
        virmask[str0] = False
        vir = orb_list[virmask]
        str0 = numpy.asarray(str0)
        # where to put vir-orb, ie how many occ-orb in the left
        where_vir = numpy.sum(str0.reshape(-1,1) < vir, axis=0)
        parity_occ_orb = 1 # parity for annihilating occupied orbital
        for n,i in enumerate(str0): # loop over all occupied orbitals
            # o,v which index is bigger, to determine whether to annihilate occ-orb first
            reorder_to_ov = vir > i
            # All strings reachable by exciting occupied orbital i -> each virtual.
            str1s = numpy.empty((nvir,nelec), dtype=int)
            str1s[:] = str0
            str1s[:,n] = vir
            str1s.sort(axis=1)
            addr = [strdic[tuple(s)] for s in str1s]
            parity = (where_vir + reorder_to_ov + 1) % 2 #? +1 so that even parity has +1, odd parity = 0
            parity[parity == 0] = -1
            parity *= parity_occ_orb
            tab = numpy.empty((nvir,4), dtype=numpy.int32)
            tab[:,0] = vir
            tab[:,1] = i
            tab[:,2] = addr
            tab[:,3] = parity
            linktab.append(tab)
            parity_occ_orb *= -1
        return numpy.vstack(linktab)
    lidx = [propgate1e(s) for s in occslst]
    lidx = numpy.asarray(lidx, dtype=numpy.int32)
    if tril:
        lidx = reform_linkstr_index(lidx)
    return lidx
# return [cre, des, target_address, parity]
def gen_linkstr_index(orb_list, nocc, strs=None, tril=False):
    '''Look up table, for the strings relationship in terms of a
    creation-annihilating operator pair.

    For given string str0, index[str0] is (nocc+nocc*nvir) x 4 array.
    The first nocc rows [i(:occ),i(:occ),str0,sign] are occupied-occupied
    excitations, which do not change the string. The next nocc*nvir rows
    [a(:vir),i(:occ),str1,sign] are occupied-virtual exciations, starting from
    str0, annihilating i, creating a, to get str1.
    '''
    if strs is None:
        strs = make_strings(orb_list, nocc)
    # Occupation-list strings (64+ orbitals) go to the pure-Python path.
    if isinstance(strs, OIndexList):
        return gen_linkstr_index_o1(orb_list, nocc, strs, tril)
    strs = numpy.array(strs, dtype=numpy.int64)
    # The C kernel requires the strings in strictly ascending order.
    assert(all(strs[:-1] < strs[1:]))
    norb = len(orb_list)
    nvir = norb - nocc
    na = strs.shape[0]
    # Filled in-place by the compiled libfci routine.
    link_index = numpy.empty((na,nocc*nvir+nocc,4), dtype=numpy.int32)
    libfci.FCIlinkstr_index(link_index.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb), ctypes.c_int(na),
                            ctypes.c_int(nocc),
                            strs.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(tril))
    return link_index
def reform_linkstr_index(link_index):
    '''Compress the (a, i) pair index in linkstr_index to a lower triangular
    index. The compressed indices can match the 4-fold symmetry of integrals.
    '''
    compressed = link_index.copy()
    cre = link_index[:, :, 0]
    des = link_index[:, :, 1]
    # tril pair index: ai = max(a,i)*(max(a,i)+1)//2 + min(a,i)
    compressed[:, :, 0] = numpy.maximum(cre * (cre + 1) // 2 + des,
                                        des * (des + 1) // 2 + cre)
    compressed[:, :, 1] = 0
    return compressed
def gen_linkstr_index_trilidx(orb_list, nocc, strs=None):
    r'''Generate linkstr_index assuming :math:`p^+ q|0\rangle` with
    :math:`p > q`, so each entry has the form ``[pq, *, str1, sign]``.

    Shorthand for ``reform_linkstr_index(gen_linkstr_index(...))``.
    '''
    return gen_linkstr_index(orb_list, nocc, strs, tril=True)
# return [cre, des, target_address, parity]
def gen_cre_str_index_o0(orb_list, nelec):
    '''Slow (pure Python) reference implementation of gen_cre_str_index'''
    cre_strs = make_strings(orb_list, nelec+1)
    if isinstance(cre_strs, OIndexList):
        raise NotImplementedError('System with 64 orbitals or more')
    # map each (nelec+1)-electron string to its address
    addr_of = dict(zip(cre_strs, range(len(cre_strs))))

    def one_string(str0):
        tab = []
        for orb in orb_list:
            if str0 & (1 << orb):
                continue          # orbital already occupied: p^+ gives zero
            str1 = str0 | (1 << orb)
            tab.append((orb, 0, addr_of[str1], cre_sign(orb, str0)))
        return tab

    strs = make_strings(orb_list, nelec)
    return numpy.array([one_string(s) for s in strs.astype(numpy.int64)],
                       dtype=numpy.int32)
def gen_cre_str_index_o1(orb_list, nelec):
    '''C implementation of gen_cre_str_index'''
    norb = len(orb_list)
    assert(nelec < norb)
    strs = make_strings(orb_list, nelec)
    if isinstance(strs, OIndexList):
        raise NotImplementedError('System with 64 orbitals or more')
    strs = numpy.array(strs, dtype=numpy.int64)
    na = strs.shape[0]
    # one row per creatable (virtual) orbital: [cre, 0, str1, sign]
    link_index = numpy.empty((na, norb - nelec, 4), dtype=numpy.int32)
    libfci.FCIcre_str_index(link_index.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb), ctypes.c_int(na),
                            ctypes.c_int(nelec),
                            strs.ctypes.data_as(ctypes.c_void_p))
    return link_index
def gen_cre_str_index(orb_list, nelec):
    '''linkstr_index mapping N-electron strings to (N+1)-electron strings.

    For a given string str0, index[str0] is an nvir x 4 array whose rows
    [i(cre),--,str1,sign] mean: starting from str0, creating i gives str1
    with the given sign.
    '''
    return gen_cre_str_index_o1(orb_list, nelec)
# return [cre, des, target_address, parity]
def gen_des_str_index_o0(orb_list, nelec):
    '''Slow (pure Python) reference implementation of gen_des_str_index'''
    des_strs = make_strings(orb_list, nelec-1)
    if isinstance(des_strs, OIndexList):
        raise NotImplementedError('System with 64 orbitals or more')
    # map each (nelec-1)-electron string to its address
    addr_of = dict(zip(des_strs, range(len(des_strs))))

    def one_string(str0):
        tab = []
        for orb in orb_list:
            if not (str0 & (1 << orb)):
                continue          # orbital empty: annihilation gives zero
            str1 = str0 ^ (1 << orb)
            tab.append((0, orb, addr_of[str1], des_sign(orb, str0)))
        return tab

    strs = make_strings(orb_list, nelec)
    return numpy.array([one_string(s) for s in strs.astype(numpy.int64)],
                       dtype=numpy.int32)
def gen_des_str_index_o1(orb_list, nelec):
    '''C implementation of gen_des_str_index'''
    assert(nelec > 0)
    strs = make_strings(orb_list, nelec)
    if isinstance(strs, OIndexList):
        raise NotImplementedError('System with 64 orbitals or more')
    strs = numpy.array(strs, dtype=numpy.int64)
    norb = len(orb_list)
    na = strs.shape[0]
    # one row per occupied orbital: [0, des, str1, sign]
    link_index = numpy.empty((na, nelec, 4), dtype=numpy.int32)
    libfci.FCIdes_str_index(link_index.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb), ctypes.c_int(na),
                            ctypes.c_int(nelec),
                            strs.ctypes.data_as(ctypes.c_void_p))
    return link_index
def gen_des_str_index(orb_list, nelec):
    '''linkstr_index mapping N-electron strings to (N-1)-electron strings.

    For a given string str0, index[str0] is an nvir x 4 array whose rows
    [--,i(des),str1,sign] mean: starting from str0, annihilating i gives
    str1 with the given sign.
    '''
    return gen_des_str_index_o1(orb_list, nelec)
# Determine the sign of p^+ q |string0>
def cre_des_sign(p, q, string0):
    '''Sign of the single excitation p^+ q applied to string0.

    Returns 0 when the excitation annihilates the ket (p occupied or q
    empty), otherwise +1/-1 from the fermionic reordering parity.
    '''
    if p == q:
        return 1
    # need q occupied and p empty, otherwise the result vanishes
    if (string0 & (1 << p)) or not (string0 & (1 << q)):
        return 0
    lo, hi = (q, p) if p > q else (p, q)
    # bits strictly between the two orbitals determine the parity
    mask = (1 << hi) - (1 << (lo + 1))
    return -1 if bin(string0 & mask).count('1') % 2 else 1
# Determine the sign of p^+ |string0>
def cre_sign(p, string0):
    '''Sign of creating orbital p in string0 (0 if p is already occupied).'''
    if string0 & (1 << p):
        return 0
    # parity of the number of occupied orbitals above p
    n_above = bin(string0 >> (p + 1)).count('1')
    return -1 if n_above % 2 else 1
# Determine the sign of p |string0>
def des_sign(p, string0):
    '''Sign of annihilating orbital p in string0 (0 if p is empty).'''
    if not (string0 & (1 << p)):
        return 0
    # parity of the number of occupied orbitals above p
    n_above = bin(string0 >> (p + 1)).count('1')
    return -1 if n_above % 2 else 1
# Determine the sign of string1 = p^+ q |string0>
def parity(string0, string1):
    '''Relative sign between string0 and the singly-excited string1.'''
    diff = string1 - string0
    if diff == 0:
        return 1
    # the overlap of the larger string with the difference bits counts the
    # occupied orbitals crossed by the excitation (Hamming weight)
    if diff > 0:
        crossings = bin(string1 & diff).count('1')
    else:
        crossings = bin(string0 & (-diff)).count('1')
    return -1 if crossings % 2 else 1
def addr2str_o0(norb, nelec, addr):
    '''Recursive reference implementation of addr2str.'''
    assert(num_strings(norb, nelec) > addr)
    if addr == 0 or nelec == norb or nelec == 0:
        return (1 << nelec) - 1   # lowest nelec orbitals occupied: ..0011..11
    for top in reversed(range(norb)):
        # strings with orbital `top` unoccupied come first in the ordering
        skipped = num_strings(top, nelec)
        if skipped <= addr:
            return (1 << top) | addr2str_o0(top, nelec - 1, addr - skipped)
def addr2str_o1(norb, nelec, addr):
    '''Iterative conversion of a CI determinant address to an occupation
    bit string.'''
    assert(num_strings(norb, nelec) > addr)
    if addr == 0 or nelec == norb or nelec == 0:
        return (1 << nelec) - 1 # ..0011..11
    str1 = 0
    nelec_left = nelec          # electrons still to be placed
    for norb_left in reversed(range(norb)):
        # number of strings that do NOT occupy orbital norb_left
        addrcum = num_strings(norb_left, nelec_left)
        if nelec_left == 0:
            break
        elif addr == 0:
            # remaining electrons occupy the lowest orbitals
            str1 |= (1 << nelec_left) - 1
            break
        elif addrcum <= addr:
            # orbital norb_left is occupied; skip the strings without it
            str1 |= 1 << norb_left
            addr -= addrcum
            nelec_left -= 1
    return str1
def addr2str(norb, nelec, addr):
    '''Convert CI determinant address to string'''
    (string,) = addrs2str(norb, nelec, [addr])
    return string
def addrs2str(norb, nelec, addrs):
    '''Convert a list of CI determinant addresses to strings'''
    addrs = numpy.asarray(addrs, dtype=numpy.int32)
    # every address must lie inside the (norb, nelec) string space
    assert(all(num_strings(norb, nelec) > addrs))
    nstr = addrs.size
    strs = numpy.empty(nstr, dtype=numpy.int64)
    libfci.FCIaddrs2str(strs.ctypes.data_as(ctypes.c_void_p),
                        addrs.ctypes.data_as(ctypes.c_void_p),
                        ctypes.c_int(nstr),
                        ctypes.c_int(norb), ctypes.c_int(nelec))
    return strs
#def str2addr_o0(norb, nelec, string):
# if norb <= nelec or nelec == 0:
# return 0
# elif (1<<(norb-1)) & string: # remove the first bit
# return num_strings(norb-1, nelec) \
# + str2addr_o0(norb-1, nelec-1, string^(1<<(norb-1)))
# else:
# return str2addr_o0(norb-1, nelec, string)
#def str2addr_o1(norb, nelec, string):
# #TODO: assert norb > first-bit-in-string, nelec == num-1-in-string
# addr = 0
# nelec_left = nelec
# for norb_left in reversed(range(norb)):
# if nelec_left == 0 or norb_left < nelec_left:
# break
# elif (1<<norb_left) & string:
# addr += num_strings(norb_left, nelec_left)
# nelec_left -= 1
# return addr
def str2addr(norb, nelec, string):
    '''Convert an occupation string to its CI determinant address.'''
    if isinstance(string, str):
        # binary text form, e.g. '1011'
        nbits = string.count('1')
        assert(nbits == nelec)
        string = int(string, 2)
    else:
        assert(bin(string).count('1') == nelec)
    libfci.FCIstr2addr.restype = ctypes.c_int
    return libfci.FCIstr2addr(ctypes.c_int(norb), ctypes.c_int(nelec),
                              ctypes.c_ulonglong(string))
def strs2addr(norb, nelec, strings):
    '''Convert a list of strings to CI determinant addresses'''
    strings = numpy.asarray(strings, dtype=numpy.int64)
    nstr = strings.size
    addrs = numpy.empty(nstr, dtype=numpy.int32)
    libfci.FCIstrs2addr(addrs.ctypes.data_as(ctypes.c_void_p),
                        strings.ctypes.data_as(ctypes.c_void_p),
                        ctypes.c_int(nstr),
                        ctypes.c_int(norb), ctypes.c_int(nelec))
    return addrs
def sub_addrs(norb, nelec, orbital_indices, sub_nelec=0):
    '''The addresses of the determinants which include the specified orbital
    indices. The size of the returned addresses is equal to the number of
    determinants of (norb, nelec) system.
    '''
    assert(norb < 63)
    if sub_nelec == 0:
        # all nelec electrons restricted to the given orbitals
        return strs2addr(norb, nelec, make_strings(orbital_indices, nelec))
    strs = make_strings(range(norb), nelec)
    # count how many of the requested orbitals are occupied in each string
    counts = numpy.zeros(len(strs), dtype=int)
    for orb in orbital_indices:
        counts += (strs & (1 << orb)) != 0
    return strs2addr(norb, nelec, strs[counts == sub_nelec])
def tn_strs(norb, nelec, n):
    '''Generate strings for Tn amplitudes. Eg n=1 (T1) has nvir*nocc strings,
    n=2 (T2) has nvir*(nvir-1)/2 * nocc*(nocc-1)/2 strings.
    '''
    if nelec < n or norb - nelec < n:
        # not enough occupied or virtual orbitals for an n-fold excitation
        return numpy.zeros(0, dtype=int)
    occ_patterns = numpy.asarray(make_strings(range(nelec), n)[::-1])
    vir_patterns = numpy.asarray(make_strings(range(nelec, norb), n))
    hf_str = int('1' * nelec, 2)
    # set n virtual bits, clear the corresponding n occupied bits
    excited = (hf_str | vir_patterns.reshape(-1, 1)) ^ occ_patterns
    return excited.ravel()
if __name__ == '__main__':
    # quick smoke checks for the string generators
    print([bin(i) for i in make_strings(range(2,5), 2)])
    print(make_strings(range(4), 2))
    #print(gen_linkstr_index(range(6), 3))
# index = gen_linkstr_index(range(8), 4)
# idx16 = index[:16]
# print(idx16[:,:,2])
| |
# Copyright (c) 2020 University of Chicago.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import ddt
from blazar import context
from blazar import enforcement
from blazar.enforcement import filters
from blazar import exceptions
from blazar.manager import service
from blazar import tests
from oslo_config import cfg
def get_fake_host(host_id):
    """Return a minimal fake compute-host dict keyed by *host_id*."""
    host = dict(
        id=host_id,
        hypervisor_hostname='hypvsr1',
        service_name='compute1',
        vcpus=4,
        cpu_info='foo',
        hypervisor_type='xen',
        hypervisor_version=1,
        memory_mb=8192,
        local_gb=10,
    )
    return host
def get_fake_lease(**kwargs):
    """Return a fake lease dict; keyword arguments override the defaults."""
    fake_lease = {
        'id': '1',
        'name': 'lease_test',
        # lease starts now and runs for one day
        'start_date': datetime.datetime.utcnow().strftime(
            service.LEASE_DATE_FORMAT),
        'end_date': (
            datetime.datetime.utcnow() + datetime.timedelta(days=1)).strftime(
            service.LEASE_DATE_FORMAT),
        'user_id': '111',
        'project_id': '222',
        'reservations': [{'resource_id': '1234',
                          'resource_type': 'virtual:instance'}],
        'events': [],
        'before_end_date': '2014-02-01 10:37',
        'action': None,
        'status': None,
        'status_reason': None,
        'trust_id': 'exxee111qwwwwe',
    }
    fake_lease.update(kwargs)
    return fake_lease
def get_lease_rsv_allocs():
allocation_candidates = {'virtual:instance': [get_fake_host('1')]}
lease_values = get_fake_lease()
reservations = list(lease_values['reservations'])
del lease_values['reservations']
return lease_values, reservations, allocation_candidates
class FakeFilter(filters.base_filter.BaseFilter):
    """Minimal no-op enforcement filter used as a test stand-in."""

    enforcement_opts = [
        cfg.IntOpt('fake_opt', default=1, help='This is a fake config.'),
    ]

    def __init__(self, conf=None):
        super(FakeFilter, self).__init__(conf=conf)

    def check_create(self, context, lease_values):
        """No-op create check."""

    def check_update(self, context, current_lease_values, new_lease_values):
        """No-op update check."""

    def on_end(self, context, lease_values):
        """No-op end hook."""
@ddt.ddt
class EnforcementTestCase(tests.TestCase):
    # Exercises enforcement.UsageEnforcement wired to a single FakeFilter.
    def setUp(self):
        super(EnforcementTestCase, self).setUp()
        self.cfg = cfg
        self.region = 'RegionOne'
        # install the fake filter as the only known/enabled filter
        filters.FakeFilter = FakeFilter
        filters.all_filters = ['FakeFilter']
        self.enforcement = enforcement.UsageEnforcement()
        cfg.CONF.set_override(
            'enabled_filters', filters.all_filters, group='enforcement')
        cfg.CONF.set_override('os_region_name', self.region)
        self.enforcement.load_filters()
        # minimal identity catalog so format_context can resolve the auth URL
        self.fake_service_catalog = [
            dict(
                type='identity', endpoints=[
                    dict(
                        interface='internal', region=self.region,
                        url='https://fakeauth.com')
                ]
            )
        ]
        self.ctx = context.BlazarContext(
            user_id='111', project_id='222',
            service_catalog=self.fake_service_catalog)
        self.set_context(self.ctx)
        self.fake_host_id = '1'
        self.fake_host = {
            'id': self.fake_host_id,
            'hypervisor_hostname': 'hypvsr1',
            'service_name': 'compute1',
            'vcpus': 4,
            'cpu_info': 'foo',
            'hypervisor_type': 'xen',
            'hypervisor_version': 1,
            'memory_mb': 8192,
            'local_gb': 10,
        }
        # undo the config overrides even if a test fails
        self.addCleanup(self.cfg.CONF.clear_override, 'enabled_filters',
                        group='enforcement')
        self.addCleanup(self.cfg.CONF.clear_override, 'os_region_name')
    def tearDown(self):
        super(EnforcementTestCase, self).tearDown()
    def get_formatted_lease(self, lease_values, rsv, allocs):
        # Build the lease dict that format_lease is expected to produce:
        # reservations re-attached (when given) with their allocations.
        expected_lease = lease_values.copy()
        if rsv:
            expected_lease['reservations'] = rsv
        for res in expected_lease['reservations']:
            res['allocations'] = allocs[res['resource_type']]
        return expected_lease
    def test_load_filters(self):
        self.assertEqual(len(self.enforcement.enabled_filters), 1)
        fake_filter = self.enforcement.enabled_filters.pop()
        self.assertIsInstance(fake_filter, FakeFilter)
        # filter-specific options must be registered on the filter's conf
        self.assertEqual(fake_filter.conf.enforcement.fake_opt, 1)
    def test_format_context(self):
        formatted_context = self.enforcement.format_context(
            context.current(), get_fake_lease())
        expected = dict(user_id='111', project_id='222',
                        region_name=self.region,
                        auth_url='https://fakeauth.com')
        self.assertDictEqual(expected, formatted_context)
    def test_format_lease(self):
        lease_values, rsv, allocs = get_lease_rsv_allocs()
        formatted_lease = self.enforcement.format_lease(lease_values, rsv,
                                                        allocs)
        expected_lease = self.get_formatted_lease(lease_values, rsv, allocs)
        self.assertDictEqual(expected_lease, formatted_lease)
    def test_check_create(self):
        lease_values, rsv, allocs = get_lease_rsv_allocs()
        ctx = context.current()
        check_create = self.patch(self.enforcement.enabled_filters[0],
                                  'check_create')
        self.enforcement.check_create(ctx, lease_values, rsv, allocs)
        formatted_lease = self.enforcement.format_lease(lease_values, rsv,
                                                        allocs)
        formatted_context = self.enforcement.format_context(ctx, lease_values)
        # the filter must see the formatted context/lease, not the raw values
        check_create.assert_called_once_with(formatted_context,
                                             formatted_lease)
        expected_context = dict(user_id='111', project_id='222',
                                region_name=self.region,
                                auth_url='https://fakeauth.com')
        expected_lease = self.get_formatted_lease(lease_values, rsv, allocs)
        self.assertDictEqual(expected_context, formatted_context)
        self.assertDictEqual(expected_lease, formatted_lease)
    def test_check_create_with_exception(self):
        lease_values, rsv, allocs = get_lease_rsv_allocs()
        ctx = context.current()
        check_create = self.patch(self.enforcement.enabled_filters[0],
                                  'check_create')
        # a failing filter must propagate out of check_create
        check_create.side_effect = exceptions.BlazarException
        self.assertRaises(exceptions.BlazarException,
                          self.enforcement.check_create,
                          context=ctx, lease_values=lease_values,
                          reservations=rsv, allocations=allocs)
    def test_check_update(self):
        lease, rsv, allocs = get_lease_rsv_allocs()
        new_lease_values = get_fake_lease(end_date='2014-02-07 13:37')
        new_reservations = list(new_lease_values['reservations'])
        allocation_candidates = {'virtual:instance': [get_fake_host('2')]}
        del new_lease_values['reservations']
        ctx = context.current()
        check_update = self.patch(self.enforcement.enabled_filters[0],
                                  'check_update')
        self.enforcement.check_update(
            ctx, lease, new_lease_values, allocs, allocation_candidates,
            rsv, new_reservations)
        formatted_context = self.enforcement.format_context(ctx, lease)
        formatted_lease = self.enforcement.format_lease(lease, rsv, allocs)
        new_formatted_lease = self.enforcement.format_lease(
            new_lease_values, new_reservations, allocation_candidates)
        expected_context = dict(user_id='111', project_id='222',
                                region_name=self.region,
                                auth_url='https://fakeauth.com')
        expected_lease = self.get_formatted_lease(lease, rsv, allocs)
        expected_new_lease = self.get_formatted_lease(
            new_lease_values, new_reservations, allocation_candidates)
        # the filter receives both the current and the new formatted lease
        check_update.assert_called_once_with(
            formatted_context, formatted_lease, new_formatted_lease)
        self.assertDictEqual(expected_context, formatted_context)
        self.assertDictEqual(expected_lease, formatted_lease)
        self.assertDictEqual(expected_new_lease, new_formatted_lease)
    def test_check_update_with_exception(self):
        lease, rsv, allocs = get_lease_rsv_allocs()
        new_lease_values = get_fake_lease(end_date='2014-02-07 13:37')
        new_reservations = list(new_lease_values['reservations'])
        allocation_candidates = {'virtual:instance': [get_fake_host('2')]}
        del new_lease_values['reservations']
        ctx = context.current()
        check_update = self.patch(self.enforcement.enabled_filters[0],
                                  'check_update')
        check_update.side_effect = exceptions.BlazarException
        self.assertRaises(
            exceptions.BlazarException, self.enforcement.check_update,
            context=ctx, current_lease=lease, new_lease=new_lease_values,
            current_allocations=allocs, new_allocations=allocation_candidates,
            current_reservations=rsv, new_reservations=new_reservations)
    def test_on_end(self):
        allocations = {'virtual:instance': [get_fake_host('1')]}
        lease = get_fake_lease()
        ctx = context.current()
        on_end = self.patch(self.enforcement.enabled_filters[0], 'on_end')
        self.enforcement.on_end(ctx, lease, allocations)
        formatted_context = self.enforcement.format_context(ctx, lease)
        formatted_lease = self.enforcement.format_lease(
            lease, lease['reservations'], allocations)
        on_end.assert_called_once_with(formatted_context, formatted_lease)
        expected_context = dict(user_id='111', project_id='222',
                                region_name=self.region,
                                auth_url='https://fakeauth.com')
        expected_lease = self.get_formatted_lease(lease, None, allocations)
        self.assertDictEqual(expected_context, formatted_context)
        self.assertDictEqual(expected_lease, formatted_lease)
| |
import os
import copy
import shutil
import tempfile
import os.path as op
import bento.convert.commands
from bento.errors \
import \
UsageException
from bento.compat.api.moves \
import \
unittest
from bento.core.package \
import \
PackageDescription
from bento.core.node \
import \
create_root_with_source_tree
from bento.core.testing \
import \
create_fake_package_from_bento_info
from bento.commands.command_contexts \
import \
CmdContext
from bento.commands.options \
import \
OptionsContext
from bento.convert.commands \
import \
ConvertCommand, DetectTypeCommand
from bento.testing.sub_test_case \
import \
SubprocessTestCase
# Metadata used to parametrize both the distutils setup.py fixtures and the
# expected bento.info output in the conversion tests below.
dummy_meta_data = dict(
    name="foo",
    version="1.0",
    description="a few words",
    long_description="some more words",
    url="http://example.com",
    download_url="http://example.com/download",
    author="John Doe",
    maintainer="John Doe",
    author_email="john@example.com",
    maintainer_email="john@example.com",
    license="BSD",
    platforms=["UNIX"],
)
# bento.info expects Platforms as a comma-separated string, not a list
bento_dummy_meta_data = copy.copy(dummy_meta_data)
bento_dummy_meta_data["platforms"] = ",".join(bento_dummy_meta_data["platforms"])
# %-template rendered with bento_dummy_meta_data to produce the metadata
# section of a bento.info file
bento_meta_data_template = """\
Name: %(name)s
Version: %(version)s
Summary: %(description)s
Url: %(url)s
DownloadUrl: %(download_url)s
Description: %(long_description)s
Author: %(author)s
AuthorEmail: %(author_email)s
Maintainer: %(maintainer)s
MaintainerEmail: %(maintainer_email)s
License: %(license)s
Platforms: %(platforms)s"""
def _run_convert_command(top_node, run_node, setup_py, bento_info, cmd_argv):
    """Write setup.py, build a fake package tree and run the convert command."""
    top_node.make_node("setup.py").safe_write(setup_py)
    create_fake_package_from_bento_info(top_node, bento_info)
    package = PackageDescription.from_string(bento_info)

    command = ConvertCommand()
    options = OptionsContext.from_command(command)
    context = CmdContext(None, cmd_argv, options, package, run_node)
    command.run(context)
    command.finish(context)
    context.finish()
class CommonTestCase(unittest.TestCase):
    # Shared fixture: run each test inside a fresh temporary source tree.
    def setUp(self):
        super(CommonTestCase, self).setUp()
        self.save = os.getcwd()          # restored in tearDown
        self.d = tempfile.mkdtemp()
        os.chdir(self.d)
        try:
            self.root = create_root_with_source_tree(self.d, os.path.join(self.d, "build"))
            self.top_node = self.root._ctx.srcnode
            self.build_node = self.root._ctx.bldnode
            self.run_node = self.root.find_node(self.d)
        except Exception:
            # tearDown does not run when setUp raises, so restore cwd here
            os.chdir(self.save)
            raise
    def tearDown(self):
        os.chdir(self.save)
        shutil.rmtree(self.d)
        super(CommonTestCase, self).tearDown()
class TestConvertCommand(SubprocessTestCase, CommonTestCase):
    # Round-trip tests: a distutils setup.py converted back to bento.info
    # must reproduce the original bento.info content exactly.
    def test_simple_package(self):
        bento_meta_data = bento_meta_data_template % bento_dummy_meta_data
        bento_info = """\
%s
ExtraSourceFiles:
    setup.py
Library:
    Packages:
        foo
""" % bento_meta_data
        setup_py = """\
from distutils.core import setup
setup(packages=["foo"], **%s)
""" % dummy_meta_data
        output = "foo.info"
        cmd_argv = ["--output=%s" % output, "-t", "distutils"]
        _run_convert_command(self.top_node, self.run_node, setup_py, bento_info, cmd_argv=cmd_argv)
        gen_bento = self.top_node.find_node(output)
        self.assertEqual(gen_bento.read(), bento_info)
    def test_package_data_distutils(self):
        # package_data entries should be converted into a DataFiles section
        bento_meta_data = bento_meta_data_template % bento_dummy_meta_data
        bento_info = """\
%s
ExtraSourceFiles:
    setup.py
DataFiles: foo_data
    SourceDir: foo
    TargetDir: $sitedir/foo
    Files:
        info.txt
Library:
    Packages:
        foo
""" % bento_meta_data
        setup_py = """\
from distutils.core import setup
setup(packages=["foo"], package_data={"foo": ["*txt"]}, **%s)
""" % dummy_meta_data
        data_node = self.top_node.make_node(op.join("foo", "info.txt"))
        data_node.parent.mkdir()
        data_node.write("")
        output = "foo.info"
        cmd_argv = ["--output=%s" % output, "-t", "distutils"]
        _run_convert_command(self.top_node, self.run_node, setup_py, bento_info, cmd_argv=cmd_argv)
        gen_bento = self.top_node.find_node(output)
        self.assertEqual(gen_bento.read(), bento_info)
class TestMockedConvertCommand(CommonTestCase):
    """Test the convert command UI with the actual conversion mocked out."""
    def setUp(self):
        super(TestMockedConvertCommand, self).setUp()
        # Replace the real converter with a no-op; restored in tearDown.
        self.old_convert = bento.convert.commands.convert
        bento.convert.commands.convert = lambda *a: None
    def tearDown(self):
        # BUG FIX: the previous code assigned to
        # `bento.convert.commands.convert.convert`, which set an attribute on
        # the mock lambda instead of restoring the module-level function,
        # leaking the mock into subsequent tests.
        bento.convert.commands.convert = self.old_convert
        super(TestMockedConvertCommand, self).tearDown()
    def test_simple(self):
        bento_info = """\
Name: foo

Library:
    Packages:
        foo
"""
        setup_py = """\
from distutils.core import setup

setup(packages=["foo"], name="foo")
"""
        _run_convert_command(self.top_node, self.run_node, setup_py, bento_info, [])
    def test_help(self):
        bento_info = """\
Name: foo

Library:
    Packages:
        foo
"""
        setup_py = """\
from distutils.core import setup

setup(packages=["foo"], name="foo")
"""
        _run_convert_command(self.top_node, self.run_node, setup_py, bento_info, ["-h"])
    def test_not_overwritten(self):
        # converting on top of an existing bento.info must be refused
        bento_info = """\
Name: foo

Library:
    Packages:
        foo
"""
        setup_py = """\
from distutils.core import setup

setup(packages=["foo"], name="foo")
"""
        self.top_node.make_node("bento.info").write("")
        self.assertRaises(UsageException,
                          lambda: _run_convert_command(self.top_node,
                              self.run_node, setup_py, bento_info,
                              ["--output=bento.info"]))
class TestMockedDetectTypeCommand(CommonTestCase):
    """Test the detect_type command UI with the detection logic mocked out."""
    def setUp(self):
        super(TestMockedDetectTypeCommand, self).setUp()
        # Replace the real detection routine with a no-op; restored in tearDown.
        self.old_whole_test = bento.convert.commands.whole_test
        bento.convert.commands.whole_test = lambda *a: None
    def tearDown(self):
        # BUG FIX: the previous code assigned to
        # `bento.convert.commands.convert.whole_test` (an attribute on the
        # convert function) instead of restoring the module-level
        # `whole_test`, so the mock leaked into subsequent tests.
        bento.convert.commands.whole_test = self.old_whole_test
        super(TestMockedDetectTypeCommand, self).tearDown()
    def _run_command(self):
        # minimal end-to-end run of DetectTypeCommand on an empty package
        setup_node = self.top_node.make_node("setup.py")
        setup_node.safe_write("")
        create_fake_package_from_bento_info(self.top_node, "")
        package = PackageDescription.from_string("")
        cmd = DetectTypeCommand()
        opts = OptionsContext.from_command(cmd)
        context = CmdContext(None, [], opts, package, self.run_node)
        cmd.run(context)
        cmd.finish(context)
        context.finish()
    def test_simple(self):
        self._run_command()
| |
import datetime
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.core.urlresolvers import reverse
from django.conf import settings
from django_messages.models import Message
from django_messages.forms import ComposeForm
from django_messages.utils import format_quote
from django.contrib import messages
# Use django-notification for user notifications when it is installed;
# otherwise `notification` stays None and notification hooks are skipped.
if "notification" in settings.INSTALLED_APPS:
    from notification import models as notification
else:
    notification = None
def inbox(request, template_name='django_messages/inbox.html'):
    """
    Displays a list of received messages for the current user.
    Optional Arguments:
        ``template_name``: name of the template to use.
    """
    context = {'message_list': Message.objects.inbox_for(request.user)}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
inbox = login_required(inbox)
def outbox(request, template_name='django_messages/outbox.html'):
    """
    Displays a list of sent messages by the current user.
    Optional arguments:
        ``template_name``: name of the template to use.
    """
    context = {'message_list': Message.objects.outbox_for(request.user)}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
outbox = login_required(outbox)
def trash(request, template_name='django_messages/trash.html'):
    """
    Displays a list of deleted messages.
    Optional arguments:
        ``template_name``: name of the template to use
    Hint: A cron job could periodically clean up old messages which are
    deleted by both sender and recipient.
    """
    context = {'message_list': Message.objects.trash_for(request.user)}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
trash = login_required(trash)
def compose(request, recipient=None, form_class=ComposeForm,
        template_name='django_messages/compose.html', success_url=None, recipient_filter=None):
    """
    Displays and handles the ``form_class`` form to compose new messages.
    Required Arguments: None
    Optional Arguments:
        ``recipient``: username or id of a `django.contrib.auth` User, who should
                       receive the message, optionally multiple usernames/ids
                       could be separated by a '+'
        ``form_class``: the form-class to use
        ``template_name``: the template to use
        ``success_url``: where to redirect after successful submission
    """
    if request.method == "POST":
        form = form_class(request.POST, recipient_filter=recipient_filter)
        if form.is_valid():
            form.save(sender=request.user)
            messages.add_message(request, messages.INFO,
                _(u"Message successfully sent."))
            if success_url is None:
                success_url = reverse('messages_inbox')
            # FIX: dict.has_key() was removed in Python 3 — use `in` instead
            if 'next' in request.GET:
                success_url = request.GET['next']
            return HttpResponseRedirect(success_url)
    else:
        form = form_class()
        if recipient is not None:
            # resolve '+'-separated usernames first, fall back to primary keys
            recipients = [u for u in User.objects.filter(username__in=[r.strip() for r in recipient.split('+')])]
            if not recipients:
                recipients = [u for u in User.objects.filter(pk__in=[pk for pk in recipient.split('+')])]
            form.fields['recipient'].initial = recipients
    return render_to_response(template_name, {
        'form': form,
    }, context_instance=RequestContext(request))
compose = login_required(compose)
def reply(request, message_id, form_class=ComposeForm,
        template_name='django_messages/compose.html', success_url=None,
        recipient_filter=None, quote_helper=format_quote):
    """
    Prepares the ``form_class`` form for writing a reply to a given message
    (specified via ``message_id``). Uses the ``format_quote`` helper from
    ``messages.utils`` to pre-format the quote. To change the quote format
    assign a different ``quote_helper`` kwarg in your url-conf.
    """
    parent = get_object_or_404(Message, id=message_id)
    # only the two participants of the conversation may reply
    if parent.sender != request.user and parent.recipient != request.user:
        raise Http404
    if request.method == "POST":
        # FIX: removed unused `sender = request.user` local
        form = form_class(request.POST, recipient_filter=recipient_filter)
        if form.is_valid():
            form.save(sender=request.user, parent_msg=parent)
            messages.add_message(request, messages.INFO,
                _(u"Message successfully sent."))
            if success_url is None:
                success_url = reverse('messages_inbox')
            return HttpResponseRedirect(success_url)
    else:
        # prefix the subject with 'Re:' only once
        if parent.subject.startswith('Re:'):
            subject = parent.subject
        else:
            subject = _(u"Re: %(subject)s") % {'subject': parent.subject}
        form = form_class(initial={
            'body': quote_helper(parent.sender, parent.body),
            'subject': subject,
            'recipient': [parent.sender,]
        })
    return render_to_response(template_name, {
        'form': form,
    }, context_instance=RequestContext(request))
reply = login_required(reply)
def delete(request, message_id, success_url=None):
    """
    Marks a message as deleted by sender or recipient. The message is not
    really removed from the database, because two users must delete a message
    before it's safe to remove it completely.
    A cron-job should prune the database and remove old messages which are
    deleted by both users.
    As a side effect, this makes it easy to implement a trash with undelete.
    You can pass ?next=/foo/bar/ via the url to redirect the user to a different
    page (e.g. `/foo/bar/`) than ``success_url`` after deletion of the message.
    """
    user = request.user
    now = datetime.datetime.now()
    message = get_object_or_404(Message, id=message_id)
    deleted = False
    if success_url is None:
        success_url = reverse('messages_inbox')
    # FIX: dict.has_key() was removed in Python 3 — use `in` instead
    if 'next' in request.GET:
        success_url = request.GET['next']
    if message.sender == user:
        message.sender_deleted_at = now
        deleted = True
    if message.recipient == user:
        message.recipient_deleted_at = now
        deleted = True
    if deleted:
        message.save()
        messages.add_message(request, messages.INFO,
            _(u"Message successfully deleted."))
        return HttpResponseRedirect(success_url)
    # the requesting user is neither sender nor recipient
    raise Http404
delete = login_required(delete)
def undelete(request, message_id, success_url=None):
    """
    Recovers a message from trash. This is achieved by removing the
    ``(sender|recipient)_deleted_at`` from the model.
    """
    user = request.user
    message = get_object_or_404(Message, id=message_id)
    undeleted = False
    if success_url is None:
        success_url = reverse('messages_inbox')
    # FIX: dict.has_key() was removed in Python 3 — use `in` instead
    if 'next' in request.GET:
        success_url = request.GET['next']
    if message.sender == user:
        message.sender_deleted_at = None
        undeleted = True
    if message.recipient == user:
        message.recipient_deleted_at = None
        undeleted = True
    if undeleted:
        message.save()
        messages.add_message(request, messages.INFO,
            _(u"Message successfully recovered."))
        return HttpResponseRedirect(success_url)
    # the requesting user is neither sender nor recipient
    raise Http404
undelete = login_required(undelete)
def view(request, message_id, template_name='django_messages/view.html'):
    """
    Shows a single message. ``message_id`` argument is required.
    Only the sender or the recipient may see the message; anyone else
    gets a 404. When the recipient views an unread message, ``read_at``
    is set to the current datetime.
    """
    user = request.user
    now = datetime.datetime.now()
    message = get_object_or_404(Message, id=message_id)
    if not (message.sender == user or message.recipient == user):
        raise Http404
    # first view by the recipient marks the message as read
    if message.recipient == user and message.read_at is None:
        message.read_at = now
        message.save()
    return render_to_response(template_name, {
        'message': message,
    }, context_instance=RequestContext(request))
view = login_required(view)
| |
#!/usr/bin/env python
import eventlet
import time
import threading
from oslo.config import cfg
from virtman.drivers import fcg
from virtman.drivers import dmsetup
from virtman.drivers import iscsi
from virtman.drivers import volt
from virtman.path import connection_to_str
from virtman.path import Path
from virtman.utils import utils
from virtman.utils.enum import Enum
from virtman.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Lifecycle states of a base image, and the actions that drive transitions.
STATUS = Enum(['empty', 'building', 'ok', 'destroying', 'error'])
ACTIONS = Enum(['build', 'destroy'])
class BaseImage(object):
    """Abstract interface for deployable base images."""
    def __init__(self):
        pass
    def deploy_base_image(self):
        # BUG FIX: previously *returned* a NotImplementedError instance
        # instead of raising it, silently handing callers an exception object.
        raise NotImplementedError()
    def destroy_base_image(self):
        # BUG FIX: raise instead of return (see deploy_base_image).
        raise NotImplementedError()
class BlockDeviceBaseImage(BaseImage):
    """Base image materialized as a chain of local block devices.

    The chain, built in _deploy_base_image(), is:
        iSCSI path(s) -> dm multipath -> flashcache (fcg) -> dm origin
    and, unless the image already lives on this host, the cached device is
    additionally re-exported as an iSCSI target and registered with the volt
    master so that peers can fetch from this node.
    Destruction (destroy_base_image) walks the same chain in reverse.
    """
    def __init__(self, image_name, image_connections):
        self.image_name = image_name
        self.image_connections = utils.reform_connections(image_connections)
        # True when one of the image connections points at this very host.
        self.is_local_has_image = False
        # Mapping "connection string" (see connection_to_str) -> Path object.
        self.paths = {}
        # Flags tracking which stages of the device chain currently exist.
        self.has_multipath = False
        self.has_cache = False
        self.has_origin = False
        self.has_target = False
        self.is_login = False
        self.iqn = self.image_connections[0]['target_iqn']
        self.multipath_name = 'multipath_' + self.image_name
        self.origin_name = 'origin_' + self.image_name
        self.multipath_path = None
        self.cached_path = None
        self.origin_path = None
        #TODO: all virtual machines called image
        self.peer_id = ''
        self.target_id = 0
        # Lifecycle state (STATUS enum), guarded by status_lock.
        self.__status = STATUS.empty
        self.status_lock = threading.Lock()
        LOG.debug("Virtman: creating a base image of image_name %s" % self.image_name)

    def change_status(self, src_status, dst_status):
        """Atomically transition __status from src_status to dst_status.

        Returns True when the transition happened, False when the current
        status was not src_status (another thread changed it first).
        """
        with self.status_lock:
            flag = False
            if self.__status == src_status:
                self.__status = dst_status
                flag = True
            LOG.debug("Virtman: source status = %s, dst status = %s, flag = %s" % (src_status, dst_status, flag))
            return flag

    def adjust_for_heartbeat(self, parents):
        """Rebuild the multipath device from the parent connections
        reported by the latest heartbeat."""
        LOG.debug('Virtman: adjust_for_heartbeat according to connections: %s ' % parents)
        parent_connections = utils.reform_connections(parents)
        self.rebuild_multipath(parent_connections)

    def deploy_base_image(self):
        """Deploy the base image once, serialised through __status.

        Original rollback-chain sketch kept for reference:
        build_chain = Chain()
        build_chain.add_step(lambda: self.rebuild_paths(), lambda: self.destroy_multipath())
        build_chain.add_step(lambda: self.create_cache(), lambda: self.destroy_cache())
        build_chain.add_step(lambda: self.create_origin(), lambda: self.destroy_origin())
        build_chain.add_step(lambda: self.create_target(), lambda: self.destroy_target())
        build_chain.add_step(lambda: self.login_master(), lambda: self.logout_master())
        build_chain.do()
        """
        success = self.change_status(STATUS.empty, STATUS.building)
        if not success:
            # Another greenthread is building; wait until it leaves the
            # 'building' state before proceeding.
            while self.__status == STATUS.building:
                LOG.debug("Virtman: in deploy_base_image, sleep 3 seconds waiting for build completed")
                eventlet.sleep(3)
        LOG.debug("Virtman: ..........begin to deploy base image")
        try:
            origin_path = self._deploy_base_image()
        except Exception, e:
            LOG.error(e)
            self.change_status(STATUS.building, STATUS.error)
            raise
        else:
            self.change_status(STATUS.building, STATUS.ok)
        LOG.debug("Virtman: ..........deploy base image completed")
        return origin_path

    def _deploy_base_image(self):
        #TODO: Roll back if failed !
        """
        deploy image in compute node, return the origin path to create snapshot
        :param image_connection: the connection towards to the base image
        :returns: origin path to create snapshot
        """
        LOG.debug("Virtman: in deploy_base_image, image name = %s, has multipath = %s, has origin = %s, has cache = %s, "
                  "is_login = %s" % (self.image_name, self.has_multipath, self.has_origin, self.has_cache, self.is_login))
        #Check if it had origin or not!
        if self.has_origin:
            return self.origin_path
        #save the base_image paths
        # Prefer a connection whose portal lives on this very host, if any.
        found = None
        for connection in self.image_connections:
            if connection['target_portal'].find(CONF.host_ip) >= 0:
                found = connection
                break
        if found is not None:
            self.image_connections = [found]
            self.is_local_has_image = True
        LOG.debug("Virtman: my host_ip = %s, is_local_has_image = %s!, now image_connections = %s"
                  % (CONF.host_ip, self.is_local_has_image, self.image_connections))
        #Reform connections
        # Local images connect straight to the image itself; remote ones ask
        # the volt master for parent peers to fetch from.
        if self.is_local_has_image:
            parent_connections = []
        else:
            parent_connections = utils.reform_connections(self._get_parent())
        LOG.debug("Virtman: parents for volt is %s" % parent_connections)
        self.rebuild_multipath(parent_connections)
        self._create_cache()
        self._create_origin()
        self._create_target()
        self._login_master()
        #print "target_id = ", self.target_id
        #print "origin_path = ", self.origin_path, " origin_name = ", self.origin_name
        #print "cached_path = ", self.cached_path, " No name"
        #print "multipath_path = ", self.multipath_path, "multipath_name = ", self.multipath_name
        print "Virtman: baseimage OK!"
        return self.origin_path

    def destroy_base_image(self):
        """Tear the device chain down in reverse build order.

        Returns True on complete teardown, False when the image is still in
        use (an initiator is connected to the exported target) or some stage
        could not be removed yet.
        """
        LOG.debug("Virtman: destroy base_image = %s, peer_id = %s" % (self.image_name, self.peer_id))
        self._logout_master()
        if self.has_target:
            if iscsi.is_connected(self.target_id):
                # Somebody is still attached to our exported target; abort.
                LOG.debug("Virtman: destroy base image Failed! base_image = %s, peer_id = %s" % (self.image_name, self.peer_id))
                return False
            else:
                self._delete_target()
        if self.has_origin:
            self._delete_origin()
        time.sleep(1)
        if not self.has_origin and not self.has_target:
            self._delete_cache()
            if not self.has_cache:
                self._delete_multipath()
                if not self.has_multipath:
                    for key in self.paths.keys():
                        self.paths[key].disconnect()
                        del self.paths[key]
                    LOG.debug("Virtman: destroy base image SUCCESS! base_image = %s, peer_id = %s" % (self.image_name, self.peer_id))
                    return True
        return False

    def rebuild_multipath(self, parent_connections):
        """Reconcile self.paths with parent_connections and rebuild the
        multipath device on top of the connected paths.

        :param parent_connections: list
        """
        LOG.debug("Virtman: begin to rebuild multipath...")
        #If it has image on the local node or no path to connect, connect to root
        if self.is_local_has_image or len(parent_connections) == 0:
            parent_connections = self.image_connections
            LOG.debug("Virtman: the parents were modified! now parents = %s" % parent_connections)
        #Get keys of paths to remove, and add new paths
        paths_to_remove = []
        for key in self.paths.keys():
            found = False
            for connection in parent_connections:
                if key == connection_to_str(connection):
                    found = True
                    break
            if not found:
                paths_to_remove.append(key)
        for connection in parent_connections:
            if not isinstance(connection, dict):
                raise (Exception("Unknown %s type of %s " % (type(connection), connection)))
            key = connection_to_str(connection)
            if not self.paths.has_key(key):
                self.paths[key] = Path(connection)
        #Connect new paths
        for key in self.paths.keys():
            if key not in paths_to_remove and not self.paths[key].connected:
                self.paths[key].connect()
        #Rebuild multipath device
        disks = [self.paths[key].device_path for key in self.paths.keys()
                 if key not in paths_to_remove and self.paths[key].connected]
        if len(disks) > 0:
            if not self.has_multipath:
                self._create_multipath(disks)
            else:
                self._reload_multipath(disks)
            #TODO:fix here, wait for multipath device ready
            time.sleep(2)
        #Disconnect paths to remove
        for key in paths_to_remove:
            if self.paths[key].connected:
                self.paths[key].disconnect()
            del self.paths[key]
        LOG.debug("Virtman: rebuild multipath completed, multipath = %s" % self.multipath_path)

    def _create_multipath(self, disks):
        """Create the dm multipath device over ``disks`` (idempotent)."""
        if not self.has_multipath:
            self.multipath_path = dmsetup.multipath(self.multipath_name, disks)
            self.has_multipath = True
            LOG.debug("Virtman: create multipath according connection :")
            LOG.debug(disks)
        return self.multipath_path

    def _reload_multipath(self, disks):
        """Reload the existing multipath table with a new disk set."""
        dmsetup.reload_multipath(self.multipath_name, disks)

    def _delete_multipath(self):
        """Remove the multipath device table."""
        LOG.debug("Virtman: delete multipath %s start!" % self.multipath_name)
        dmsetup.remove_table(self.multipath_name)
        self.has_multipath = False
        LOG.debug("Virtman: delete multipath %s completed !" % self.multipath_name)

    def _create_cache(self):
        """Put the multipath device behind the flashcache group (idempotent)."""
        if not self.has_cache:
            LOG.debug("Virtman: create cache for base image %s" % self.image_name)
            LOG.debug("Virtman: create cache according to multipath %s" % self.multipath_path)
            self.cached_path = fcg.add_disk(self.multipath_path)
            self.has_cache = True
            LOG.debug("Virtman: create cache completed, cache path = %s" % self.cached_path)
        return self.cached_path

    def _delete_cache(self):
        """Detach the multipath device from the flashcache group."""
        LOG.debug("Virtman: start to delete cache according to multipath %s " % self.multipath_path)
        fcg.rm_disk(self.multipath_path)
        self.has_cache = False
        LOG.debug("Virtman: delete cache according to multipath %s completed" % self.multipath_path)

    def _create_origin(self):
        """Create the dm origin device over the cached device (idempotent)."""
        if not self.has_origin:
            LOG.debug("Virtman: start to create origin, cache path = %s" % self.cached_path)
            self.origin_path = dmsetup.origin(self.origin_name, self.cached_path)
            self.has_origin = True
            LOG.debug("Virtman: create origin complete, origin path = %s" % self.origin_path)
        return self.origin_path

    def _delete_origin(self):
        """Remove the dm origin device table."""
        LOG.debug("Virtman: start to remove origin %s " % self.origin_name)
        dmsetup.remove_table(self.origin_name)
        self.has_origin = False
        LOG.debug("Virtman: remove origin %s completed" % self.origin_name)

    def _create_target(self):
        """Export the cached device as an iSCSI target so peers can fetch
        from this node; skipped when the image is local to this host."""
        if self.is_local_has_image:
            return
        if not self.has_target:
            LOG.debug("Virtman: start to create target, cache path = %s" % self.cached_path)
            if iscsi.exists(self.iqn):
                # Target already exported (e.g. left over from a prior run).
                self.has_target = True
            else:
                self.target_id = iscsi.create_iscsi_target(self.iqn, self.cached_path)
                self.has_target = True
                LOG.debug("Virtman: create target complete, target id = %s" % self.target_id)

    def _delete_target(self):
        """Remove the exported iSCSI target."""
        iscsi.remove_iscsi_target(self.target_id, 1, self.image_name, self.image_name)
        self.has_target = False
        LOG.debug("Virtman: successful remove target %s " % self.target_id)

    def _login_master(self):
        """Register this node's exported target with the volt master;
        skipped when the image is local to this host."""
        if self.is_local_has_image:
            return
        LOG.debug("Virtman: try to login to master server")
        if not self.is_login:
            info = volt.login(session_name=self.image_name, peer_id=self.peer_id,
                              host=CONF.host_ip, port='3260', iqn=self.iqn, lun='1')
            LOG.debug("Virtman: login to master server %s" % info)
            self.is_login = True

    def _logout_master(self):
        """Deregister this node from the volt master, if logged in."""
        if self.is_login:
            volt.logout(self.image_name, peer_id=self.peer_id)
            self.is_login = False
            LOG.debug("Virtman: logout master session = %s, peer_id = %s" % (self.image_name, self.peer_id))

    def _get_parent(self):
        """Ask the volt master for parent peers, retrying up to 10 times.

        Side effect: stores the peer id assigned by volt in self.peer_id.
        Raises Exception when all retries are exhausted.
        """
        max_try_count = 10
        host_ip = CONF.host_ip
        try_times = 0
        while True:
            try:
                self.peer_id, parent_list = volt.get(session_name=self.image_name, host=host_ip)
                LOG.debug(
                    "Virtman: in get_parent function, peer_id = %s, parent_list = %s:" % (self.peer_id, parent_list))
                return parent_list
            except Exception, e:
                LOG.debug(
                    "Virtman: get parent info from volt server failed due to %s, tried %d times" % (e, try_times))
                if try_times < max_try_count:
                    time.sleep(3)
                    try_times += 1
                    continue
                else:
                    raise Exception("Virtman: Get parent info failed due to %s! " % e)
class Qcow2BaseImage(BaseImage):
    """Placeholder for a qcow2-file backed base image; not implemented yet."""
    pass
| |
# Copyright (C) 2016 iNuron NV
#
# This file is part of Open vStorage Open Source Edition (OSE),
# as available from
#
# http://www.openvstorage.org and
# http://www.openvstorage.com.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)
# as published by the Free Software Foundation, in version 3 as it comes
# in the LICENSE.txt file of the Open vStorage OSE distribution.
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
"""
Contains the loghandler module
"""
import os
import sys
import time
import socket
import inspect
import logging
import itertools
class OVSFormatter(logging.Formatter):
"""
Formatter for the logger
"""
def formatTime(self, record, datefmt=None):
"""
Overrides the default formatter to include UTC offset
"""
_ = datefmt
ct = self.converter(record.created)
tz = time.altzone if time.daylight and ct.tm_isdst > 0 else time.timezone
offset = '{0}{1:0>2}{2:0>2}'.format('-' if tz > 0 else '+', abs(tz) // 3600, abs(tz // 60) % 60)
base_time = time.strftime('%Y-%m-%d %H:%M:%S', ct)
return '{0} {1:03.0f}00 {2}'.format(base_time, record.msecs, offset)
def format(self, record):
"""
Format a record
:param record: Record to format
:return: Formatted record
"""
if 'hostname' not in record.__dict__:
record.hostname = socket.gethostname()
if 'sequence' not in record.__dict__:
record.sequence = LogHandler.counter.next()
return super(OVSFormatter, self).format(record)
class LogHandler(object):
    """
    Log handler.
    WARNING: This log handler might be highly unreliable if not used correctly. It can log to redis, but if Redis is
    not working as expected, it will result in lost log messages. If you want reliable logging, do not use Redis at all
    or log to files and have a separate process forward them to Redis (so logs can be re-send if Redis is unavailable)
    """
    _logs = {}  # Used by unittests
    cache = {}  # key '<source>_<name>' -> LogHandler instance (see get())
    counter = itertools.count()  # shared, monotonically increasing sequence
    propagate_cache = {}  # remembers the propagate flag requested at get()
    defaults = {'logging_target': {'type': 'console'}}

    def __init__(self, source, name, propagate):
        """
        Initializes the logger
        """
        # Only LogHandler.get() within this file may construct instances;
        # inspect the caller's frame to enforce that.
        parent_invoker = inspect.stack()[1]
        if not __file__.startswith(parent_invoker[1]) or parent_invoker[3] != 'get':
            raise RuntimeError('Cannot invoke instance from outside this class. Please use LogHandler.get(source, name=None) instead')
        if name is None:
            name = 'logger'
        formatter = OVSFormatter('%(asctime)s - %(hostname)s - %(process)s/%(thread)d - {0}/%(name)s - %(sequence)s - %(levelname)s - %(message)s'.format(source))
        target_definition = LogHandler.load_target_definition(source, allow_override=True)
        # Pick the concrete handler matching the configured target type.
        if target_definition['type'] == 'redis':
            from redis import Redis
            from ovs.log.redis_logging import RedisListHandler
            self.handler = RedisListHandler(queue=target_definition['queue'],
                                            client=Redis(host=target_definition['host'],
                                                         port=target_definition['port']))
        elif target_definition['type'] == 'file':
            self.handler = logging.FileHandler(target_definition['filename'])
        else:
            self.handler = logging.StreamHandler(sys.stdout)
        self.unittest_mode = False
        if os.environ.get('RUNNING_UNITTESTS') == 'True':
            self.unittest_mode = True
        self.handler.setFormatter(formatter)
        self.logger = logging.getLogger(name)
        self.logger.addHandler(self.handler)
        self.logger.propagate = propagate
        self.logger.setLevel(getattr(logging, 'DEBUG'))
        self._key = '{0}_{1}'.format(source, name)

    @staticmethod
    def load_target_definition(source, allow_override=False, forced_target_type=None):
        """
        Load the logger target
        :param source: Source
        :type source: str
        :param allow_override: Allow override
        :type allow_override: bool
        :param forced_target_type: Override target type
        :type forced_target_type: str
        :return: Target definition
        :rtype: dict
        """
        logging_target = LogHandler.defaults['logging_target']
        try:
            # Best-effort: fall back to the defaults when the configuration
            # service is unavailable.
            from ovs.extensions.generic.configuration import Configuration
            logging_target = Configuration.get('/ovs/framework/logging')
        except:
            pass
        target_type = logging_target.get('type', 'console')
        if allow_override is True and 'OVS_LOGTYPE_OVERRIDE' in os.environ:
            target_type = os.environ['OVS_LOGTYPE_OVERRIDE']
        if allow_override is True and forced_target_type is not None:
            target_type = forced_target_type
        if target_type == 'redis':
            queue = logging_target.get('queue', '/ovs/logging')
            if '{0}' in queue:
                queue = queue.format(source)
            return {'type': 'redis',
                    'queue': '/{0}'.format(queue.lstrip('/')),
                    'host': logging_target.get('host', 'localhost'),
                    'port': logging_target.get('port', 6379)}
        if target_type == 'file':
            return {'type': 'file',
                    'filename': LogHandler.load_path(source)}
        return {'type': 'console'}

    @staticmethod
    def get_sink_path(source, allow_override=False, forced_target_type=None):
        """
        Retrieve the path to sink logs to
        :param source: Source
        :type source: str
        :param allow_override: Allow override
        :type allow_override: bool
        :param forced_target_type: Override target type
        :type forced_target_type: str
        :return: The path to sink to
        :rtype: str
        """
        target_definition = LogHandler.load_target_definition(source, allow_override, forced_target_type)
        if target_definition['type'] == 'redis':
            sink = 'redis://{0}:{1}{2}'.format(target_definition['host'], target_definition['port'], target_definition['queue'])
        elif target_definition['type'] == 'file':
            sink = target_definition['filename']
        else:
            sink = 'console:'
        return sink

    @staticmethod
    def load_path(source):
        """
        Load path
        :param source: Source
        :return: Path
        """
        # Ensure /var/log/ovs/<source>.log exists and is world-writable.
        log_path = '/var/log/ovs'
        log_filename = '{0}/{1}.log'.format(log_path, source)
        if not os.path.exists(log_path):
            os.mkdir(log_path, 0777)
        if not os.path.exists(log_filename):
            open(log_filename, 'a').close()
            os.chmod(log_filename, 0o666)
        return log_filename

    @staticmethod
    def get(source, name=None, propagate=False):
        """
        Retrieve a loghandler instance
        """
        # One cached instance per '<source>_<name>' combination.
        key = '{0}_{1}'.format(source, name)
        if key not in LogHandler.cache:
            logger = LogHandler(source, name, propagate)
            LogHandler.cache[key] = logger
        if key not in LogHandler.propagate_cache:
            LogHandler.propagate_cache[key] = propagate
        return LogHandler.cache[key]

    def _fix_propagate(self):
        """
        Obey propagate flag as initially called
        - celery will overwrite it to catch the logging
        """
        propagate = LogHandler.propagate_cache.get(self._key, None)
        if propagate is not None:
            self.logger.propagate = propagate

    def _log(self, msg, severity, *args, **kwargs):
        """
        Log pass-through
        """
        if self.unittest_mode is True:
            # Capture messages for test inspection instead of only emitting.
            if self._key not in LogHandler._logs:
                LogHandler._logs[self._key] = {}
            LogHandler._logs[self._key][msg.strip()] = severity
        self._fix_propagate()
        if 'print_msg' in kwargs:
            del kwargs['print_msg']
            print msg
        # Always (re)attach hostname and sequence so the formatter's extra
        # fields are present even when the caller passed its own `extra`.
        extra = kwargs.get('extra', {})
        extra['hostname'] = socket.gethostname()
        extra['sequence'] = LogHandler.counter.next()
        kwargs['extra'] = extra
        try:
            return getattr(self.logger, severity)(msg, *args, **kwargs)
        except:
            # Deliberate best-effort: logging must never crash the caller.
            pass

    def info(self, msg, *args, **kwargs):
        """ Info """
        return self._log(msg, 'info', *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """ Error """
        return self._log(msg, 'error', *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        """ Debug """
        return self._log(msg, 'debug', *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """ Warning """
        return self._log(msg, 'warning', *args, **kwargs)

    def log(self, msg, *args, **kwargs):
        """ Log """
        return self._log(msg, 'log', *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """ Critical """
        return self._log(msg, 'critical', *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """ Exception """
        return self._log(msg, 'exception', *args, **kwargs)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# Wrapper around the crawlutils module that provides:
# (a) an autonomous push mode via command-line invocation, and
# (b) a network pull mode via an HTTP REST interface (xxx CURRENTLY DISABLED)
##
import os
import sys
import logging
import logging.handlers
import time
import traceback
import multiprocessing
import tempfile
import argparse
import cPickle as pickle
import json
import copy
# External dependencies that must be pip install'ed separately
import defaults
import misc
import crawlutils
from crawlmodes import Modes
CRAWLER_HOST = misc.get_host_ipaddr()
logger = None
def setup_logger(logger_name, logfile='crawler.log', process_id=None):
    """Attach a rotating-file handler to ``logger_name`` at INFO level.

    When ``process_id`` is given, the log file name becomes
    ``<base>-<process_id><ext>`` so each worker process logs to its own file.
    """
    target = logging.getLogger(logger_name)
    target.setLevel(logging.INFO)
    if process_id is None:
        destination = logfile
    else:
        base, extension = os.path.splitext(logfile)
        destination = '{0}-{1}{2}'.format(base, process_id, extension)
    handler = logging.handlers.RotatingFileHandler(
        filename=destination, maxBytes=10e6, backupCount=1)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(processName)-10s %(levelname)-8s %(message)s'))
    target.addHandler(handler)
def crawler_worker(process_id, logfile, params):
    # Entry point executed inside each multiprocessing.Process: set up
    # per-process log files for the crawler libraries, then run one snapshot.
    setup_logger('crawlutils', logfile, process_id)
    setup_logger('yapsy', logfile, process_id)
    # Starting message
    # NOTE(review): relies on the module-global `logger`, initialized in the
    # __main__ section, being inherited by the child via fork(); under a
    # spawn start method it would still be None -- confirm workers are only
    # started on fork-based platforms.
    logger.info('*' * 50)
    logger.info('Crawler #%d started.' % (process_id))
    logger.info('*' * 50)
    crawlutils.snapshot(**params)
def start_autonomous_crawler(num_processes, logfile):
    """Run the crawl in-process, or fanned out over worker processes.

    Reads the module globals ``params``/``options`` populated by the
    command-line parsing in the __main__ section. In OUTCONTAINER mode it
    spawns ``num_processes`` workers, partitions containers between them by
    pid, and babysits them until all exit; if any worker dies with a
    non-zero exit code the remaining workers are killed and a RuntimeError
    is raised.
    """
    if params['crawlmode'] == 'OUTCONTAINER':
        jobs = []
        for index in xrange(num_processes):
            # XXX use options.get() instead
            options['partition_strategy']['name'] = 'equally_by_pid'
            partition_args = options['partition_strategy']['args']
            partition_args['process_id'] = index
            partition_args['num_processes'] = num_processes
            p = multiprocessing.Process(
                name='crawler-%s' %
                index, target=crawler_worker, args=(
                    index, logfile, params))
            jobs.append((p, index))
            p.start()
            logger.info('Crawler %s (pid=%s) started', index, p.pid)
        while jobs:
            # Bug fix: the original called `del jobs[index]` while
            # enumerate() was iterating the same list, which skips the
            # element following every deletion. Collect finished workers
            # first and remove them after the scan instead.
            finished = []
            for (job, process_id) in jobs:
                if job.is_alive():
                    continue
                exitcode = job.exitcode
                pname = job.name
                pid = job.pid
                if job.exitcode:
                    logger.info(
                        '%s terminated unexpectedly with errorcode %s' %
                        (pname, exitcode))
                    # Kill the remaining workers before bailing out.
                    # (The original shadowed `process_id` here; use a
                    # distinct name for the sibling's index.)
                    for (other_job, other_id) in jobs:
                        if other_job != job:
                            logger.info(
                                'Terminating crawler %s (pid=%s)',
                                other_id,
                                other_job.pid)
                            os.kill(other_job.pid, 9)
                    logger.info('Exiting as all jobs were terminated.'
                                )
                    raise RuntimeError(
                        '%s terminated unexpectedly with errorcode %s' %
                        (pname, exitcode))
                logger.info(
                    'Crawler %s (pid=%s) exited normally.',
                    process_id,
                    pid)
                finished.append((job, process_id))
            for entry in finished:
                jobs.remove(entry)
            time.sleep(0.1)
        logger.info('Exiting as there are no more processes running.')
    else:
        # INVM, OUTVM, and others: single-process snapshot.
        setup_logger('crawlutils', logfile, 0)
        crawlutils.snapshot(**params)
# Main listen/exec loop
if __name__ == '__main__':
    # Crawling needs root privileges (namespaces, /proc, device access).
    euid = os.geteuid()
    if euid != 0:
        print 'Need to run this as root.'
        exit(1)
    # ---- Command-line interface -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--options',
        dest='options',
        type=str,
        default=None,
        help='JSON dict of crawler options (see README for defaults)')
    parser.add_argument(
        '--url',
        dest='url',
        type=str,
        nargs='+',
        default=None,
        help='Send the snapshot data to URL. Defaults to file://frame',
    )
    parser.add_argument(
        '--namespace',
        dest='namespace',
        type=str,
        nargs='?',
        default=None,
        help='Data source this crawler is associated with. Defaults to '
             '/localhost',
    )
    parser.add_argument(
        '--features',
        dest='features',
        type=str,
        default=defaults.DEFAULT_FEATURES_TO_CRAWL,
        help='Comma-separated list of feature-types to crawl. Defaults to '
             '{0}'.format(defaults.DEFAULT_FEATURES_TO_CRAWL))
    parser.add_argument(
        '--since',
        dest='since',
        type=str,
        choices=[
            'EPOCH',
            'BOOT',
            'LASTSNAPSHOT'],
        default=None,
        help='Only crawl features touched since {EPOCH,BOOT,LASTSNAPSHOT}. '
             'Defaults to BOOT',
    )
    parser.add_argument(
        '--frequency',
        dest='frequency',
        type=int,
        default=None,
        help='Target time period for iterations. Defaults to -1 which '
             'means only run one iteration.')
    parser.add_argument(
        '--compress',
        dest='compress',
        type=str,
        choices=[
            'true',
            'false'],
        default='true' if defaults.DEFAULT_COMPRESS else 'false',
        help='Whether to GZIP-compress the output frame data, must be one of '
             '{true,false}. Defaults to true',
    )
    parser.add_argument('--logfile', dest='logfile', type=str,
                        default='crawler.log',
                        help='Logfile path. Defaults to crawler.log')
    parser.add_argument(
        '--crawlmode',
        dest='crawlmode',
        type=str,
        choices=[
            Modes.INVM,
            Modes.OUTVM,
            Modes.MOUNTPOINT,
            Modes.DEVICE,
            Modes.FILE,
            Modes.ISCSI,
            Modes.OUTCONTAINER,
        ],
        default=Modes.INVM,
        help='The crawler mode: '
             '{INVM,OUTVM,MOUNTPOINT,DEVICE,FILE,ISCSI,OUTCONTAINER}. '
             'Defaults to INVM',
    )
    parser.add_argument(
        '--mountpoint',
        dest='mountpoint',
        type=str,
        default=defaults.DEFAULT_MOUNTPOINT,
        help='Mountpoint location (required for --crawlmode MOUNTPOINT)')
    parser.add_argument(
        '--inputfile',
        dest='inputfile',
        type=str,
        default=None,
        help='Path to file that contains frame data (required for '
             '--crawlmode FILE)')
    parser.add_argument(
        '--format',
        dest='format',
        type=str,
        default='csv',
        choices=['csv', 'graphite'],
        help='Emitted data format.',
    )
    parser.add_argument(
        '--crawlContainers',
        dest='crawlContainers',
        type=str,
        nargs='?',
        default=defaults.DEFAULT_DOCKER_CONTAINERS_LIST,
        help='List of containers to crawl as a list of Docker container IDs. '
             'If this is not passed, then just the host is crawled. '
             'Alternatively the word "ALL" can be used to crawl every '
             'container. "ALL" will crawl all namespaces including the host '
             'itself. This option is only valid for INVM crawl mode. Example: '
             '--crawlContainers 5f3380d2319e,681be3e32661',
    )
    parser.add_argument(
        '--environment',
        dest='environment',
        type=str,
        default=defaults.DEFAULT_ENVIRONMENT,
        help='This speficies some environment specific behavior, like how '
             'to name a container. The way to add a new behavior is by '
             'implementing a plugin (see plugins/cloudsight_environment.py '
             'as an example. Defaults to "cloudsight".',
    )
    parser.add_argument(
        '--plugins',
        dest='plugin_places',
        type=str,
        default=defaults.DEFAULT_PLUGIN_PLACES,
        help='This is a comma separated list of directories where to find '
             'plugins. Each path can be an absolute, or a relative to the '
             'location of the crawler.py.',
    )
    parser.add_argument(
        '--numprocesses',
        dest='numprocesses',
        type=int,
        default=None,
        help='Number of processes used for container crawling. Defaults '
             'to the number of cores.')
    parser.add_argument(
        '--extraMetadataFile',
        dest='extraMetadataFile',
        type=str,
        default=None,
        help='Json file with data to be annotate all features. It can be used '
             'to append a set of system identifiers to the metadata feature '
             'and if the --extraMetadataForAll')
    parser.add_argument(
        '--extraMetadataForAll',
        dest='extraMetadataForAll',
        action='store_true',
        default=False,
        help='If specified all features are appended with extra metadata.')
    parser.add_argument(
        '--linkContainerLogFiles',
        dest='linkContainerLogFiles',
        action='store_true',
        default=defaults.DEFAULT_LINK_CONTAINER_LOG_FILES,
        help='Experimental feature. If specified and if running in '
             'OUTCONTAINER mode, then the crawler maintains links to '
             'container log files.')
    parser.add_argument(
        '--overwrite',
        dest='overwrite',
        action='store_true',
        default=False,
        help='overwrite file type url parameter and strip trailing sequence number'
    )
    parser.add_argument(
        '--avoidSetns',
        dest='avoid_setns',
        action='store_true',
        default=False,
        help='Avoids the use of the setns() syscall to crawl containers. '
             'Some features like process will not work with this option. '
             'Only applies to the OUTCONTAINER mode'
    )
    args = parser.parse_args()
    # ---- Merge user-supplied JSON options over the defaults ---------------
    params = {}
    params['options'] = copy.deepcopy(defaults.DEFAULT_CRAWL_OPTIONS)
    if args.options:
        try:
            _options = json.loads(args.options)
        except (KeyError, ValueError):
            sys.stderr.write('Can not parse the user options json.\n')
            sys.exit(1)
        # The default options are replaced at the root level of each option.
        # For example: the 'file' option, which has many details (it's really a
        # tree of options),is completely replaced by the the 'file' option in
        # the user json.
        for (option, value) in _options.iteritems():
            if option in defaults.DEFAULT_CRAWL_OPTIONS:
                # Check the data passed!
                params['options'][option] = value
            if option not in defaults.DEFAULT_CRAWL_OPTIONS:
                sys.stderr.write('There is a problem with the options json.\n')
                sys.exit(1)
    # Arguments to the crawl snapshot function are passed as a big options
    # tree,which defaults to DEFAULT_CRAWL_OPTIONS. Most of the following
    # arguments just update that tree of options.
    options = params['options']
    if args.url:
        params['urls'] = args.url
    if args.namespace:
        params['namespace'] = args.namespace
    if args.features:
        params['features'] = args.features
    if args.since:
        params['since'] = args.since
    if args.frequency is not None:
        params['frequency'] = args.frequency
    if args.compress:
        options['compress'] = (args.compress == 'true')
    params['overwrite'] = args.overwrite
    if args.crawlmode:
        params['crawlmode'] = args.crawlmode
        # ---- Mode-specific validation and option wiring -------------------
        if args.crawlmode == 'MOUNTPOINT':
            if not args.mountpoint:
                print ('Need to specify mountpoint location (--mountpoint) '
                       'for MOUNTPOINT mode')
                sys.exit(1)
            if not os.path.exists(args.mountpoint):
                print (
                    'Mountpoint location %s does not exist.' %
                    (args.mountpoint))
                sys.exit(1)
            options['mountpoint'] = args.mountpoint
            options['os']['mountpoint'] = args.mountpoint
            options['package']['root_dir'] = args.mountpoint
            options['file']['root_dir'] = args.mountpoint
            # To remove args.mountpoint (e.g. /mnt/CrawlDisk) from each
            # reported file path.
            options['file']['root_dir_alias'] = '/'
            options['config']['root_dir'] = args.mountpoint
            # To remove args.mountpoint (e.g. /mnt/CrawlDisk) from each
            # reported file path.
            options['config']['root_dir_alias'] = '/'
        elif args.crawlmode == 'DEVICE':
            print ('NOT IMPLEMENTED! Will Need to specify device location for '
                   'DEVICE mode')
            sys.exit(1)
        elif args.crawlmode == 'FILE':
            if args.inputfile:
                params['inputfile'] = args.inputfile
            else:
                print ('Need to specify frame file location (--inputfile) '
                       'for FILE mode')
                sys.exit(1)
        elif args.crawlmode == 'ISCSI':
            print ('NOT IMPLEMENTED! Will Need to somehow specify connection '
                   'info for ISCSI mode')
            sys.exit(1)
    if args.crawlmode == 'OUTCONTAINER':
        if args.crawlContainers:
            options['docker_containers_list'] = args.crawlContainers
        if not args.numprocesses:
            # Default worker count: one per CPU core.
            args.numprocesses = multiprocessing.cpu_count()
        if args.avoid_setns:
            options['os']['avoid_setns'] = args.avoid_setns
            options['config']['avoid_setns'] = args.avoid_setns
            options['file']['avoid_setns'] = args.avoid_setns
            options['package']['avoid_setns'] = args.avoid_setns
    if args.format:
        params['format'] = args.format
    if args.environment:
        options['environment'] = args.environment
    if args.plugin_places:
        options['plugin_places'] = args.plugin_places
    if args.extraMetadataFile:
        metadata = options['metadata']
        metadata['extra_metadata_for_all'] = args.extraMetadataForAll
        try:
            with open(args.extraMetadataFile, 'r') as fp:
                metadata['extra_metadata'] = fp.read()
        except Exception as e:
            print 'Could not read the feature metadata json file: %s' \
                % e
            sys.exit(1)
    options['link_container_log_files'] = args.linkContainerLogFiles
    # ---- Kick off the crawl ----------------------------------------------
    setup_logger('crawler-main', args.logfile)
    logger = logging.getLogger('crawler-main')
    logger.info('Starting crawler at {0}'.format(CRAWLER_HOST))
    start_autonomous_crawler(args.numprocesses, args.logfile)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.