code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <https://calamares.io> ===
#
# SPDX-FileCopyrightText: 2014 Anke Boersma <demm@kaosx.us>
# SPDX-FileCopyrightText: 2015 Philip Müller <philm@manjaro.org>
# SPDX-FileCopyrightText: 2016 Teo Mrnjavac <teo@kde.org>
# SPDX-FileCopyrightText: 2018 AlmAck <gluca86@gmail.com>
# SPDX-FileCopyrightText: 2018-2019 Adriaan de Groot <groot@kde.org>
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Calamares is Free Software: see the License-Identifier above.
#
import os
import re
import shutil
import libcalamares
import gettext
# Translator for this module's user-visible strings; falls back to the
# untranslated English text when no message catalog is installed.
_ = gettext.translation("calamares-python",
                        localedir=libcalamares.utils.gettext_path(),
                        languages=libcalamares.utils.gettext_languages(),
                        fallback=True).gettext
def pretty_name():
    """Human-readable job description shown in the Calamares progress UI."""
    return _("Configuring locales.")
# A comment-only line: optional leading spaces (not tabs) followed by '#'.
RE_IS_COMMENT = re.compile("^ *#")
def is_comment(line):
    """Return True when @p line is a comment-only line."""
    return RE_IS_COMMENT.match(line) is not None
# A '#' and everything after it, at the end of a line.
RE_TRAILING_COMMENT = re.compile("#.*$")
# Everything from the first whitespace character to end-of-line.
RE_REST_OF_LINE = re.compile("\\s.*$")
def extract_locale(line):
    """
    Extract a locale from @p line.

    Returns a pair (extracted-locale, uncommented-line).  The locale is
    the first word of the uncommented line when it has exactly two fields
    (locale + charset); otherwise "" is returned, since in the human-
    readable explanation at the top of most /etc/locale.gen files the
    "locales" would be bogus words such as "" or "Configuration".
    """
    # Strip the leading spaces-and-'#' comment marker, then surrounding space.
    uncommented = re.sub("^ *#", "", line).strip()
    # Ignore any trailing comment when counting fields.
    words = RE_TRAILING_COMMENT.sub("", uncommented).strip().split()
    if len(words) == 2:
        # Keep only the first word of the uncommented line.
        return RE_REST_OF_LINE.sub("", uncommented), uncommented
    # Not exactly two fields: cannot be a proper locale line.
    return "", uncommented
def rewrite_locale_gen(srcfilename, destfilename, locale_conf):
    """
    Copies a locale.gen file from @p srcfilename to @p destfilename
    (this may be the same name), enabling those locales that can
    be found in the map @p locale_conf. Also always enables en_US.UTF-8.
    """
    en_us_locale = 'en_US.UTF-8'
    # Get entire source-file contents
    text = []
    with open(srcfilename, "r") as gen:
        text = gen.readlines()
    # we want unique values, so locale_values should have 1 or 2 items
    locale_values = set(locale_conf.values())
    locale_values.add(en_us_locale) # Always enable en_US as well
    # Maps enabled locale (first word) -> its uncommented locale.gen line.
    enabled_locales = {}
    # Locales already active (uncommented) in the file we are writing.
    seen_locales = set()
    # Write source out again, enabling some
    with open(destfilename, "w") as gen:
        for line in text:
            c = is_comment(line)
            locale, uncommented = extract_locale(line)
            # Non-comment lines are preserved, and comment lines
            # may be enabled if they match a desired locale
            if not c:
                seen_locales.add(locale)
            else:
                # Prefix match: e.g. "en_US.UTF-8 UTF-8" matches the
                # desired value "en_US.UTF-8".
                for locale_value in locale_values:
                    if locale.startswith(locale_value):
                        enabled_locales[locale] = uncommented
            # Every original line is kept as-is (still commented).
            gen.write(line)
        # Append uncommented (enabled) lines for locales that were only
        # found commented-out above, then note any that were missing.
        gen.write("\n###\n#\n# Locales enabled by Calamares\n")
        for locale, line in enabled_locales.items():
            if locale not in seen_locales:
                gen.write(line + "\n")
                seen_locales.add(locale)
        for locale in locale_values:
            if locale not in seen_locales:
                gen.write("# Missing: %s\n" % locale)
def run():
    """
    Create locale configuration in the target system.

    Reads "localeConf" from global storage (falling back to an
    all-en_US.UTF-8 configuration), re-enables the chosen locales in
    the target's /etc/locale.gen and runs locale-gen there, then writes
    /etc/locale.conf (and /etc/default/locale on distros that have
    /etc/default).  Returns None on success, or an
    (error-title, error-detail) pair when no root mount point is set.
    """
    # Redundant with the module-level import; kept as-is.
    import libcalamares
    locale_conf = libcalamares.globalstorage.value("localeConf")
    if not locale_conf:
        # No locale chosen by the user: default LANG and every LC_*
        # category to US English.
        locale_conf = {
            'LANG': 'en_US.UTF-8',
            'LC_NUMERIC': 'en_US.UTF-8',
            'LC_TIME': 'en_US.UTF-8',
            'LC_MONETARY': 'en_US.UTF-8',
            'LC_PAPER': 'en_US.UTF-8',
            'LC_NAME': 'en_US.UTF-8',
            'LC_ADDRESS': 'en_US.UTF-8',
            'LC_TELEPHONE': 'en_US.UTF-8',
            'LC_MEASUREMENT': 'en_US.UTF-8',
            'LC_IDENTIFICATION': 'en_US.UTF-8'
        }
    install_path = libcalamares.globalstorage.value("rootMountPoint")
    if install_path is None:
        libcalamares.utils.warning("rootMountPoint is empty, {!s}".format(install_path))
        return (_("Configuration Error"),
                _("No root mount point is given for <pre>{!s}</pre> to use." ).format("localecfg"))
    # Paths of the files to be written, inside the target system.
    target_locale_gen = "{!s}/etc/locale.gen".format(install_path)
    target_locale_gen_bak = target_locale_gen + ".bak"
    target_locale_conf_path = "{!s}/etc/locale.conf".format(install_path)
    target_etc_default_path = "{!s}/etc/default".format(install_path)
    # restore backup if available
    if os.path.exists(target_locale_gen_bak):
        shutil.copy2(target_locale_gen_bak, target_locale_gen)
        libcalamares.utils.debug("Restored backup {!s} -> {!s}"
                                 .format(target_locale_gen_bak, target_locale_gen))
    # run locale-gen if detected; this *will* cause an exception
    # if the live system has locale.gen, but the target does not:
    # in that case, fix your installation filesystem.
    if os.path.exists('/etc/locale.gen'):
        # Rewrite in place (src == dest) and regenerate inside the target.
        rewrite_locale_gen(target_locale_gen, target_locale_gen, locale_conf)
        libcalamares.utils.target_env_call(['locale-gen'])
        libcalamares.utils.debug('{!s} done'.format(target_locale_gen))
    # write /etc/locale.conf
    with open(target_locale_conf_path, "w") as lcf:
        for k, v in locale_conf.items():
            lcf.write("{!s}={!s}\n".format(k, v))
    libcalamares.utils.debug('{!s} done'.format(target_locale_conf_path))
    # write /etc/default/locale if /etc/default exists and is a dir
    if os.path.isdir(target_etc_default_path):
        with open(os.path.join(target_etc_default_path, "locale"), "w") as edl:
            for k, v in locale_conf.items():
                edl.write("{!s}={!s}\n".format(k, v))
        libcalamares.utils.debug('{!s} done'.format(target_etc_default_path))
    return None
| calamares/calamares | src/modules/localecfg/main.py | Python | gpl-3.0 | 6,225 |
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# Integer dtype used for index arrays in the block-sparse code.
SIZE_T = np.int64  # the size-type of index-arrays
| google/TensorNetwork | tensornetwork/block_sparse/sizetypes.py | Python | apache-2.0 | 660 |
'''
Utilities for contentstore tests
'''
import json
import textwrap
from mock import Mock
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import Client
from opaque_keys.edx.locations import SlashSeparatedCourseKey, AssetLocation
from contentstore.utils import reverse_url
from student.models import Registration
from student.roles import GaGlobalCourseCreatorRole
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.tests.utils import ProceduralCourseTestMixin
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
def parse_json(response):
    """Deserialize and return *response*'s body, which is assumed to be JSON."""
    body = response.content
    return json.loads(body)
def user(email):
    """Look up and return the django User with the given email address."""
    return User.objects.get(email=email)
def registration(email):
    """Look up and return the Registration of the user with the given email."""
    return Registration.objects.get(user__email=email)
class AjaxEnabledTestClient(Client):
    """
    Test client that pre-sets the headers a browser would send for AJAX
    requests, keeping Studio view tests less verbose.
    """
    def ajax_post(self, path, data=None, content_type="application/json", **kwargs):
        """
        POST to *path*, JSON-serializing *data* first (unless it is already
        a string) and marking the request as an XMLHttpRequest that accepts
        a JSON response.
        """
        if not isinstance(data, basestring):
            data = json.dumps(data or {})
        headers = dict(kwargs)
        headers.setdefault("HTTP_X_REQUESTED_WITH", "XMLHttpRequest")
        headers.setdefault("HTTP_ACCEPT", "application/json")
        return self.post(path=path, data=data, content_type=content_type, **headers)

    def get_html(self, path, data=None, follow=False, **extra):
        """GET *path*, asking the server for an HTML response."""
        return self.get(path, data or {}, follow, HTTP_ACCEPT="text/html", **extra)

    def get_json(self, path, data=None, follow=False, **extra):
        """GET *path*, asking the server for a JSON response."""
        return self.get(path, data or {}, follow, HTTP_ACCEPT="application/json", **extra)
class CourseTestCase(ProceduralCourseTestMixin, ModuleStoreTestCase):
    """
    Base class for Studio tests that require a logged in user and a course.
    Also provides helper methods for manipulating and verifying the course.
    """
    def setUp(self):
        """
        These tests need a user in the DB so that the django Test Client can log them in.
        The test user is created in the ModuleStoreTestCase setUp method.
        They inherit from the ModuleStoreTestCase class so that the mongodb collection
        will be cleared out before each test case execution and deleted
        afterwards.
        """
        self.user_password = super(CourseTestCase, self).setUp()
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.user.username, password=self.user_password)
        self.course = CourseFactory.create()
    def create_non_staff_authed_user_client(self, authenticate=True):
        """
        Create a non-staff user, log them in (if authenticate=True), and return the client, user to use for testing.
        """
        nonstaff, password = self.create_non_staff_user()
        client = AjaxEnabledTestClient()
        if authenticate:
            client.login(username=nonstaff.username, password=password)
        # Force is_authenticated to report the requested state on this object.
        nonstaff.is_authenticated = lambda: authenticate
        return client, nonstaff
    def reload_course(self):
        """
        Reloads the course object from the database
        """
        self.course = self.store.get_course(self.course.id)
    def save_course(self):
        """
        Updates the course object in the database
        """
        self.course.save()
        self.store.update_item(self.course, self.user.id)
    # Block names/ids created by import_and_populate_course and later
    # verified by check_populated_course.
    TEST_VERTICAL = 'vertical_test'
    ORPHAN_DRAFT_VERTICAL = 'orphan_draft_vertical'
    ORPHAN_DRAFT_HTML = 'orphan_draft_html'
    PRIVATE_VERTICAL = 'a_private_vertical'
    PUBLISHED_VERTICAL = 'a_published_vertical'
    SEQUENTIAL = 'vertical_sequential'
    DRAFT_HTML = 'draft_html'
    DRAFT_VIDEO = 'draft_video'
    # Asset in the 'toy' course that import_and_populate_course locks.
    LOCKED_ASSET_KEY = AssetLocation.from_deprecated_string('/c4x/edX/toy/asset/sample_static.txt')
    def import_and_populate_course(self):
        """
        Imports the test toy course and populates it with additional test data.

        Returns the id of the imported course.
        """
        content_store = contentstore()
        import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=content_store)
        course_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
        # create an Orphan
        # We had a bug where orphaned draft nodes caused export to fail. This is here to cover that case.
        vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
        vertical.location = vertical.location.replace(name='no_references')
        self.store.update_item(vertical, self.user.id, allow_not_found=True)
        orphan_vertical = self.store.get_item(vertical.location)
        self.assertEqual(orphan_vertical.location.name, 'no_references')
        self.assertEqual(len(orphan_vertical.children), len(vertical.children))
        # create an orphan vertical and html; we already don't try to import
        # the orphaned vertical, but we should make sure we don't import
        # the orphaned vertical's child html, too
        orphan_draft_vertical = self.store.create_item(
            self.user.id, course_id, 'vertical', self.ORPHAN_DRAFT_VERTICAL
        )
        orphan_draft_html = self.store.create_item(
            self.user.id, course_id, 'html', self.ORPHAN_DRAFT_HTML
        )
        orphan_draft_vertical.children.append(orphan_draft_html.location)
        self.store.update_item(orphan_draft_vertical, self.user.id)
        # create a Draft vertical
        vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
        draft_vertical = self.store.convert_to_draft(vertical.location, self.user.id)
        self.assertTrue(self.store.has_published_version(draft_vertical))
        # create a Private (draft only) vertical
        private_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PRIVATE_VERTICAL)
        self.assertFalse(self.store.has_published_version(private_vertical))
        # create a Published (no draft) vertical
        public_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PUBLISHED_VERTICAL)
        public_vertical = self.store.publish(public_vertical.location, self.user.id)
        self.assertTrue(self.store.has_published_version(public_vertical))
        # add the new private and new public as children of the sequential
        sequential = self.store.get_item(course_id.make_usage_key('sequential', self.SEQUENTIAL))
        sequential.children.append(private_vertical.location)
        sequential.children.append(public_vertical.location)
        self.store.update_item(sequential, self.user.id)
        # create an html and video component to make drafts:
        draft_html = self.store.create_item(self.user.id, course_id, 'html', self.DRAFT_HTML)
        draft_video = self.store.create_item(self.user.id, course_id, 'video', self.DRAFT_VIDEO)
        # add them as children to the public_vertical
        public_vertical.children.append(draft_html.location)
        public_vertical.children.append(draft_video.location)
        self.store.update_item(public_vertical, self.user.id)
        # publish changes to vertical
        self.store.publish(public_vertical.location, self.user.id)
        # convert html/video to draft
        self.store.convert_to_draft(draft_html.location, self.user.id)
        self.store.convert_to_draft(draft_video.location, self.user.id)
        # lock an asset
        content_store.set_attr(self.LOCKED_ASSET_KEY, 'locked', True)
        # create a non-portable link - should be rewritten in new courses
        html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
        new_data = html_module.data = html_module.data.replace(
            '/static/',
            '/c4x/{0}/{1}/asset/'.format(course_id.org, course_id.course)
        )
        self.store.update_item(html_module, self.user.id)
        html_module = self.store.get_item(html_module.location)
        self.assertEqual(new_data, html_module.data)
        return course_id
    def check_populated_course(self, course_id):
        """
        Verifies the content of the given course, per data that was populated in import_and_populate_course
        """
        items = self.store.get_items(
            course_id,
            qualifiers={'category': 'vertical'},
            revision=ModuleStoreEnum.RevisionOption.published_only
        )
        self.check_verticals(items)
        def verify_item_publish_state(item, publish_state):
            """Verifies the publish state of the item is as expected."""
            self.assertEqual(self.store.has_published_version(item), publish_state)
        def get_and_verify_publish_state(item_type, item_name, publish_state):
            """
            Gets the given item from the store and verifies the publish state
            of the item is as expected.
            """
            item = self.store.get_item(course_id.make_usage_key(item_type, item_name))
            verify_item_publish_state(item, publish_state)
            return item
        # verify draft vertical has a published version with published children
        vertical = get_and_verify_publish_state('vertical', self.TEST_VERTICAL, True)
        for child in vertical.get_children():
            verify_item_publish_state(child, True)
        # verify that it has a draft too
        self.assertTrue(getattr(vertical, "is_draft", False))
        # make sure that we don't have a sequential that is in draft mode
        sequential = get_and_verify_publish_state('sequential', self.SEQUENTIAL, True)
        self.assertFalse(getattr(sequential, "is_draft", False))
        # verify that we have the private vertical
        private_vertical = get_and_verify_publish_state('vertical', self.PRIVATE_VERTICAL, False)
        # verify that we have the public vertical
        public_vertical = get_and_verify_publish_state('vertical', self.PUBLISHED_VERTICAL, True)
        # verify that we have the draft html
        draft_html = self.store.get_item(course_id.make_usage_key('html', self.DRAFT_HTML))
        self.assertTrue(getattr(draft_html, 'is_draft', False))
        # verify that we have the draft video
        draft_video = self.store.get_item(course_id.make_usage_key('video', self.DRAFT_VIDEO))
        self.assertTrue(getattr(draft_video, 'is_draft', False))
        # verify verticals are children of sequential
        for vert in [vertical, private_vertical, public_vertical]:
            self.assertIn(vert.location, sequential.children)
        # verify draft html is the child of the public vertical
        self.assertIn(draft_html.location, public_vertical.children)
        # verify draft video is the child of the public vertical
        self.assertIn(draft_video.location, public_vertical.children)
        # verify textbook exists
        course = self.store.get_course(course_id)
        self.assertGreater(len(course.textbooks), 0)
        # verify asset attributes of locked asset key
        self.assertAssetsEqual(self.LOCKED_ASSET_KEY, self.LOCKED_ASSET_KEY.course_key, course_id)
        # verify non-portable links are rewritten
        html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
        self.assertIn('/static/foo.jpg', html_module.data)
        return course
    def assertCoursesEqual(self, course1_id, course2_id):
        """
        Verifies the content of the two given courses are equal
        """
        course1_items = self.store.get_items(course1_id)
        course2_items = self.store.get_items(course2_id)
        self.assertGreater(len(course1_items), 0)  # ensure it found content instead of [] == []
        if len(course1_items) != len(course2_items):
            # Report exactly which block ids are present on one side only.
            course1_block_ids = set([item.location.block_id for item in course1_items])
            course2_block_ids = set([item.location.block_id for item in course2_items])
            raise AssertionError(
                u"Course1 extra blocks: {}; course2 extra blocks: {}".format(
                    course1_block_ids - course2_block_ids, course2_block_ids - course1_block_ids
                )
            )
        for course1_item in course1_items:
            course1_item_loc = course1_item.location
            course2_item_loc = course2_id.make_usage_key(course1_item_loc.block_type, course1_item_loc.block_id)
            if course1_item_loc.block_type == 'course':
                # mongo uses the run as the name, split uses 'course'
                store = self.store._get_modulestore_for_courselike(course2_id)  # pylint: disable=protected-access
                new_name = 'course' if isinstance(store, SplitMongoModuleStore) else course2_item_loc.run
                course2_item_loc = course2_item_loc.replace(name=new_name)
            course2_item = self.store.get_item(course2_item_loc)
            # compare published state
            self.assertEqual(
                self.store.has_published_version(course1_item),
                self.store.has_published_version(course2_item)
            )
            # compare data
            self.assertEqual(hasattr(course1_item, 'data'), hasattr(course2_item, 'data'))
            if hasattr(course1_item, 'data'):
                self.assertEqual(course1_item.data, course2_item.data)
            # compare meta-data
            self.assertEqual(own_metadata(course1_item), own_metadata(course2_item))
            # compare children
            self.assertEqual(course1_item.has_children, course2_item.has_children)
            if course1_item.has_children:
                expected_children = []
                for course1_item_child in course1_item.children:
                    expected_children.append(
                        course2_id.make_usage_key(course1_item_child.block_type, course1_item_child.block_id)
                    )
                self.assertEqual(expected_children, course2_item.children)
        # compare assets
        content_store = self.store.contentstore
        course1_assets, count_course1_assets = content_store.get_all_content_for_course(course1_id)
        _, count_course2_assets = content_store.get_all_content_for_course(course2_id)
        self.assertEqual(count_course1_assets, count_course2_assets)
        for asset in course1_assets:
            asset_son = asset.get('content_son', asset['_id'])
            self.assertAssetsEqual(asset_son, course1_id, course2_id)
    def check_verticals(self, items):
        """ Test getting the editing HTML for each vertical. """
        # assert is here to make sure that the course being tested actually has verticals (units) to check.
        self.assertGreater(len(items), 0, "Course has no verticals (units) to check")
        for descriptor in items:
            resp = self.client.get_html(get_url('container_handler', descriptor.location))
            self.assertEqual(resp.status_code, 200)
    def assertAssetsEqual(self, asset_son, course1_id, course2_id):
        """Verifies the asset of the given key has the same attributes in both given courses."""
        content_store = contentstore()
        # asset_son may be an opaque key or a raw mongo SON dict.
        category = asset_son.block_type if hasattr(asset_son, 'block_type') else asset_son['category']
        filename = asset_son.block_id if hasattr(asset_son, 'block_id') else asset_son['name']
        course1_asset_attrs = content_store.get_attrs(course1_id.make_asset_key(category, filename))
        course2_asset_attrs = content_store.get_attrs(course2_id.make_asset_key(category, filename))
        self.assertEqual(len(course1_asset_attrs), len(course2_asset_attrs))
        for key, value in course1_asset_attrs.iteritems():
            # These attributes are expected to differ between courses.
            if key in ['_id', 'filename', 'uploadDate', 'content_son', 'thumbnail_location']:
                pass
            else:
                self.assertEqual(value, course2_asset_attrs[key])
def mock_requests_get(*args, **kwargs):
    """
    Returns mock responses for the youtube API.

    Stand-in for ``requests.get``: dispatches purely on the exact
    keyword arguments of the call and returns a Mock with
    ``status_code``/``text`` (and ``content`` where the real API would
    return a body).  Unknown calls get a 404.
    """
    # pylint: disable=unused-argument
    # Canned "list available transcripts" XML payload.
    response_transcript_list = """
    <transcript_list>
        <track id="1" name="Custom" lang_code="en" />
        <track id="0" name="Custom1" lang_code="en-GB"/>
    </transcript_list>
    """
    # Canned transcript body.
    response_transcript = textwrap.dedent("""
    <transcript>
        <text start="100" dur="100">subs #1</text>
        <text start="200" dur="40">subs #2</text>
        <text start="240" dur="140">subs #3</text>
    </transcript>
    """)
    if kwargs == {'params': {'lang': 'en', 'v': 'good_id_2'}}:
        # Transcript-existence probe: empty body, success status.
        return Mock(status_code=200, text='')
    elif kwargs == {'params': {'type': 'list', 'v': 'good_id_2'}}:
        return Mock(status_code=200, text=response_transcript_list, content=response_transcript_list)
    elif kwargs == {'params': {'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}}:
        return Mock(status_code=200, text=response_transcript, content=response_transcript)
    # Any other request is treated as "not found".
    return Mock(status_code=404, text='')
def get_url(handler_name, key_value, key_name='usage_key_string', kwargs=None):
    """
    Return the URL for the given Studio handler.

    Reverses *handler_name* with *key_value* supplied under *key_name*,
    plus any extra *kwargs*.
    """
    return reverse_url(handler_name, key_name, key_value, kwargs)
def switch_ga_global_course_creator(global_staff):
    """
    Demote *global_staff* from django staff and grant them the
    GaGlobalCourseCreator role instead.
    """
    global_staff.is_staff = False
    global_staff.save()
    GaGlobalCourseCreatorRole().add_users(global_staff)
| nttks/edx-platform | cms/djangoapps/contentstore/tests/utils.py | Python | agpl-3.0 | 18,146 |
#!/usr/bin/env python
# encoding: utf-8
"""
ransom-note.py
Created by Shuailong on 2016-09-01.
https://leetcode.com/problems/ransom-note/.
"""
# 217ms
from collections import Counter
class Solution(object):
    def canConstruct(self, ransomNote, magazine):
        """
        :type ransomNote: str
        :type magazine: str
        :rtype: bool

        The note is constructible iff no letter is needed more times than
        the magazine supplies.  Counter subtraction keeps only positive
        counts, so an empty difference means there is no deficit.
        """
        needed = Counter(ransomNote)
        available = Counter(magazine)
        return not (needed - available)
def main():
    """Exercise Solution.canConstruct on the LeetCode examples."""
    solution = Solution()
    # Parenthesized print works identically under Python 2 (as a
    # parenthesized expression) and Python 3 (as a function call);
    # the original bare print statements were a SyntaxError on Python 3.
    print(solution.canConstruct('a', 'b'))
    print(solution.canConstruct('aa', 'ab'))
    print(solution.canConstruct('aa', 'aab'))
if __name__ == '__main__':
    main()
| Shuailong/Leetcode | solutions/ransom-note.py | Python | mit | 752 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views import defaults as default_views
from django.views.generic import TemplateView
from rest_framework import routers
from sahem.events import views
# DRF router exposing the REST API view sets under /api/ (included below).
router = routers.DefaultRouter()
router.register(r'events', views.EventViewSet)
router.register(r'users', views.UserViewSet)
router.register(r'categories', views.CategoryViewSet)
router.register(r'comments', views.CommentViewSet)
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='base.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    url(r'^privacy\-policy/$', TemplateView.as_view(template_name='pages/privacy_policy.html'),
        name="privacy_policy"),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),
    # User management
    url(r'^users/', include("sahem.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    url(r'^events/', include('sahem.events.urls', namespace='events')),
    # Join event api
    url(r'^api/events/join/event/(?P<event_id>\d+)/staff/(?P<staff_id>\d+)/$', views.join_event,
        name='join_event_as_staff'),
    url(r'^api/events/join/event/(?P<event_id>\d+)/participant/(?P<participant_id>\d+)/$',
        views.join_event,
        name='join_event_as_participant'),
    # Leave event api
    url(r'^api/events/leave/event/(?P<event_id>\d+)/staff/(?P<staff_id>\d+)/$', views.leave_event,
        name='leave_event_as_staff'),
    url(r'^api/events/leave/event/(?P<event_id>\d+)/participant/(?P<participant_id>\d+)/$',
        views.leave_event,
        name='leave_event_as_participant'),
    # Comments api
    url(r'^api/comments/add/$', views.add_comment, name='add_comment'),
    # Api Endpoint
    url(r'^api/events/(?P<id>\d+)/(?P<slug>\w+)/$', views.event_list, name='event_by_id_slug'),
    url(r'^api/events/category/(?P<category_slug>\w+)/$', views.event_list, name='event_by_category'),
    url(r'^api/', include(router.urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
        url(r'^500/$', default_views.server_error),
    ]
| D3vSt0rm/sahem | config/urls.py | Python | mit | 3,329 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function, unicode_literals)
from flask import Flask, Blueprint, current_app, url_for
from flask.ext.restless import APIManager
import types
from greidinet.model import *
# Flask-Restless API manager; presumably init_app() is called on it during
# application setup elsewhere -- confirm against the app factory.
restapi = APIManager()
# Dynamically bind create_apis() method to the APIManager class (there probably should be a nicer way to accomplish this)
def create_apis(self):
    """Register an API endpoint for each model on this manager.

    Only GradeType is registered with explicit write methods; the other
    models use the library's default method set.
    """
    self.create_api(GradeType, methods=['GET', 'POST', 'DELETE', 'PATCH'])
    self.create_api(Grade)
    self.create_api(Category)
    self.create_api(Location)
    self.create_api(Route)
    self.create_api(User)
    self.create_api(Entry)
# NOTE(review): the three-argument form of types.MethodType is Python 2
# only; under Python 3 this would be types.MethodType(create_apis, restapi).
restapi.create_apis = types.MethodType(create_apis, restapi, restapi.__class__)
| juusokorhonen/greidinet | greidinet/restapi/__init__.py | Python | mit | 745 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import os
import sys
import numpy as np
#from tqdm import tqdm
import fnmatch
import argparse
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
def main(argv):
    """
    Scan a directory of experiment log files, aggregate AUC-PR across
    random seeds, pick the best configuration by validation score for
    each model/setting, and print table rows with the corresponding
    test scores.
    """
    def formatter(prog):
        # Wide help formatter so long option strings stay on one line.
        return argparse.HelpFormatter(prog, max_help_position=100, width=200)
    argparser = argparse.ArgumentParser('Parse Countries logs', formatter_class=formatter)
    argparser.add_argument('path', action='store', type=str)
    argparser.add_argument('--regex', '-r', action='store', type=str, default='*')
    args = argparser.parse_args(argv)
    path = args.path
    regex = args.regex
    # Last matching '[valid]'/'[test]' AUC-PR line in each log file wins.
    path_to_valid_aucpr, path_to_test_aucpr = {}, {}
    for file_path in glob.glob('{}/*{}*.log'.format(path, regex)):
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
            for line in f:
                if '[valid]' in line and 'AUC-PR' in line:
                    path_to_valid_aucpr[file_path] = line
                if '[test]' in line and 'AUC-PR' in line:
                    path_to_test_aucpr[file_path] = line
    # Only keep runs that produced both a valid and a test score.
    path_set = set(path_to_valid_aucpr.keys()) & set(path_to_test_aucpr.keys())
    # Use the following for debugging:
    # path_set = set(path_to_valid_aucpr.keys()) | set(path_to_test_aucpr.keys())
    # Group runs that differ only in their random seed by rewriting
    # '_seed=<digit>' to '_seed=X' in the path.
    new_path_to_valid_aucprs, new_path_to_test_aucprs = {}, {}
    for path in path_set:
        new_path = path
        for i in range(10):
            new_path = new_path.replace('_seed={}'.format(i), '_seed=X')
        if new_path not in new_path_to_valid_aucprs:
            new_path_to_valid_aucprs[new_path] = []
        if new_path not in new_path_to_test_aucprs:
            new_path_to_test_aucprs[new_path] = []
        new_path_to_valid_aucprs[new_path] += [path_to_valid_aucpr[path]]
        new_path_to_test_aucprs[new_path] += [path_to_test_aucpr[path]]
    new_paths = set(new_path_to_valid_aucprs.keys()) & set(new_path_to_test_aucprs.keys())
    new_path_to_valid_aucpr_stats, new_path_to_test_aucpr_stats = {}, {}
    def stats(values):
        # Format the per-seed values as a "mean ± std" string.
        if len(values) != 10:
            pass # return "0 ± 0"
        return '{0:.3f} ± {1:.3f}'.format(round(np.mean(values), 3), round(np.std(values), 3))
    # The AUC-PR value is the third whitespace-separated token of the line.
    for new_path in new_paths:
        new_path_to_valid_aucpr_stats[new_path] = stats([float(l.split()[2]) for l in new_path_to_valid_aucprs[new_path]])
        new_path_to_test_aucpr_stats[new_path] = stats([float(l.split()[2]) for l in new_path_to_test_aucprs[new_path]])
    # Only report models that actually appear among the grouped paths.
    model_names = []
    for m in ['DistMult', 'ComplEx', 'ERMLP']:
        if any([m in new_path for new_path in new_paths]):
            model_names += [m]
    # Build a glob pattern for every (model, countries-setting) pair;
    # adv_weight=0 distinguishes the non-adversarial (non-ASR) runs.
    name_to_regex = {}
    for model_name in model_names:
        for s in [1, 12, 123, 2, 3]:
            name_to_regex['{}-ASR-S{}'.format(model_name, s)] = '*_model={}*_s={}_*.log'.format(model_name, s)
            name_to_regex['{}-S{}'.format(model_name, s)] = '*_adv_weight=0_*_model={}*_s={}_*.log'.format(model_name, s)
    regex_to_name = {regex: name for name, regex in name_to_regex.items()}
    regex_to_best_valid = {regex: None for _, regex in name_to_regex.items()}
    # Model selection: per pattern, keep the path with the highest mean
    # validation AUC-PR.  NOTE: 'stats' here shadows the helper above,
    # which is not called again afterwards.
    for path, stats in new_path_to_valid_aucpr_stats.items():
        for regex, best_valid in regex_to_best_valid.items():
            if fnmatch.fnmatch(path, regex):
                if best_valid is None:
                    regex_to_best_valid[regex] = (path, stats)
                else:
                    (_, best_stats) = best_valid
                    if float(stats.split(' ')[0]) > float(best_stats.split(' ')[0]):
                        regex_to_best_valid[regex] = (path, stats)
    for regex, best_valid in regex_to_best_valid.items():
        print(regex, best_valid)
    # Report the test score of the configuration selected on validation.
    name_to_best_test = {}
    for regex, (path, valid_stats) in regex_to_best_valid.items():
        name = regex_to_name[regex]
        test_stats = new_path_to_test_aucpr_stats[path]
        name_to_best_test[name] = test_stats
    sorted_names = sorted(name_to_regex.keys())
    for name in sorted_names:
        best_test = name_to_best_test[name]
        print(name, best_test)
    # Emit LaTeX-style table rows for the two groupings of settings.
    for model_name in model_names:
        for is_asr in [False, True]:
            local_model_name = '{}{}'.format(model_name, '-ASR' if is_asr else '')
            col_model = '{}'.format(local_model_name)
            col_s1 = name_to_best_test['{}-S1'.format(local_model_name)]
            col_s2 = name_to_best_test['{}-S2'.format(local_model_name)]
            col_s3 = name_to_best_test['{}-S3'.format(local_model_name)]
            col_s12 = name_to_best_test['{}-S12'.format(local_model_name)]
            col_s123 = name_to_best_test['{}-S123'.format(local_model_name)]
            print('(S1, S2, S3)\t{}\t\t&\t{}\t&\t{}\t&\t{}'.format(col_model, col_s1, col_s2, col_s3))
            print('(S1, S12, S123)\t{}\t\t&\t{}\t&\t{}\t&\t{}'.format(col_model, col_s1, col_s12, col_s123))
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Forward the command-line arguments (minus the program name).
    main(sys.argv[1:])
| uclmr/inferbeddings | tools/parse_results_countries.py | Python | mit | 5,035 |
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import traceback
import errno
import filecmp
import os
import random
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import time
from color import Coloring
from git_command import GitCommand, git_require
from git_config import GitConfig, IsId, GetSchemeFromUrl, ID_RE
from error import GitError, HookError, UploadError
from error import ManifestInvalidRevisionError
from error import NoManifestException
from trace import IsTrace, Trace
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
from pyversion import is_python3
# Under Python 2, make input() behave like Python 3's (i.e. raw_input).
if not is_python3():
  # pylint:disable=W0622
  input = raw_input
  # pylint:enable=W0622
def _lwrite(path, content):
lock = '%s.lock' % path
fd = open(lock, 'w')
try:
fd.write(content)
finally:
fd.close()
try:
os.rename(lock, path)
except OSError:
os.remove(lock)
raise
def _error(fmt, *args):
  """Print a %-formatted message to stderr with an 'error: ' prefix."""
  print('error: %s' % (fmt % args), file=sys.stderr)
def not_rev(r):
  """Return the rev-list exclusion form ('^<rev>') of revision `r`."""
  return '^%s' % r
def sq(r):
  """Single-quote `r` for safe inclusion in a POSIX shell command line.

  Each embedded single quote is replaced with the standard escape
  sequence: close the quoted string, emit a backslash-escaped quote,
  and reopen the quoted string ('\\'').

  Bug fix: the previous replacement string "'\''" is, after Python
  escape processing, just three quote characters ("'''") -- a closed
  and reopened empty string with no escaped quote -- so a value like
  "don't" came out as the shell word "dont".
  """
  return "'" + r.replace("'", "'\\''") + "'"
# Module-level cache for _ProjectHooks(); None until the first call.
_project_hook_list = None


def _ProjectHooks():
  """List the hooks present in the 'hooks' directory.

  These hooks are project hooks and are copied to the '.git/hooks' directory
  of all subprojects.

  This function caches the list of hooks (based on the contents of the
  'repo/hooks' directory) on the first call.

  Returns:
    A list of absolute paths to all of the files in the hooks directory.
  """
  global _project_hook_list
  if _project_hook_list is None:
    # The hooks directory lives next to this source file.
    d = os.path.realpath(os.path.abspath(os.path.dirname(__file__)))
    d = os.path.join(d , 'hooks')
    _project_hook_list = [os.path.join(d, x) for x in os.listdir(d)]
  return _project_hook_list
class DownloadedChange(object):
  """A Gerrit change fetched locally (e.g. by `repo download`)."""

  # Lazily computed by the `commits` property.
  _commit_cache = None

  def __init__(self, project, base, change_id, ps_id, commit):
    self.project = project
    self.base = base
    self.change_id = change_id
    self.ps_id = ps_id
    self.commit = commit

  @property
  def commits(self):
    """One-line descriptions of the commits in this change but not in base."""
    cached = self._commit_cache
    if cached is None:
      cached = self.project.bare_git.rev_list('--abbrev=8',
                                              '--abbrev-commit',
                                              '--pretty=oneline',
                                              '--reverse',
                                              '--date-order',
                                              not_rev(self.base),
                                              self.commit,
                                              '--')
      self._commit_cache = cached
    return cached
class ReviewableBranch(object):
  """A local topic branch of a project that can be uploaded for review."""

  # Lazily computed by the `commits` property.
  _commit_cache = None

  def __init__(self, project, branch, base):
    self.project = project
    self.branch = branch
    self.base = base

  @property
  def name(self):
    return self.branch.name

  @property
  def commits(self):
    """One-line descriptions of commits on this branch but not in `base`."""
    if self._commit_cache is None:
      self._commit_cache = self.project.bare_git.rev_list(
          '--abbrev=8',
          '--abbrev-commit',
          '--pretty=oneline',
          '--reverse',
          '--date-order',
          not_rev(self.base),
          R_HEADS + self.name,
          '--')
    return self._commit_cache

  @property
  def unabbrev_commits(self):
    """Map of abbreviated (8-char) commit id -> full commit id."""
    r = dict()
    for commit in self.project.bare_git.rev_list(
        not_rev(self.base),
        R_HEADS + self.name,
        '--'):
      r[commit[0:8]] = commit
    return r

  @property
  def date(self):
    """Committer date of the branch tip, as formatted by git's %cd."""
    return self.project.bare_git.log(
        '--pretty=format:%cd',
        '-n', '1',
        R_HEADS + self.name,
        '--')

  def UploadForReview(self, people, auto_topic=False, draft=False, dest_branch=None):
    """Upload this branch for review (delegates to Project.UploadForReview)."""
    self.project.UploadForReview(self.name,
                                 people,
                                 auto_topic=auto_topic,
                                 draft=draft,
                                 dest_branch=dest_branch)

  def GetPublishedRefs(self):
    """Map of sha -> 'refs/changes/...' ref on the review server."""
    refs = {}
    output = self.project.bare_git.ls_remote(
        self.branch.remote.SshReviewUrl(self.project.UserEmail),
        'refs/changes/*')
    for line in output.split('\n'):
      try:
        (sha, ref) = line.split()
        refs[sha] = ref
      except ValueError:
        # Skip blank or malformed ls-remote lines.
        pass
    return refs
class StatusColoring(Coloring):
  """Color palette used when printing `repo status` output."""

  def __init__(self, config):
    Coloring.__init__(self, config, 'status')
    # (attribute name, color slot, printer keyword arguments)
    palette = (
        ('project', 'header', {'attr': 'bold'}),
        ('branch', 'header', {'attr': 'bold'}),
        ('nobranch', 'nobranch', {'fg': 'red'}),
        ('important', 'important', {'fg': 'red'}),
        ('added', 'added', {'fg': 'green'}),
        ('changed', 'changed', {'fg': 'red'}),
        ('untracked', 'untracked', {'fg': 'red'}),
    )
    for attr_name, slot, kwargs in palette:
      setattr(self, attr_name, self.printer(slot, **kwargs))
class DiffColoring(Coloring):
  """Color palette used when printing `repo diff` output."""

  def __init__(self, config):
    Coloring.__init__(self, config, 'diff')
    # Bold header for the "project <path>/" banner line.
    self.project = self.printer('header', attr = 'bold')
class _Annotation:
  """A name/value annotation attached to a project by the manifest."""

  def __init__(self, name, value, keep):
    self.name, self.value, self.keep = name, value, keep
class _CopyFile:
  """A <copyfile> manifest element: copy a project file into the tree."""

  def __init__(self, src, dest, abssrc, absdest):
    self.src = src
    self.dest = dest
    self.abs_src = abssrc
    self.abs_dest = absdest

  def _Copy(self):
    """Copy abs_src to abs_dest (read-only) unless already up to date."""
    src = self.abs_src
    dest = self.abs_dest
    # Nothing to do when the destination already matches the source.
    if os.path.exists(dest) and filecmp.cmp(src, dest):
      return
    try:
      if os.path.exists(dest):
        # Remove the stale copy first, since it might be read-only.
        os.remove(dest)
      else:
        dest_dir = os.path.dirname(dest)
        if not os.path.isdir(dest_dir):
          os.makedirs(dest_dir)
      shutil.copy(src, dest)
      # Make the new copy read-only.
      mode = os.stat(dest)[stat.ST_MODE]
      os.chmod(dest, mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))
    except IOError:
      _error('Cannot copy file %s to %s', src, dest)
class _LinkFile:
  """A <linkfile> manifest element: symlink abs_dest pointing at abs_src."""

  def __init__(self, src, dest, abssrc, absdest):
    self.src = src
    self.dest = dest
    self.abs_src = abssrc
    self.abs_dest = absdest

  def _Link(self):
    """Create (or refresh) the symlink, replacing any stale entry at dest."""
    src = self.abs_src
    dest = self.abs_dest
    # link file if it does not exist or is out of date
    if not os.path.islink(dest) or os.readlink(dest) != src:
      try:
        # Use lexists(), not exists(): exists() follows symlinks, so a
        # dangling symlink at dest would report False, the stale link
        # would never be removed, and os.symlink() below would fail.
        if os.path.lexists(dest):
          # remove existing entry first, since it might be read-only
          os.remove(dest)
        else:
          dest_dir = os.path.dirname(dest)
          if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        os.symlink(src, dest)
      except IOError:
        _error('Cannot link file %s to %s', src, dest)
class RemoteSpec(object):
  """Properties of a project's remote, as described by the manifest."""

  def __init__(self, name, url=None, review=None, revision=None):
    self.name, self.url = name, url
    self.review, self.revision = review, revision
class RepoHook(object):
  """A RepoHook contains information about a script to run as a hook.

  Hooks are used to run a python script before running an upload (for instance,
  to run presubmit checks).  Eventually, we may have hooks for other actions.

  This shouldn't be confused with files in the 'repo/hooks' directory.  Those
  files are copied into each '.git/hooks' folder for each project.  Repo-level
  hooks are associated instead with repo actions.

  Hooks are always python.  When a hook is run, we will load the hook into the
  interpreter and execute its main() function.
  """
  def __init__(self,
               hook_type,
               hooks_project,
               topdir,
               abort_if_user_denies=False):
    """RepoHook constructor.

    Params:
      hook_type: A string representing the type of hook.  This is also used
          to figure out the name of the file containing the hook.  For
          example: 'pre-upload'.
      hooks_project: The project containing the repo hooks.  If you have a
          manifest, this is manifest.repo_hooks_project.  OK if this is None,
          which will make the hook a no-op.
      topdir: Repo's top directory (the one containing the .repo directory).
          Scripts will run with CWD as this directory.  If you have a
          manifest, this is manifest.topdir
      abort_if_user_denies: If True, we'll throw a HookError() if the user
          doesn't allow us to run the hook.
    """
    self._hook_type = hook_type
    self._hooks_project = hooks_project
    self._topdir = topdir
    self._abort_if_user_denies = abort_if_user_denies

    # Store the full path to the script for convenience.
    if self._hooks_project:
      self._script_fullpath = os.path.join(self._hooks_project.worktree,
                                           self._hook_type + '.py')
    else:
      self._script_fullpath = None

  def _GetHash(self):
    """Return a hash of the contents of the hooks directory.

    We'll just use git to do this.  This hash has the property that if
    anything changes in the directory we will return a different hash.

    SECURITY CONSIDERATION:
      This hash only represents the contents of files in the hook directory,
      not any other files imported or called by hooks.  Changes to imported
      files can change the script behavior without affecting the hash.

    Returns:
      A string representing the hash.  This will always be ASCII so that it
      can be printed to the user easily.
    """
    assert self._hooks_project, "Must have hooks to calculate their hash."

    # We will use the work_git object rather than just calling GetRevisionId().
    # That gives us a hash of the latest checked in version of the files that
    # the user will actually be executing.  Specifically, GetRevisionId()
    # doesn't appear to change even if a user checks out a different version
    # of the hooks repo (via git checkout) nor if a user commits their own revs.
    #
    # NOTE: Local (non-committed) changes will not be factored into this hash.
    # I think this is OK, since we're really only worried about warning the user
    # about upstream changes.
    return self._hooks_project.work_git.rev_parse('HEAD')

  def _GetMustVerb(self):
    """Return 'must' if the hook is required; 'should' if not."""
    if self._abort_if_user_denies:
      return 'must'
    else:
      return 'should'

  def _CheckForHookApproval(self):
    """Check to see whether this hook has been approved.

    We'll look at the hash of all of the hooks.  If this matches the hash that
    the user last approved, we're done.  If it doesn't, we'll ask the user
    about approval.

    Note that we ask permission for each individual hook even though we use
    the hash of all hooks when detecting changes.  We'd like the user to be
    able to approve / deny each hook individually.  We only use the hash of
    all hooks because there is no other easy way to detect changes to local
    imports.

    Returns:
      True if this hook is approved to run; False otherwise.

    Raises:
      HookError: Raised if the user doesn't approve and abort_if_user_denies
          was passed to the constructor.
    """
    hooks_config = self._hooks_project.config
    git_approval_key = 'repo.hooks.%s.approvedhash' % self._hook_type

    # Get the last hash that the user approved for this hook; may be None.
    old_hash = hooks_config.GetString(git_approval_key)

    # Get the current hash so we can tell if scripts changed since approval.
    new_hash = self._GetHash()

    if old_hash is not None:
      # User previously approved hook and asked not to be prompted again.
      if new_hash == old_hash:
        # Approval matched.  We're done.
        return True
      else:
        # Give the user a reason why we're prompting, since they last told
        # us to "never ask again".
        prompt = 'WARNING: Scripts have changed since %s was allowed.\n\n' % (
            self._hook_type)
    else:
      prompt = ''

    # Prompt the user if we're attached to a tty; otherwise there is nobody
    # to ask and we fall through below, assuming "no".
    if sys.stdout.isatty():
      prompt += ('Repo %s run the script:\n'
                 ' %s\n'
                 '\n'
                 'Do you want to allow this script to run '
                 '(yes/yes-never-ask-again/NO)? ') % (
                 self._GetMustVerb(), self._script_fullpath)
      response = input(prompt).lower()
      print()

      # User is doing a one-time approval.
      if response in ('y', 'yes'):
        return True
      elif response == 'yes-never-ask-again':
        # Persist the approved hash so we never prompt for this state again.
        hooks_config.SetString(git_approval_key, new_hash)
        return True

    # For anything else, we'll assume no approval.
    if self._abort_if_user_denies:
      raise HookError('You must allow the %s hook or use --no-verify.' %
                      self._hook_type)

    return False

  def _ExecuteHook(self, **kwargs):
    """Actually execute the given hook.

    This will run the hook's 'main' function in our python interpreter.

    Args:
      kwargs: Keyword arguments to pass to the hook.  These are often specific
          to the hook type.  For instance, pre-upload hooks will contain
          a project_list.
    """
    # Keep sys.path and CWD stashed away so that we can always restore them
    # upon function exit.
    orig_path = os.getcwd()
    orig_syspath = sys.path

    try:
      # Always run hooks with CWD as topdir.
      os.chdir(self._topdir)

      # Put the hook dir as the first item of sys.path so hooks can do
      # relative imports.  We want to replace the repo dir as [0] so
      # hooks can't import repo files.
      sys.path = [os.path.dirname(self._script_fullpath)] + sys.path[1:]

      # Exec, storing global context in the context dict.  We catch exceptions
      # and convert to a HookError w/ just the failing traceback.
      context = {}
      try:
        exec(compile(open(self._script_fullpath).read(),
                     self._script_fullpath, 'exec'), context)
      except Exception:
        raise HookError('%s\nFailed to import %s hook; see traceback above.' % (
                        traceback.format_exc(), self._hook_type))

      # Running the script should have defined a main() function.
      if 'main' not in context:
        raise HookError('Missing main() in: "%s"' % self._script_fullpath)

      # Add 'hook_should_take_kwargs' to the arguments to be passed to main.
      # We don't actually want hooks to define their main with this argument--
      # it's there to remind them that their hook should always take **kwargs.
      # For instance, a pre-upload hook should be defined like:
      #   def main(project_list, **kwargs):
      #
      # This allows us to later expand the API without breaking old hooks.
      kwargs = kwargs.copy()
      kwargs['hook_should_take_kwargs'] = True

      # Call the main function in the hook.  If the hook should cause the
      # build to fail, it will raise an Exception.  We'll catch that convert
      # to a HookError w/ just the failing traceback.
      try:
        context['main'](**kwargs)
      except Exception:
        raise HookError('%s\nFailed to run main() for %s hook; see traceback '
                        'above.' % (
                        traceback.format_exc(), self._hook_type))
    finally:
      # Restore sys.path and CWD.
      sys.path = orig_syspath
      os.chdir(orig_path)

  def Run(self, user_allows_all_hooks, **kwargs):
    """Run the hook.

    If the hook doesn't exist (because there is no hooks project or because
    this particular hook is not enabled), this is a no-op.

    Args:
      user_allows_all_hooks: If True, we will never prompt about running the
          hook--we'll just assume it's OK to run it.
      kwargs: Keyword arguments to pass to the hook.  These are often specific
          to the hook type.  For instance, pre-upload hooks will contain
          a project_list.

    Raises:
      HookError: If there was a problem finding the hook or the user declined
          to run a required hook (from _CheckForHookApproval).
    """
    # No-op if there is no hooks project or if hook is disabled.
    if ((not self._hooks_project) or
        (self._hook_type not in self._hooks_project.enabled_repo_hooks)):
      return

    # Bail with a nice error if we can't find the hook.
    if not os.path.isfile(self._script_fullpath):
      raise HookError('Couldn\'t find repo hook: "%s"' % self._script_fullpath)

    # Make sure the user is OK with running the hook.
    if (not user_allows_all_hooks) and (not self._CheckForHookApproval()):
      return

    # Run the hook with the same version of python we're using.
    self._ExecuteHook(**kwargs)
class Project(object):
  def __init__(self,
               manifest,
               name,
               remote,
               gitdir,
               objdir,
               worktree,
               relpath,
               revisionExpr,
               revisionId,
               rebase = True,
               groups = None,
               sync_c = False,
               sync_s = False,
               clone_depth = None,
               upstream = None,
               parent = None,
               is_derived = False,
               dest_branch = None):
    """Init a Project object.

    Args:
      manifest: The XmlManifest object.
      name: The `name` attribute of manifest.xml's project element.
      remote: RemoteSpec object specifying its remote's properties.
      gitdir: Absolute path of git directory.
      objdir: Absolute path of directory to store git objects.
      worktree: Absolute path of git working tree.
      relpath: Relative path of git working tree to repo's top directory.
      revisionExpr: The `revision` attribute of manifest.xml's project element.
      revisionId: git commit id for checking out.
      rebase: The `rebase` attribute of manifest.xml's project element.
      groups: The `groups` attribute of manifest.xml's project element.
      sync_c: The `sync-c` attribute of manifest.xml's project element.
      sync_s: The `sync-s` attribute of manifest.xml's project element.
      clone_depth: Depth limit used when fetching (presumably the manifest's
          `clone-depth` attribute -- confirm against the manifest parser);
          None means full history.
      upstream: The `upstream` attribute of manifest.xml's project element.
      parent: The parent Project object.
      is_derived: False if the project was explicitly defined in the manifest;
                  True if the project is a discovered submodule.
      dest_branch: The branch to which to push changes for review by default.
    """
    self.manifest = manifest
    self.name = name
    self.remote = remote
    # Normalize Windows-style path separators so later comparisons and
    # string operations on these paths behave consistently.
    self.gitdir = gitdir.replace('\\', '/')
    self.objdir = objdir.replace('\\', '/')
    if worktree:
      self.worktree = worktree.replace('\\', '/')
    else:
      self.worktree = None
    self.relpath = relpath
    self.revisionExpr = revisionExpr

    # A revision expression that is already a full commit id doubles as
    # the revision id.
    if revisionId is None \
        and revisionExpr \
        and IsId(revisionExpr):
      self.revisionId = revisionExpr
    else:
      self.revisionId = revisionId

    self.rebase = rebase
    self.groups = groups
    self.sync_c = sync_c
    self.sync_s = sync_s
    self.clone_depth = clone_depth
    self.upstream = upstream
    self.parent = parent
    self.is_derived = is_derived
    self.subprojects = []

    self.snapshots = {}
    self.copyfiles = []
    self.linkfiles = []
    self.annotations = []
    self.config = GitConfig.ForRepository(
        gitdir = self.gitdir,
        defaults = self.manifest.globalConfig)

    # Git command wrappers: work_git operates on the working tree (None for
    # mirrors), bare_git on the git directory, bare_objdir on the shared
    # object directory.
    if self.worktree:
      self.work_git = self._GitGetByExec(self, bare=False, gitdir=gitdir)
    else:
      self.work_git = None
    self.bare_git = self._GitGetByExec(self, bare=True, gitdir=gitdir)
    self.bare_ref = GitRefs(gitdir)
    self.bare_objdir = self._GitGetByExec(self, bare=True, gitdir=objdir)
    self.dest_branch = dest_branch

    # This will be filled in if a project is later identified to be the
    # project containing repo hooks.
    self.enabled_repo_hooks = []
  @property
  def Derived(self):
    """True if this project is a discovered submodule, not manifest-defined."""
    return self.is_derived
  @property
  def Exists(self):
    """True if the project's git directory exists on disk."""
    return os.path.isdir(self.gitdir)
@property
def CurrentBranch(self):
"""Obtain the name of the currently checked out branch.
The branch name omits the 'refs/heads/' prefix.
None is returned if the project is on a detached HEAD.
"""
b = self.work_git.GetHead()
if b.startswith(R_HEADS):
return b[len(R_HEADS):]
return None
  def IsRebaseInProgress(self):
    """True if an interrupted rebase (or legacy dotest) is pending here."""
    w = self.worktree
    g = os.path.join(w, '.git')
    # rebase-apply/rebase-merge are git's own state dirs; .dotest is the
    # marker left by very old git versions.
    return os.path.exists(os.path.join(g, 'rebase-apply')) \
        or os.path.exists(os.path.join(g, 'rebase-merge')) \
        or os.path.exists(os.path.join(w, '.dotest'))
  def IsDirty(self, consider_untracked=True):
    """Is the working directory modified in some way?

    Checks staged changes, unstaged changes and (optionally) untracked files.
    """
    # Refresh the index first so stat-only differences don't count as dirty.
    self.work_git.update_index('-q',
                               '--unmerged',
                               '--ignore-missing',
                               '--refresh')
    # Staged (index vs HEAD) changes.
    if self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD):
      return True
    # Unstaged (working tree vs index) changes.
    if self.work_git.DiffZ('diff-files'):
      return True
    # Untracked files.
    if consider_untracked and self.work_git.LsOthers():
      return True
    return False
_userident_name = None
_userident_email = None
  @property
  def UserName(self):
    """Obtain the user's personal name.

    Loaded lazily from git's committer identity on first access.
    """
    if self._userident_name is None:
      self._LoadUserIdentity()
    return self._userident_name
  @property
  def UserEmail(self):
    """Obtain the user's email address.  This is very likely
    to be their Gerrit login.

    Loaded lazily from git's committer identity on first access.
    """
    if self._userident_email is None:
      self._LoadUserIdentity()
    return self._userident_email
def _LoadUserIdentity(self):
u = self.bare_git.var('GIT_COMMITTER_IDENT')
m = re.compile("^(.*) <([^>]*)> ").match(u)
if m:
self._userident_name = m.group(1)
self._userident_email = m.group(2)
else:
self._userident_name = ''
self._userident_email = ''
  def GetRemote(self, name):
    """Get the configuration for a single remote.
    """
    return self.config.GetRemote(name)
  def GetBranch(self, name):
    """Get the configuration for a single branch.
    """
    return self.config.GetBranch(name)
  def GetBranches(self):
    """Get all existing local branches.

    Returns a dict of branch name -> branch config object, with `current`,
    `published` and `revision` filled in from the refs.
    """
    current = self.CurrentBranch
    all_refs = self._allrefs
    heads = {}

    # First pass: collect all local heads.
    for name, ref_id in all_refs.items():
      if name.startswith(R_HEADS):
        name = name[len(R_HEADS):]
        b = self.GetBranch(name)
        b.current = name == current
        b.published = None
        b.revision = ref_id
        heads[name] = b

    # Second pass: attach the published (uploaded-for-review) ref, if any.
    for name, ref_id in all_refs.items():
      if name.startswith(R_PUB):
        name = name[len(R_PUB):]
        b = heads.get(name)
        if b:
          b.published = ref_id

    return heads
def MatchesGroups(self, manifest_groups):
"""Returns true if the manifest groups specified at init should cause
this project to be synced.
Prefixing a manifest group with "-" inverts the meaning of a group.
All projects are implicitly labelled with "all".
labels are resolved in order. In the example case of
project_groups: "all,group1,group2"
manifest_groups: "-group1,group2"
the project will be matched.
The special manifest group "default" will match any project that
does not have the special project group "notdefault"
"""
expanded_manifest_groups = manifest_groups or ['default']
expanded_project_groups = ['all'] + (self.groups or [])
if not 'notdefault' in expanded_project_groups:
expanded_project_groups += ['default']
matched = False
for group in expanded_manifest_groups:
if group.startswith('-') and group[1:] in expanded_project_groups:
matched = False
elif group in expanded_project_groups:
matched = True
return matched
## Status Display ##
  def UncommitedFiles(self, get_all=True):
    """Returns a list of strings, uncommitted files in the git tree.

    Args:
      get_all: a boolean, if True - get information about all different
               uncommitted files. If False - return as soon as any kind of
               uncommitted files is detected.
    """
    details = []
    # Refresh the index so stat-only differences are not reported.
    self.work_git.update_index('-q',
                               '--unmerged',
                               '--ignore-missing',
                               '--refresh')
    if self.IsRebaseInProgress():
      details.append("rebase in progress")
      if not get_all:
        return details

    # Staged (index vs HEAD) changes.
    changes = self.work_git.DiffZ('diff-index', '--cached', HEAD).keys()
    if changes:
      details.extend(changes)
      if not get_all:
        return details

    # Unstaged (working tree vs index) changes.
    changes = self.work_git.DiffZ('diff-files').keys()
    if changes:
      details.extend(changes)
      if not get_all:
        return details

    # Untracked files.
    changes = self.work_git.LsOthers()
    if changes:
      details.extend(changes)

    return details
def HasChanges(self):
"""Returns true if there are uncommitted changes.
"""
if self.UncommitedFiles(get_all=False):
return True
else:
return False
def PrintWorkTreeStatus(self, output_redir=None):
"""Prints the status of the repository to stdout.
Args:
output: If specified, redirect the output to this object.
"""
if not os.path.isdir(self.worktree):
if output_redir == None:
output_redir = sys.stdout
print(file=output_redir)
print('project %s/' % self.relpath, file=output_redir)
print(' missing (run "repo sync")', file=output_redir)
return
self.work_git.update_index('-q',
'--unmerged',
'--ignore-missing',
'--refresh')
rb = self.IsRebaseInProgress()
di = self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD)
df = self.work_git.DiffZ('diff-files')
do = self.work_git.LsOthers()
if not rb and not di and not df and not do and not self.CurrentBranch:
return 'CLEAN'
out = StatusColoring(self.config)
if not output_redir == None:
out.redirect(output_redir)
out.project('project %-40s', self.relpath + '/')
branch = self.CurrentBranch
if branch is None:
out.nobranch('(*** NO BRANCH ***)')
else:
out.branch('branch %s', branch)
out.nl()
if rb:
out.important('prior sync failed; rebase still in progress')
out.nl()
paths = list()
paths.extend(di.keys())
paths.extend(df.keys())
paths.extend(do)
for p in sorted(set(paths)):
try:
i = di[p]
except KeyError:
i = None
try:
f = df[p]
except KeyError:
f = None
if i:
i_status = i.status.upper()
else:
i_status = '-'
if f:
f_status = f.status.lower()
else:
f_status = '-'
if i and i.src_path:
line = ' %s%s\t%s => %s (%s%%)' % (i_status, f_status,
i.src_path, p, i.level)
else:
line = ' %s%s\t%s' % (i_status, f_status, p)
if i and not f:
out.added('%s', line)
elif (i and f) or (not i and f):
out.changed('%s', line)
elif not i and not f:
out.untracked('%s', line)
else:
out.write('%s', line)
out.nl()
return 'DIRTY'
  def PrintWorkTreeDiff(self, absolute_paths=False):
    """Prints the diff of the working tree against HEAD to stdout.

    Args:
      absolute_paths: If True, prefix diff paths with the project's relpath
          so they resolve from repo's top directory.
    """
    out = DiffColoring(self.config)
    cmd = ['diff']
    if out.is_on:
      cmd.append('--color')
    cmd.append(HEAD)
    if absolute_paths:
      cmd.append('--src-prefix=a/%s/' % self.relpath)
      cmd.append('--dst-prefix=b/%s/' % self.relpath)
    cmd.append('--')
    p = GitCommand(self,
                   cmd,
                   capture_stdout = True,
                   capture_stderr = True)
    has_diff = False
    for line in p.process.stdout:
      if not has_diff:
        # Print the project banner only once, and only if there is output.
        out.nl()
        out.project('project %s/' % self.relpath)
        out.nl()
        has_diff = True
      print(line[:-1])
    p.Wait()
## Publish / Upload ##
def WasPublished(self, branch, all_refs=None):
"""Was the branch published (uploaded) for code review?
If so, returns the SHA-1 hash of the last published
state for the branch.
"""
key = R_PUB + branch
if all_refs is None:
try:
return self.bare_git.rev_parse(key)
except GitError:
return None
else:
try:
return all_refs[key]
except KeyError:
return None
  def CleanPublishedCache(self, all_refs=None):
    """Prunes any stale published refs.

    A published ref (refs/published/X) is stale once the corresponding
    local head (refs/heads/X) no longer exists.
    """
    if all_refs is None:
      all_refs = self._allrefs
    heads = set()
    canrm = {}
    for name, ref_id in all_refs.items():
      if name.startswith(R_HEADS):
        heads.add(name)
      elif name.startswith(R_PUB):
        canrm[name] = ref_id

    for name, ref_id in canrm.items():
      n = name[len(R_PUB):]
      if R_HEADS + n not in heads:
        self.bare_git.DeleteRef(name, ref_id)
  def GetUploadableBranches(self, selected_branch=None):
    """List any branches which can be uploaded for review.

    Args:
      selected_branch: If given, consider only this branch name.

    Returns:
      A list of ReviewableBranch objects with unpublished commits.
    """
    heads = {}
    pubed = {}

    for name, ref_id in self._allrefs.items():
      if name.startswith(R_HEADS):
        heads[name[len(R_HEADS):]] = ref_id
      elif name.startswith(R_PUB):
        pubed[name[len(R_PUB):]] = ref_id

    ready = []
    for branch, ref_id in heads.items():
      # Skip branches whose tip was already published.
      if branch in pubed and pubed[branch] == ref_id:
        continue
      if selected_branch and branch != selected_branch:
        continue

      rb = self.GetUploadableBranch(branch)
      if rb:
        ready.append(rb)
    return ready
def GetUploadableBranch(self, branch_name):
"""Get a single uploadable branch, or None.
"""
branch = self.GetBranch(branch_name)
base = branch.LocalMerge
if branch.LocalMerge:
rb = ReviewableBranch(self, branch, base)
if rb.commits:
return rb
return None
  def UploadForReview(self, branch=None,
                      people=([],[]),
                      auto_topic=False,
                      draft=False,
                      dest_branch=None):
    """Uploads the named branch for code review.

    Args:
      branch: Local branch name; defaults to the currently checked out branch.
      people: Pair of (reviewer, cc) email address lists.
      auto_topic: If True, append '/<branch>' to the refspec so the review
          server can derive a topic from the branch name.
      draft: If True, push to 'refs/drafts/...' instead of 'refs/for/...'.
      dest_branch: Target branch on the server; falls back to the project's
          dest_branch, then to the branch's merge setting.

    Raises:
      GitError: Not on a branch, or the branch lacks tracking/review config.
      UploadError: Review URL missing or the push itself failed.
    """
    if branch is None:
      branch = self.CurrentBranch
    if branch is None:
      raise GitError('not currently on a branch')

    branch = self.GetBranch(branch)
    if not branch.LocalMerge:
      raise GitError('branch %s does not track a remote' % branch.name)
    if not branch.remote.review:
      raise GitError('remote %s has no review url' % branch.remote.name)

    if dest_branch is None:
      dest_branch = self.dest_branch
    if dest_branch is None:
      dest_branch = branch.merge
    if not dest_branch.startswith(R_HEADS):
      dest_branch = R_HEADS + dest_branch

    if not branch.remote.projectname:
      branch.remote.projectname = self.name
      branch.remote.Save()

    url = branch.remote.ReviewUrl(self.UserEmail)
    if url is None:
      raise UploadError('review not configured')
    cmd = ['push']

    if url.startswith('ssh://'):
      # Over ssh, reviewers/ccs are passed as receive-pack options.
      rp = ['gerrit receive-pack']
      for e in people[0]:
        rp.append('--reviewer=%s' % sq(e))
      for e in people[1]:
        rp.append('--cc=%s' % sq(e))
      cmd.append('--receive-pack=%s' % " ".join(rp))

    cmd.append(url)

    if dest_branch.startswith(R_HEADS):
      dest_branch = dest_branch[len(R_HEADS):]

    upload_type = 'for'
    if draft:
      upload_type = 'drafts'

    ref_spec = '%s:refs/%s/%s' % (R_HEADS + branch.name, upload_type,
                                  dest_branch)
    if auto_topic:
      ref_spec = ref_spec + '/' + branch.name

    if not url.startswith('ssh://'):
      # Over http(s), reviewers/ccs ride along in the refspec instead.
      rp = ['r=%s' % p for p in people[0]] + \
           ['cc=%s' % p for p in people[1]]
      if rp:
        ref_spec = ref_spec + '%' + ','.join(rp)

    cmd.append(ref_spec)

    if GitCommand(self, cmd, bare = True).Wait() != 0:
      raise UploadError('Upload failed')

    # Record the uploaded state so WasPublished()/status can report it.
    msg = "posted to %s for %s" % (branch.remote.review, dest_branch)
    self.bare_git.UpdateRef(R_PUB + branch.name,
                            R_HEADS + branch.name,
                            message = msg)
## Sync ##
  def _ExtractArchive(self, tarpath, path=None):
    """Extract the given tar on its current location

    Args:
      - tarpath: The path to the actual tar file
      - path: Directory to extract into (None extracts into the
        current directory)

    Returns:
      True on success, False (with a message on stderr) on failure.
    """
    try:
      # NOTE(review): extractall() trusts member paths from the archive;
      # a hostile archive could write outside `path`.  Presumably the
      # server is trusted here -- confirm before using with other sources.
      with tarfile.open(tarpath, 'r') as tar:
        tar.extractall(path=path)
        return True
    except (IOError, tarfile.TarError) as e:
      print("error: Cannot extract archive %s: "
            "%s" % (tarpath, str(e)), file=sys.stderr)
    return False
  def Sync_NetworkHalf(self,
      quiet=False,
      is_new=None,
      current_branch_only=False,
      clone_bundle=True,
      no_tags=False,
      archive=False):
    """Perform only the network IO portion of the sync process.
       Local working directory/branch state is not affected.

    Returns True on success, False on any failure.
    """
    # Archive mode: download a tar snapshot instead of git history.
    if archive and not isinstance(self, MetaProject):
      if self.remote.url.startswith(('http://', 'https://')):
        print("error: %s: Cannot fetch archives from http/https "
              "remotes." % self.name, file=sys.stderr)
        return False

      name = self.relpath.replace('\\', '/')
      name = name.replace('/', '_')
      tarpath = '%s.tar' % name
      topdir = self.manifest.topdir

      try:
        self._FetchArchive(tarpath, cwd=topdir)
      except GitError as e:
        print('error: %s' % str(e), file=sys.stderr)
        return False

      # From now on, we only need absolute tarpath
      tarpath = os.path.join(topdir, tarpath)

      if not self._ExtractArchive(tarpath, path=topdir):
        return False
      try:
        os.remove(tarpath)
      except OSError as e:
        # Best effort: extraction already succeeded, so only warn.
        print("warn: Cannot remove archive %s: "
              "%s" % (tarpath, str(e)), file=sys.stderr)
      self._CopyAndLinkFiles()
      return True

    if is_new is None:
      is_new = not self.Exists
    if is_new:
      self._InitGitDir()
    else:
      self._UpdateHooks()
    self._InitRemote()

    # A pre-existing alternates file points at a shared object store;
    # its presence disables the clone-bundle optimization below.
    if is_new:
      alt = os.path.join(self.gitdir, 'objects/info/alternates')
      try:
        fd = open(alt, 'rb')
        try:
          alt_dir = fd.readline().rstrip()
        finally:
          fd.close()
      except IOError:
        alt_dir = None
    else:
      alt_dir = None

    if clone_bundle \
        and alt_dir is None \
        and self._ApplyCloneBundle(initial=is_new, quiet=quiet):
      is_new = False

    if not current_branch_only:
      if self.sync_c:
        current_branch_only = True
      elif not self.manifest._loaded:
        # Manifest cannot check defaults until it syncs.
        current_branch_only = False
      elif self.manifest.default.sync_c:
        current_branch_only = True

    has_sha1 = ID_RE.match(self.revisionExpr) and self._CheckForSha1()
    if (not has_sha1  # Need to fetch since we don't already have this revision
        and not self._RemoteFetch(initial=is_new, quiet=quiet, alt_dir=alt_dir,
                                  current_branch_only=current_branch_only,
                                  no_tags=no_tags)):
      return False

    if self.worktree:
      self._InitMRef()
    else:
      self._InitMirrorHead()
      try:
        os.remove(os.path.join(self.gitdir, 'FETCH_HEAD'))
      except OSError:
        pass
    return True
  def PostRepoUpgrade(self):
    """Re-install hooks after the repo tool itself has been upgraded."""
    self._InitHooks()
  def _CopyAndLinkFiles(self):
    """Apply all recorded <copyfile> and <linkfile> elements to the tree."""
    for copyfile in self.copyfiles:
      copyfile._Copy()
    for linkfile in self.linkfiles:
      linkfile._Link()
  def GetCommitRevisionId(self):
    """Get revisionId of a commit.

    Use this method instead of GetRevisionId to get the id of the commit rather
    than the id of the current git object (for example, a tag)

    Raises:
      ManifestInvalidRevisionError: The tag revision cannot be resolved.
    """
    if not self.revisionExpr.startswith(R_TAGS):
      return self.GetRevisionId(self._allrefs)

    try:
      # Peel the tag to the commit it points at.
      return self.bare_git.rev_list(self.revisionExpr, '-1')[0]
    except GitError:
      raise ManifestInvalidRevisionError(
        'revision %s in %s not found' % (self.revisionExpr,
                                         self.name))
  def GetRevisionId(self, all_refs=None):
    """Resolve the manifest revision for this project to a commit id.

    Args:
      all_refs: Optional pre-loaded ref map used as a fast path.

    Raises:
      ManifestInvalidRevisionError: The revision cannot be resolved.
    """
    # An explicit revision id from the manifest wins outright.
    if self.revisionId:
      return self.revisionId

    rem = self.GetRemote(self.remote.name)
    rev = rem.ToLocal(self.revisionExpr)

    if all_refs is not None and rev in all_refs:
      return all_refs[rev]

    try:
      # ^0 peels tags so the result is always a commit id.
      return self.bare_git.rev_parse('--verify', '%s^0' % rev)
    except GitError:
      raise ManifestInvalidRevisionError(
        'revision %s in %s not found' % (self.revisionExpr,
                                         self.name))
  def Sync_LocalHalf(self, syncbuf):
    """Perform only the local IO portion of the sync process.
       Network access is not required.

    Outcomes are reported through `syncbuf` (info/fail/deferred actions).
    """
    self._InitWorkTree()
    all_refs = self.bare_ref.all
    self.CleanPublishedCache(all_refs)
    revid = self.GetRevisionId(all_refs)

    # Deferred fast-forward action, scheduled via syncbuf.later1().
    def _doff():
      self._FastForward(revid)
      self._CopyAndLinkFiles()

    head = self.work_git.GetHead()
    if head.startswith(R_HEADS):
      branch = head[len(R_HEADS):]
      try:
        head = all_refs[head]
      except KeyError:
        head = None
    else:
      branch = None

    if branch is None or syncbuf.detach_head:
      # Currently on a detached HEAD.  The user is assumed to
      # not have any local modifications worth worrying about.
      #
      if self.IsRebaseInProgress():
        syncbuf.fail(self, _PriorSyncFailedError())
        return

      if head == revid:
        # No changes; don't do anything further.
        # Except if the head needs to be detached
        #
        if not syncbuf.detach_head:
          return
      else:
        lost = self._revlist(not_rev(revid), HEAD)
        if lost:
          syncbuf.info(self, "discarding %d commits", len(lost))

      try:
        self._Checkout(revid, quiet=True)
      except GitError as e:
        syncbuf.fail(self, e)
        return
      self._CopyAndLinkFiles()
      return

    if head == revid:
      # No changes; don't do anything further.
      #
      return

    branch = self.GetBranch(branch)

    if not branch.LocalMerge:
      # The current branch has no tracking configuration.
      # Jump off it to a detached HEAD.
      #
      syncbuf.info(self,
                   "leaving %s; does not track upstream",
                   branch.name)
      try:
        self._Checkout(revid, quiet=True)
      except GitError as e:
        syncbuf.fail(self, e)
        return
      self._CopyAndLinkFiles()
      return

    upstream_gain = self._revlist(not_rev(HEAD), revid)
    pub = self.WasPublished(branch.name, all_refs)
    if pub:
      not_merged = self._revlist(not_rev(revid), pub)
      if not_merged:
        if upstream_gain:
          # The user has published this branch and some of those
          # commits are not yet merged upstream.  We do not want
          # to rewrite the published commits so we punt.
          #
          syncbuf.fail(self,
                       "branch %s is published (but not merged) and is now %d commits behind"
                       % (branch.name, len(upstream_gain)))
          return
      elif pub == head:
        # All published commits are merged, and thus we are a
        # strict subset.  We can fast-forward safely.
        #
        syncbuf.later1(self, _doff)
        return

    # Examine the local commits not in the remote.  Find the
    # last one attributed to this user, if any.
    #
    local_changes = self._revlist(not_rev(revid), HEAD, format='%H %ce')
    last_mine = None
    cnt_mine = 0
    for commit in local_changes:
      # NOTE(review): decode() implies _revlist yields bytes here; on a
      # Python 3 str this would raise -- confirm what _revlist returns.
      commit_id, committer_email = commit.decode('utf-8').split(' ', 1)
      if committer_email == self.UserEmail:
        last_mine = commit_id
        cnt_mine += 1

    if not upstream_gain and cnt_mine == len(local_changes):
      return

    if self.IsDirty(consider_untracked=False):
      syncbuf.fail(self, _DirtyError())
      return

    # If the upstream switched on us, warn the user.
    #
    if branch.merge != self.revisionExpr:
      if branch.merge and self.revisionExpr:
        syncbuf.info(self,
                     'manifest switched %s...%s',
                     branch.merge,
                     self.revisionExpr)
      elif branch.merge:
        syncbuf.info(self,
                     'manifest no longer tracks %s',
                     branch.merge)

    if cnt_mine < len(local_changes):
      # Upstream rebased.  Not everything in HEAD
      # was created by this user.
      #
      syncbuf.info(self,
                   "discarding %d commits removed from upstream",
                   len(local_changes) - cnt_mine)

    branch.remote = self.GetRemote(self.remote.name)
    if not ID_RE.match(self.revisionExpr):
      # in case of manifest sync the revisionExpr might be a SHA1
      branch.merge = self.revisionExpr
    branch.Save()

    if cnt_mine > 0 and self.rebase:
      # Replay only this user's commits on top of the new upstream.
      def _dorebase():
        self._Rebase(upstream = '%s^1' % last_mine, onto = revid)
        self._CopyAndLinkFiles()
      syncbuf.later2(self, _dorebase)
    elif local_changes:
      # Foreign local commits only: discard them and reset to upstream.
      try:
        self._ResetHard(revid)
        self._CopyAndLinkFiles()
      except GitError as e:
        syncbuf.fail(self, e)
        return
    else:
      syncbuf.later1(self, _doff)
def AddCopyFile(self, src, dest, absdest):
  """Register a file to be copied out of the project at checkout time.

  |src| is relative to the project work tree; |dest|/|absdest| are the
  destination as given in the manifest and as an absolute path.
  """
  # Resolve the project-relative source to an absolute path.
  source_abs = os.path.join(self.worktree, src)
  self.copyfiles.append(_CopyFile(src, dest, source_abs, absdest))
def AddLinkFile(self, src, dest, absdest):
  """Register a symlink to be created for the project at checkout time.

  |src| is relative to the project work tree; |dest|/|absdest| are the
  link location as given in the manifest and as an absolute path.
  """
  # Resolve the project-relative source to an absolute path.
  source_abs = os.path.join(self.worktree, src)
  self.linkfiles.append(_LinkFile(src, dest, source_abs, absdest))
def AddAnnotation(self, name, value, keep):
  """Attach a name/value annotation to this project."""
  annotation = _Annotation(name, value, keep)
  self.annotations.append(annotation)
def DownloadPatchSet(self, change_id, patch_id):
  """Download a single patch set of a single change to FETCH_HEAD.

  Args:
    change_id: Numeric Gerrit change number.
    patch_id: Patch set number within that change.

  Returns:
    A DownloadedChange on success, or None if the fetch failed.
  """
  remote = self.GetRemote(self.remote.name)

  cmd = ['fetch', remote.name]
  # Gerrit stores patch sets at refs/changes/<last-two-digits>/<change>/<patch>.
  cmd.append('refs/changes/%2.2d/%d/%d' \
             % (change_id % 100, change_id, patch_id))
  if GitCommand(self, cmd, bare=True).Wait() != 0:
    return None
  return DownloadedChange(self,
                          self.GetRevisionId(),
                          change_id,
                          patch_id,
                          self.bare_git.rev_parse('FETCH_HEAD'))
## Branch Management ##
def StartBranch(self, name):
  """Create a new branch off the manifest's revision.

  Returns True on success (including when the branch already exists and
  was checked out), False if git failed to create/checkout the branch.
  """
  head = self.work_git.GetHead()
  if head == (R_HEADS + name):
    # Already on this branch; nothing to do.
    return True

  all_refs = self.bare_ref.all
  if (R_HEADS + name) in all_refs:
    # Branch exists but is not checked out; just switch to it.
    return GitCommand(self,
                      ['checkout', name, '--'],
                      capture_stdout = True,
                      capture_stderr = True).Wait() == 0

  branch = self.GetBranch(name)
  branch.remote = self.GetRemote(self.remote.name)
  branch.merge = self.revisionExpr
  revid = self.GetRevisionId(all_refs)

  if head.startswith(R_HEADS):
    # Resolve the symbolic HEAD to a commit id, if the ref exists.
    try:
      head = all_refs[head]
    except KeyError:
      head = None

  if revid and head and revid == head:
    # The new branch would point at the current commit; write the ref and
    # HEAD by hand rather than paying for a `git checkout -b`.
    ref = os.path.join(self.gitdir, R_HEADS + name)
    try:
      os.makedirs(os.path.dirname(ref))
    except OSError:
      # Parent directory already exists.
      pass
    _lwrite(ref, '%s\n' % revid)
    _lwrite(os.path.join(self.worktree, '.git', HEAD),
            'ref: %s%s\n' % (R_HEADS, name))
    branch.Save()
    return True

  if GitCommand(self,
                ['checkout', '-b', branch.name, revid],
                capture_stdout = True,
                capture_stderr = True).Wait() == 0:
    branch.Save()
    return True
  return False
def CheckoutBranch(self, name):
  """Checkout a local topic branch.

  Args:
    name: The name of the branch to checkout.

  Returns:
    True if the checkout succeeded; False if it didn't; None if the branch
    didn't exist.
  """
  rev = R_HEADS + name
  head = self.work_git.GetHead()
  if head == rev:
    # Already on the branch
    #
    return True

  all_refs = self.bare_ref.all
  try:
    revid = all_refs[rev]
  except KeyError:
    # Branch does not exist in this project
    #
    return None

  if head.startswith(R_HEADS):
    # Resolve symbolic HEAD to the commit id it points at, if any.
    try:
      head = all_refs[head]
    except KeyError:
      head = None

  if head == revid:
    # Same revision; just update HEAD to point to the new
    # target branch, but otherwise take no other action.
    #
    _lwrite(os.path.join(self.worktree, '.git', HEAD),
            'ref: %s%s\n' % (R_HEADS, name))
    return True

  # Different revision: let git move the work tree.
  return GitCommand(self,
                    ['checkout', name, '--'],
                    capture_stdout = True,
                    capture_stderr = True).Wait() == 0
def AbandonBranch(self, name):
  """Destroy a local topic branch.

  Args:
    name: The name of the branch to abandon.

  Returns:
    True if the abandon succeeded; False if it didn't; None if the branch
    didn't exist.
  """
  rev = R_HEADS + name
  all_refs = self.bare_ref.all
  if rev not in all_refs:
    # Doesn't exist
    return None

  head = self.work_git.GetHead()
  if head == rev:
    # We can't destroy the branch while we are sitting
    # on it.  Switch to a detached HEAD.
    #
    head = all_refs[head]

    revid = self.GetRevisionId(all_refs)
    if head == revid:
      # HEAD already points at the manifest revision; rewrite the HEAD
      # file directly instead of running a checkout.
      _lwrite(os.path.join(self.worktree, '.git', HEAD),
              '%s\n' % revid)
    else:
      self._Checkout(revid, quiet=True)

  return GitCommand(self,
                    ['branch', '-D', name],
                    capture_stdout = True,
                    capture_stderr = True).Wait() == 0
def PruneHeads(self):
  """Prune any topic branches already merged into upstream.

  Returns a list of ReviewableBranch for the branches that could NOT be
  deleted (they still contain unmerged work).
  """
  cb = self.CurrentBranch
  kill = []
  left = self._allrefs
  # Candidate set: every local head except the currently checked-out one.
  for name in left.keys():
    if name.startswith(R_HEADS):
      name = name[len(R_HEADS):]
      if cb is None or name != cb:
        kill.append(name)

  rev = self.GetRevisionId(left)
  # The current branch is also a candidate when it is clean and has no
  # commits in either direction relative to the manifest revision.
  if cb is not None \
     and not self._revlist(HEAD + '...' + rev) \
     and not self.IsDirty(consider_untracked = False):
    self.work_git.DetachHead(HEAD)
    kill.append(cb)

  if kill:
    old = self.bare_git.GetHead()
    if old is None:
      # Sentinel so we can restore a previously-unborn HEAD afterwards.
      old = 'refs/heads/please_never_use_this_as_a_branch_name'

    try:
      # Detach so `git branch -d` can delete even the current branch;
      # -d (not -D) refuses to drop branches with unmerged commits.
      self.bare_git.DetachHead(rev)

      b = ['branch', '-d']
      b.extend(kill)
      b = GitCommand(self, b, bare=True,
                     capture_stdout=True,
                     capture_stderr=True)
      b.Wait()
    finally:
      self.bare_git.SetHead(old)
      left = self._allrefs

    # If anything was actually deleted, published-state caches are stale.
    for branch in kill:
      if (R_HEADS + branch) not in left:
        self.CleanPublishedCache()
        break

  if cb and cb not in kill:
    kill.append(cb)
  kill.sort()

  # Whatever survived deletion still has work in it; report it.
  kept = []
  for branch in kill:
    if (R_HEADS + branch) in left:
      branch = self.GetBranch(branch)
      base = branch.LocalMerge
      if not base:
        base = rev
      kept.append(ReviewableBranch(self, branch, base))
  return kept
## Submodule Management ##
def GetRegisteredSubprojects(self):
  """Return every registered subproject, recursively.

  Each level's projects appear before their descendants.
  """
  def _collect(projects, acc):
    # Empty/None level: nothing to add.
    if not projects:
      return
    acc.extend(projects)
    for child in projects:
      _collect(child.subprojects, acc)

  found = []
  _collect(self.subprojects, found)
  return found
def _GetSubmodules(self):
  """Return [(rev, path, url), ...] for submodules at this project's revision."""
  # Unfortunately we cannot call `git submodule status --recursive` here
  # because the working tree might not exist yet, and it cannot be used
  # without a working tree in its current implementation.

  def get_submodules(gitdir, rev):
    # Parse .gitmodules for submodule sub_paths and sub_urls
    sub_paths, sub_urls = parse_gitmodules(gitdir, rev)
    if not sub_paths:
      return []
    # Run `git ls-tree` to read SHAs of submodule object, which happen to be
    # revision of submodule repository
    sub_revs = git_ls_tree(gitdir, rev, sub_paths)
    submodules = []
    for sub_path, sub_url in zip(sub_paths, sub_urls):
      try:
        sub_rev = sub_revs[sub_path]
      except KeyError:
        # Ignore non-exist submodules
        continue
      submodules.append((sub_rev, sub_path, sub_url))
    return submodules

  re_path = re.compile(r'^submodule\.([^.]+)\.path=(.*)$')
  re_url = re.compile(r'^submodule\.([^.]+)\.url=(.*)$')
  def parse_gitmodules(gitdir, rev):
    # Read .gitmodules out of the object store (no work tree needed),
    # then let `git config --list` parse it for us.
    cmd = ['cat-file', 'blob', '%s:.gitmodules' % rev]
    try:
      p = GitCommand(None, cmd, capture_stdout = True, capture_stderr = True,
                     bare = True, gitdir = gitdir)
    except GitError:
      return [], []
    if p.Wait() != 0:
      return [], []

    gitmodules_lines = []
    fd, temp_gitmodules_path = tempfile.mkstemp()
    try:
      os.write(fd, p.stdout)
      os.close(fd)
      cmd = ['config', '--file', temp_gitmodules_path, '--list']
      p = GitCommand(None, cmd, capture_stdout = True, capture_stderr = True,
                     bare = True, gitdir = gitdir)
      if p.Wait() != 0:
        return [], []
      gitmodules_lines = p.stdout.split('\n')
    except GitError:
      return [], []
    finally:
      os.remove(temp_gitmodules_path)

    names = set()
    paths = {}
    urls = {}
    for line in gitmodules_lines:
      if not line:
        continue
      m = re_path.match(line)
      if m:
        names.add(m.group(1))
        paths[m.group(1)] = m.group(2)
        continue
      m = re_url.match(line)
      if m:
        names.add(m.group(1))
        urls[m.group(1)] = m.group(2)
        continue
    names = sorted(names)
    # Keep paths and urls parallel, defaulting to '' for missing keys.
    return ([paths.get(name, '') for name in names],
            [urls.get(name, '') for name in names])

  def git_ls_tree(gitdir, rev, paths):
    # Map each submodule path to the gitlink SHA recorded in the tree.
    cmd = ['ls-tree', rev, '--']
    cmd.extend(paths)
    try:
      p = GitCommand(None, cmd, capture_stdout = True, capture_stderr = True,
                     bare = True, gitdir = gitdir)
    except GitError:
      return []
    if p.Wait() != 0:
      return []
    objects = {}
    for line in p.stdout.split('\n'):
      if not line.strip():
        continue
      # ls-tree line: <mode> <type> <sha>\t<path>
      # NOTE(review): split() also breaks on the tab, but paths containing
      # whitespace would be truncated here -- confirm acceptable.
      object_rev, object_path = line.split()[2:4]
      objects[object_path] = object_rev
    return objects

  try:
    rev = self.GetRevisionId()
  except GitError:
    return []
  return get_submodules(self.gitdir, rev)
def GetDerivedSubprojects(self):
  """Return Project objects derived from this project's git submodules."""
  result = []
  if not self.Exists:
    # If git repo does not exist yet, querying its submodules will
    # mess up its states; so return here.
    return result
  for rev, path, url in self._GetSubmodules():
    name = self.manifest.GetSubprojectName(self, path)
    relpath, worktree, gitdir, objdir = \
        self.manifest.GetSubprojectPaths(self, name, path)
    project = self.manifest.paths.get(relpath)
    if project:
      # Path already covered by an explicit manifest project; recurse into
      # it instead of creating a derived duplicate.
      result.extend(project.GetDerivedSubprojects())
      continue

    remote = RemoteSpec(self.remote.name,
                        url = url,
                        review = self.remote.review,
                        revision = self.remote.revision)
    # Derived subprojects are pinned to the gitlink SHA (revisionId=rev)
    # and inherit sync policy/groups from the parent.
    subproject = Project(manifest = self.manifest,
                         name = name,
                         remote = remote,
                         gitdir = gitdir,
                         objdir = objdir,
                         worktree = worktree,
                         relpath = relpath,
                         revisionExpr = self.revisionExpr,
                         revisionId = rev,
                         rebase = self.rebase,
                         groups = self.groups,
                         sync_c = self.sync_c,
                         sync_s = self.sync_s,
                         parent = self,
                         is_derived = True)
    result.append(subproject)
    result.extend(subproject.GetDerivedSubprojects())
  return result
## Direct Git Commands ##
def _CheckForSha1(self):
try:
# if revision (sha or tag) is not present then following function
# throws an error.
self.bare_git.rev_parse('--verify', '%s^0' % self.revisionExpr)
return True
except GitError:
# There is no such persistent revision. We have to fetch it.
return False
def _FetchArchive(self, tarpath, cwd=None):
  """Download this project's revision from the remote as a tar archive.

  Raises GitError if `git archive` fails.
  """
  cmd = ['archive', '-v', '-o', tarpath,
         '--remote=%s' % self.remote.url,
         '--prefix=%s/' % self.relpath,
         self.revisionExpr]
  command = GitCommand(self, cmd, cwd=cwd,
                       capture_stdout=True,
                       capture_stderr=True)
  if command.Wait() != 0:
    raise GitError('git archive %s: %s' % (self.name, command.stderr))
def _RemoteFetch(self, name=None,
                 current_branch_only=False,
                 initial=False,
                 quiet=False,
                 alt_dir=None,
                 no_tags=False):
  """Fetch this project's refs from its remote.

  Returns True on success.  May recurse once with current_branch_only=False
  when a single-branch sha1 fetch did not produce the wanted commit.
  """
  is_sha1 = False
  tag_name = None
  depth = None

  # The depth should not be used when fetching to a mirror because
  # it will result in a shallow repository that cannot be cloned or
  # fetched from.
  if not self.manifest.IsMirror:
    if self.clone_depth:
      depth = self.clone_depth
    else:
      depth = self.manifest.manifestProject.config.GetString('repo.depth')
    if depth:
      # A shallow fetch only makes sense for a single branch.
      current_branch_only = True

  if ID_RE.match(self.revisionExpr) is not None:
    is_sha1 = True

  if current_branch_only:
    if self.revisionExpr.startswith(R_TAGS):
      # this is a tag and its sha1 value should never change
      tag_name = self.revisionExpr[len(R_TAGS):]

    if is_sha1 or tag_name is not None:
      if self._CheckForSha1():
        # Pinned revision already present locally; no network needed.
        return True
    if is_sha1 and (not self.upstream or ID_RE.match(self.upstream)):
      # Without a usable upstream branch name there is nothing narrow to
      # fetch; fall back to fetching everything.
      current_branch_only = False

  if not name:
    name = self.remote.name

  ssh_proxy = False
  remote = self.GetRemote(name)
  if remote.PreConnectFetch():
    ssh_proxy = True

  if initial:
    if alt_dir and 'objects' == os.path.basename(alt_dir):
      # Seed packed-refs with refs from the alternate (reference) repo so
      # the fetch can reuse its objects; restored/pruned below.
      ref_dir = os.path.dirname(alt_dir)
      packed_refs = os.path.join(self.gitdir, 'packed-refs')
      remote = self.GetRemote(name)

      all_refs = self.bare_ref.all
      ids = set(all_refs.values())
      tmp = set()

      for r, ref_id in GitRefs(ref_dir).all.items():
        if r not in all_refs:
          if r.startswith(R_TAGS) or remote.WritesTo(r):
            all_refs[r] = ref_id
            ids.add(ref_id)
            continue

        if ref_id in ids:
          continue

        # Expose the object under a throwaway ref so git can see it.
        r = 'refs/_alt/%s' % ref_id
        all_refs[r] = ref_id
        ids.add(ref_id)
        tmp.add(r)

      tmp_packed = ''
      old_packed = ''

      for r in sorted(all_refs):
        line = '%s %s\n' % (all_refs[r], r)
        tmp_packed += line
        if r not in tmp:
          old_packed += line

      _lwrite(packed_refs, tmp_packed)
    else:
      alt_dir = None

  cmd = ['fetch']

  # The --depth option only affects the initial fetch; after that we'll do
  # full fetches of changes.
  if depth and initial:
    cmd.append('--depth=%s' % depth)

  if quiet:
    cmd.append('--quiet')
  if not self.worktree:
    cmd.append('--update-head-ok')
  cmd.append(name)

  # If using depth then we should not get all the tags since they may
  # be outside of the depth.
  if no_tags or depth:
    cmd.append('--no-tags')
  else:
    cmd.append('--tags')

  spec = []
  if not current_branch_only:
    # Fetch whole repo
    spec.append(str((u'+refs/heads/*:') + remote.ToLocal('refs/heads/*')))
  elif tag_name is not None:
    spec.append('tag')
    spec.append(tag_name)

  branch = self.revisionExpr
  if is_sha1:
    # Can't fetch a raw sha1 directly; fetch its upstream branch instead.
    branch = self.upstream
  if branch is not None and branch.strip():
    if not branch.startswith('refs/'):
      branch = R_HEADS + branch
    spec.append(str((u'+%s:' % branch) + remote.ToLocal(branch)))
  cmd.extend(spec)

  # If the refspec we previously did a shallow fetch with changed,
  # unshallow the old one before fetching the new.
  shallowfetch = self.config.GetString('repo.shallowfetch')
  if shallowfetch and shallowfetch != ' '.join(spec):
    GitCommand(self, ['fetch', '--unshallow', name] + shallowfetch.split(),
               bare=True, ssh_proxy=ssh_proxy).Wait()
  if depth:
    self.config.SetString('repo.shallowfetch', ' '.join(spec))
  else:
    self.config.SetString('repo.shallowfetch', None)

  ok = False
  for _i in range(2):
    ret = GitCommand(self, cmd, bare=True, ssh_proxy=ssh_proxy).Wait()
    if ret == 0:
      ok = True
      break
    elif current_branch_only and is_sha1 and ret == 128:
      # Exit code 128 means "couldn't find the ref you asked for"; if we're in sha1
      # mode, we just tried sync'ing from the upstream field; it doesn't exist, thus
      # abort the optimization attempt and do a full sync.
      break
    # Transient failure: back off before the single retry.
    time.sleep(random.randint(30, 45))

  if initial:
    if alt_dir:
      # Restore packed-refs to its pre-seeding content and let git prune.
      if old_packed != '':
        _lwrite(packed_refs, old_packed)
      else:
        os.remove(packed_refs)
    self.bare_git.pack_refs('--all', '--prune')

  if is_sha1 and current_branch_only and self.upstream:
    # We just synced the upstream given branch; verify we
    # got what we wanted, else trigger a second run of all
    # refs.
    if not self._CheckForSha1():
      return self._RemoteFetch(name=name, current_branch_only=False,
                               initial=False, quiet=quiet, alt_dir=alt_dir)

  return ok
def _ApplyCloneBundle(self, initial=False, quiet=False):
  """Bootstrap the repository from <remote>/clone.bundle, if available.

  Returns True when a bundle was successfully fetched into the repo.
  """
  if initial and (self.manifest.manifestProject.config.GetString('repo.depth') or self.clone_depth):
    # Shallow clones can't use a bundle: bundles carry full history.
    return False

  remote = self.GetRemote(self.remote.name)
  bundle_url = remote.url + '/clone.bundle'
  bundle_url = GitConfig.ForUser().UrlInsteadOf(bundle_url)
  if GetSchemeFromUrl(bundle_url) not in (
      'http', 'https', 'persistent-http', 'persistent-https'):
    return False

  bundle_dst = os.path.join(self.gitdir, 'clone.bundle')
  bundle_tmp = os.path.join(self.gitdir, 'clone.bundle.tmp')

  exist_dst = os.path.exists(bundle_dst)
  exist_tmp = os.path.exists(bundle_tmp)

  # After the initial sync, only bother if a previous download is lying
  # around to be resumed/consumed.
  if not initial and not exist_dst and not exist_tmp:
    return False

  if not exist_dst:
    exist_dst = self._FetchBundle(bundle_url, bundle_tmp, bundle_dst, quiet)
  if not exist_dst:
    return False

  cmd = ['fetch']
  if quiet:
    cmd.append('--quiet')
  if not self.worktree:
    cmd.append('--update-head-ok')
  # git can fetch from a bundle file just like from a remote.
  cmd.append(bundle_dst)
  for f in remote.fetch:
    cmd.append(str(f))
  cmd.append('refs/tags/*:refs/tags/*')

  ok = GitCommand(self, cmd, bare=True).Wait() == 0
  # The bundle is a one-shot seed; clean it up regardless of outcome.
  if os.path.exists(bundle_dst):
    os.remove(bundle_dst)
  if os.path.exists(bundle_tmp):
    os.remove(bundle_tmp)
  return ok
def _FetchBundle(self, srcUrl, tmpPath, dstPath, quiet):
  """Download |srcUrl| with curl into |tmpPath|; rename to |dstPath| if valid.

  Returns True when |dstPath| now holds a valid bundle.
  """
  if os.path.exists(dstPath):
    os.remove(dstPath)

  cmd = ['curl', '--fail', '--output', tmpPath, '--netrc', '--location']
  if quiet:
    cmd += ['--silent']

  if os.path.exists(tmpPath):
    size = os.stat(tmpPath).st_size
    if size >= 1024:
      # Resume a previously interrupted download.
      cmd += ['--continue-at', '%d' % (size,)]
    else:
      # Too small to be worth resuming; start over.
      os.remove(tmpPath)
  if 'http_proxy' in os.environ and 'darwin' == sys.platform:
    cmd += ['--proxy', os.environ['http_proxy']]
  cookiefile = self._GetBundleCookieFile(srcUrl)
  if cookiefile:
    cmd += ['--cookie', cookiefile]
  if srcUrl.startswith('persistent-'):
    # curl doesn't know the persistent- scheme; strip it to plain http(s).
    srcUrl = srcUrl[len('persistent-'):]
  cmd += [srcUrl]

  if IsTrace():
    Trace('%s', ' '.join(cmd))
  try:
    proc = subprocess.Popen(cmd)
  except OSError:
    # curl binary not available.
    return False

  curlret = proc.wait()

  if curlret == 22:
    # From curl man page:
    # 22: HTTP page not retrieved. The requested url was not found or
    # returned another error with the HTTP error code being 400 or above.
    # This return code only appears if -f, --fail is used.
    if not quiet:
      print("Server does not provide clone.bundle; ignoring.",
            file=sys.stderr)
    return False

  if os.path.exists(tmpPath):
    if curlret == 0 and self._IsValidBundle(tmpPath):
      os.rename(tmpPath, dstPath)
      return True
    else:
      os.remove(tmpPath)
      return False
  else:
    return False
def _IsValidBundle(self, path):
try:
with open(path) as f:
if f.read(16) == '# v2 git bundle\n':
return True
else:
print("Invalid clone.bundle file; ignoring.", file=sys.stderr)
return False
except OSError:
return False
def _GetBundleCookieFile(self, url):
  """Return the cookie file to use when downloading |url|'s clone bundle.

  For persistent-http(s) URLs, ask the git-remote-persistent-https helper
  for its http.cookiefile setting.  If the helper is not installed, or has
  no cookie configured, fall back to the user's global http.cookiefile.
  """
  if url.startswith('persistent-'):
    try:
      p = subprocess.Popen(
          ['git-remote-persistent-https', '-print_config', url],
          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
          stderr=subprocess.PIPE)
      p.stdin.close()  # Tell subprocess it's ok to close.
      prefix = 'http.cookiefile='
      cookiefile = None
      for line in p.stdout:
        line = line.strip()
        if line.startswith(prefix):
          cookiefile = line[len(prefix):]
          break
      if p.wait():
        err_msg = p.stderr.read()
        if ' -print_config' in err_msg:
          pass  # Persistent proxy doesn't support -print_config.
        else:
          print(err_msg, file=sys.stderr)
      if cookiefile:
        return cookiefile
    except OSError as e:
      # Re-raise everything except ENOENT: a missing helper binary just
      # means there is no persistent proxy, so fall through to the
      # default cookie file below.  (Previously the raise was
      # unconditional, defeating the ENOENT check above it.)
      if e.errno != errno.ENOENT:
        raise
  return GitConfig.ForUser().GetString('http.cookiefile')
def _Checkout(self, rev, quiet=False):
  """Check out |rev| in the work tree; raise GitError on failure.

  A failure is ignored when the repository has no refs at all (nothing
  could possibly be checked out yet).
  """
  cmd = ['checkout'] + (['-q'] if quiet else []) + [rev, '--']
  if GitCommand(self, cmd).Wait() != 0:
    if self._allrefs:
      raise GitError('%s checkout %s ' % (self.name, rev))
def _CherryPick(self, rev, quiet=False):
  """Cherry-pick |rev| onto the current branch; raise GitError on failure.

  As with _Checkout, failures are ignored when the repo has no refs.
  """
  cmd = ['cherry-pick', rev, '--']
  if GitCommand(self, cmd).Wait() != 0:
    if self._allrefs:
      raise GitError('%s cherry-pick %s ' % (self.name, rev))
def _Revert(self, rev, quiet=False):
  """Revert |rev| without opening an editor; raise GitError on failure.

  As with _Checkout, failures are ignored when the repo has no refs.
  """
  cmd = ['revert', '--no-edit', rev, '--']
  if GitCommand(self, cmd).Wait() != 0:
    if self._allrefs:
      raise GitError('%s revert %s ' % (self.name, rev))
def _ResetHard(self, rev, quiet=True):
  """Hard-reset the work tree and index to |rev|; raise GitError on failure."""
  cmd = ['reset', '--hard'] + (['-q'] if quiet else []) + [rev]
  if GitCommand(self, cmd).Wait() != 0:
    raise GitError('%s reset --hard %s ' % (self.name, rev))
def _Rebase(self, upstream, onto = None):
  """Rebase the current branch on |upstream| (optionally --onto |onto|)."""
  cmd = ['rebase']
  if onto is not None:
    cmd += ['--onto', onto]
  cmd += [upstream]
  if GitCommand(self, cmd).Wait() != 0:
    raise GitError('%s rebase %s ' % (self.name, upstream))
def _FastForward(self, head, ffonly=False):
  """Merge |head| into the current branch; raise GitError on failure.

  With ffonly=True, refuse anything but a fast-forward.
  """
  cmd = ['merge', head] + (['--ff-only'] if ffonly else [])
  if GitCommand(self, cmd).Wait() != 0:
    raise GitError('%s merge %s ' % (self.name, head))
def _InitGitDir(self, mirror_git=None):
  """Create and configure this project's git directory, if missing.

  Optionally wires up an alternates file pointing at |mirror_git| or a
  repo.reference mirror so objects can be shared.
  """
  if not os.path.exists(self.gitdir):
    # Initialize the bare repository, which contains all of the objects.
    if not os.path.exists(self.objdir):
      os.makedirs(self.objdir)
      self.bare_objdir.init()

    # If we have a separate directory to hold refs, initialize it as well.
    if self.objdir != self.gitdir:
      os.makedirs(self.gitdir)
      self._ReferenceGitDir(self.objdir, self.gitdir, share_refs=False,
                            copy_all=True)

    mp = self.manifest.manifestProject
    ref_dir = mp.config.GetString('repo.reference') or ''

    if ref_dir or mirror_git:
      if not mirror_git:
        mirror_git = os.path.join(ref_dir, self.name + '.git')
      repo_git = os.path.join(ref_dir, '.repo', 'projects',
                              self.relpath + '.git')

      # Prefer a bare mirror; fall back to a reference repo checkout.
      if os.path.exists(mirror_git):
        ref_dir = mirror_git
      elif os.path.exists(repo_git):
        ref_dir = repo_git
      else:
        ref_dir = None

      if ref_dir:
        # Share objects with the reference repository via alternates.
        _lwrite(os.path.join(self.gitdir, 'objects/info/alternates'),
                os.path.join(ref_dir, 'objects') + '\n')

    self._UpdateHooks()

    # Propagate the user's identity from the manifest project's config.
    m = self.manifest.manifestProject.config
    for key in ['user.name', 'user.email']:
      if m.Has(key, include_defaults = False):
        self.config.SetString(key, m.GetString(key))
    if self.manifest.IsMirror:
      self.config.SetString('core.bare', 'true')
    else:
      self.config.SetString('core.bare', None)
def _UpdateHooks(self):
if os.path.exists(self.gitdir):
# Always recreate hooks since they can have been changed
# since the latest update.
hooks = self._gitdir_path('hooks')
try:
to_rm = os.listdir(hooks)
except OSError:
to_rm = []
for old_hook in to_rm:
os.remove(os.path.join(hooks, old_hook))
self._InitHooks()
def _InitHooks(self):
  """Symlink repo's stock hook scripts into this project's hooks directory."""
  hooks = os.path.realpath(self._gitdir_path('hooks'))
  if not os.path.exists(hooks):
    os.makedirs(hooks)
  for stock_hook in _ProjectHooks():
    name = os.path.basename(stock_hook)

    # NOTE: `not self is ...` parses as `not (self is ...)`.
    if name in ('commit-msg',) and not self.remote.review \
          and not self is self.manifest.manifestProject:
      # Don't install a Gerrit Code Review hook if this
      # project does not appear to use it for reviews.
      #
      # Since the manifest project is one of those, but also
      # managed through gerrit, it's excluded
      continue

    dst = os.path.join(hooks, name)
    if os.path.islink(dst):
      # Already linked (possibly to an older stock hook); leave it.
      continue
    if os.path.exists(dst):
      if filecmp.cmp(stock_hook, dst, shallow=False):
        # Identical plain-file copy: replace it with a symlink below.
        os.remove(dst)
      else:
        # User-modified hook: keep it, but warn.
        _error("%s: Not replacing %s hook", self.relpath, name)
        continue
    try:
      os.symlink(os.path.relpath(stock_hook, os.path.dirname(dst)), dst)
    except OSError as e:
      if e.errno == errno.EPERM:
        raise GitError('filesystem must support symlinks')
      else:
        raise
def _InitRemote(self):
if self.remote.url:
remote = self.GetRemote(self.remote.name)
remote.url = self.remote.url
remote.review = self.remote.review
remote.projectname = self.name
if self.worktree:
remote.ResetFetch(mirror=False)
else:
remote.ResetFetch(mirror=True)
remote.Save()
def _InitMRef(self):
  """Point the manifest ref (refs/remotes/m/<branch>) at this revision."""
  branch = self.manifest.branch
  if branch:
    self._InitAnyMRef(R_M + branch)
def _InitMirrorHead(self):
  # In a mirror there is no work tree; HEAD itself tracks the manifest
  # revision for this project.
  self._InitAnyMRef(HEAD)
def _InitAnyMRef(self, ref):
  """Point |ref| at the revision the manifest selects for this project."""
  cur = self.bare_ref.symref(ref)

  if self.revisionId:
    # Manifest pins an explicit commit: detach |ref| onto it.
    if cur != '' or self.bare_ref.get(ref) != self.revisionId:
      msg = 'manifest set to %s' % self.revisionId
      dst = self.revisionId + '^0'
      self.bare_git.UpdateRef(ref, dst, message = msg, detach = True)
  else:
    # Manifest names a branch: make |ref| a symref to its local tracking ref.
    remote = self.GetRemote(self.remote.name)
    dst = remote.ToLocal(self.revisionExpr)
    if cur != dst:
      msg = 'manifest set to %s' % self.revisionExpr
      self.bare_git.symbolic_ref('-m', msg, ref, dst)
def _ReferenceGitDir(self, gitdir, dotgit, share_refs, copy_all):
  """Update |dotgit| to reference |gitdir|, using symlinks where possible.

  Args:
    gitdir: The bare git repository. Must already be initialized.
    dotgit: The repository you would like to initialize.
    share_refs: If true, |dotgit| will store its refs under |gitdir|.
        Only one work tree can store refs under a given |gitdir|.
    copy_all: If true, copy all remaining files from |gitdir| -> |dotgit|.
        This saves you the effort of initializing |dotgit| yourself.
  """
  # These objects can be shared between several working trees.
  symlink_files = ['description', 'info']
  symlink_dirs = ['hooks', 'objects', 'rr-cache', 'svn']
  if share_refs:
    # These objects can only be used by a single working tree.
    symlink_files += ['config', 'packed-refs', 'shallow']
    symlink_dirs += ['logs', 'refs']
  to_symlink = symlink_files + symlink_dirs

  to_copy = []
  if copy_all:
    to_copy = os.listdir(gitdir)

  for name in set(to_copy).union(to_symlink):
    try:
      src = os.path.realpath(os.path.join(gitdir, name))
      dst = os.path.realpath(os.path.join(dotgit, name))

      # Refuse to clobber anything that isn't already a symlink we made.
      if os.path.lexists(dst) and not os.path.islink(dst):
        raise GitError('cannot overwrite a local work tree')

      # If the source dir doesn't exist, create an empty dir.
      if name in symlink_dirs and not os.path.lexists(src):
        os.makedirs(src)

      # If the source file doesn't exist, ensure the destination
      # file doesn't either.
      if name in symlink_files and not os.path.lexists(src):
        try:
          os.remove(dst)
        except OSError:
          pass

      if name in to_symlink:
        os.symlink(os.path.relpath(src, os.path.dirname(dst)), dst)
      elif copy_all and not os.path.islink(dst):
        # Remaining entries are plain copies (only when copy_all).
        if os.path.isdir(src):
          shutil.copytree(src, dst)
        elif os.path.isfile(src):
          shutil.copy(src, dst)
    except OSError as e:
      if e.errno == errno.EPERM:
        raise GitError('filesystem must support symlinks')
      else:
        raise
def _InitWorkTree(self):
  """Create the work tree's .git directory and populate the work tree."""
  dotgit = os.path.join(self.worktree, '.git')
  if not os.path.exists(dotgit):
    os.makedirs(dotgit)
    self._ReferenceGitDir(self.gitdir, dotgit, share_refs=True,
                          copy_all=False)

    # Detach HEAD at the manifest revision.
    _lwrite(os.path.join(dotgit, HEAD), '%s\n' % self.GetRevisionId())

    # Populate the work tree and index from HEAD.
    cmd = ['read-tree', '--reset', '-u']
    cmd.append('-v')
    cmd.append(HEAD)
    if GitCommand(self, cmd).Wait() != 0:
      raise GitError("cannot initialize work tree")

    self._CopyAndLinkFiles()
def _gitdir_path(self, path):
return os.path.realpath(os.path.join(self.gitdir, path))
def _revlist(self, *args, **kw):
a = []
a.extend(args)
a.append('--')
return self.work_git.rev_list(*a, **kw)
@property
def _allrefs(self):
  # Snapshot of every ref known to the bare repository.
  return self.bare_ref.all
def _getLogs(self, rev1, rev2, oneline=False, color=True):
  """Get logs between two revisions of this project.

  Args:
    rev1: Older end of the range; if falsy, no range is applied.
    rev2: Newer end of the range; only used when rev1 is given.
    oneline: Pass --oneline to `git log`.
    color: Allow --color when the diff coloring config enables it.

  Returns:
    The log output as a string, or None if the command failed.
  """
  # Build the revision-range argument.  Previously `revs` was only bound
  # inside `if rev1:`, so a falsy rev1 crashed with NameError; start from
  # an empty list and only add a range argument when one exists.
  revs = []
  if rev1:
    revs.append(rev1)
    if rev2:
      # 'rev1..rev2': commits reachable from rev2 but not from rev1.
      revs.extend(['..', rev2])
  cmd = ['log']
  if revs:
    cmd.append(''.join(revs))
  out = DiffColoring(self.config)
  if out.is_on and color:
    cmd.append('--color')
  if oneline:
    cmd.append('--oneline')

  try:
    log = GitCommand(self, cmd, capture_stdout=True, capture_stderr=True)
    if log.Wait() == 0:
      return log.stdout
  except GitError:
    # worktree may not exist if groups changed for example. In that case,
    # try in gitdir instead.
    if not os.path.exists(self.worktree):
      return self.bare_git.log(*cmd[1:])
    else:
      raise
  return None
def getAddedAndRemovedLogs(self, toProject, oneline=False, color=True):
  """Return {'added': ..., 'removed': ...} logs between this project's
  revision and |toProject|'s revision."""
  selfId = self.GetRevisionId(self._allrefs)
  toId = toProject.GetRevisionId(toProject._allrefs)

  return {
      'added': self._getLogs(selfId, toId, oneline=oneline, color=color),
      'removed': self._getLogs(toId, selfId, oneline=oneline, color=color),
  }
class _GitGetByExec(object):
  """Runs git commands for a project, against either the bare repository
  (bare=True) or its work tree (bare=False)."""

  def __init__(self, project, bare, gitdir):
    self._project = project
    self._bare = bare        # run commands against the bare repo?
    self._gitdir = gitdir    # git directory handed to every GitCommand

  def LsOthers(self):
    """Return the untracked, non-ignored paths in the work tree."""
    p = GitCommand(self._project,
                   ['ls-files',
                    '-z',
                    '--others',
                    '--exclude-standard'],
                   bare = False,
                   gitdir=self._gitdir,
                   capture_stdout = True,
                   capture_stderr = True)
    if p.Wait() == 0:
      out = p.stdout
      if out:
        # Entries are NUL-terminated (-z): drop the trailing NUL and split.
        return out[:-1].split('\0')  # pylint: disable=W1401
                                     # Backslash is not anomalous
    return []

  def DiffZ(self, name, *args):
    """Run a NUL-separated diff command (e.g. diff-index) and parse its
    raw output into a dict of path -> info record."""
    cmd = [name]
    cmd.append('-z')
    cmd.extend(args)
    p = GitCommand(self._project,
                   cmd,
                   gitdir=self._gitdir,
                   bare = False,
                   capture_stdout = True,
                   capture_stderr = True)
    try:
      out = p.process.stdout.read()
      r = {}
      if out:
        # Raw -z output alternates ":omode nmode oid nid status" records
        # with path entries; renames/copies carry a second path.
        out = iter(out[:-1].split('\0'))  # pylint: disable=W1401
        # NOTE: an iterator is always truthy; this loop only exits via the
        # StopIteration break below.
        while out:
          try:
            info = next(out)
            path = next(out)
          except StopIteration:
            break

          class _Info(object):
            # One parsed diff record (modes, object ids, status letter and
            # optional similarity level, e.g. 'R100').
            def __init__(self, path, omode, nmode, oid, nid, state):
              self.path = path
              self.src_path = None
              self.old_mode = omode
              self.new_mode = nmode
              self.old_id = oid
              self.new_id = nid

              if len(state) == 1:
                self.status = state
                self.level = None
              else:
                self.status = state[:1]
                self.level = state[1:]
                # Strip leading zeros from the similarity score.
                while self.level.startswith('0'):
                  self.level = self.level[1:]

          # Drop the leading ':' and split the fixed fields.
          info = info[1:].split(' ')
          info = _Info(path, *info)
          if info.status in ('R', 'C'):
            # Renames/copies: first path is the source, next is the dest.
            info.src_path = info.path
            info.path = next(out)
          r[info.path] = info
      return r
    finally:
      p.Wait()

  def GetHead(self):
    """Return HEAD: the ref name if symbolic, else the raw commit id."""
    if self._bare:
      path = os.path.join(self._project.gitdir, HEAD)
    else:
      path = os.path.join(self._project.worktree, '.git', HEAD)
    try:
      fd = open(path, 'rb')
    except IOError as e:
      raise NoManifestException(path, str(e))
    try:
      line = fd.read()
    finally:
      fd.close()
    try:
      # Python 3 returns bytes; Python 2 str has no decode-to-str need.
      line = line.decode()
    except AttributeError:
      pass
    if line.startswith('ref: '):
      # Strip the 'ref: ' prefix and the trailing newline.
      return line[5:-1]
    return line[:-1]

  def SetHead(self, ref, message=None):
    """Point HEAD symbolically at |ref| (optionally with a reflog message)."""
    cmdv = []
    if message is not None:
      cmdv.extend(['-m', message])
    cmdv.append(HEAD)
    cmdv.append(ref)
    self.symbolic_ref(*cmdv)

  def DetachHead(self, new, message=None):
    """Detach HEAD directly onto commit |new|."""
    cmdv = ['--no-deref']
    if message is not None:
      cmdv.extend(['-m', message])
    cmdv.append(HEAD)
    cmdv.append(new)
    self.update_ref(*cmdv)

  def UpdateRef(self, name, new, old=None,
                message=None,
                detach=False):
    """Update ref |name| to |new| (optionally verifying it was |old|)."""
    cmdv = []
    if message is not None:
      cmdv.extend(['-m', message])
    if detach:
      cmdv.append('--no-deref')
    cmdv.append(name)
    cmdv.append(new)
    if old is not None:
      cmdv.append(old)
    self.update_ref(*cmdv)

  def DeleteRef(self, name, old=None):
    """Delete ref |name| and drop it from the cached ref state."""
    if not old:
      old = self.rev_parse(name)
    self.update_ref('-d', name, old)
    self._project.bare_ref.deleted(name)

  def rev_list(self, *args, **kw):
    """Run rev-list (or `log --pretty=format:` when format= is given) and
    return the output lines, raising GitError on failure."""
    if 'format' in kw:
      cmdv = ['log', '--pretty=format:%s' % kw['format']]
    else:
      cmdv = ['rev-list']
    cmdv.extend(args)
    p = GitCommand(self._project,
                   cmdv,
                   bare = self._bare,
                   gitdir=self._gitdir,
                   capture_stdout = True,
                   capture_stderr = True)
    r = []
    for line in p.process.stdout:
      if line[-1] == '\n':
        line = line[:-1]
      r.append(line)
    if p.Wait() != 0:
      raise GitError('%s rev-list %s: %s' % (
                     self._project.name,
                     str(args),
                     p.stderr))
    return r

  def __getattr__(self, name):
    """Allow arbitrary git commands using pythonic syntax.

    This allows you to do things like:
      git_obj.rev_parse('HEAD')

    Since we don't have a 'rev_parse' method defined, the __getattr__ will
    run.  We'll replace the '_' with a '-' and try to run a git command.
    Any other positional arguments will be passed to the git command, and the
    following keyword arguments are supported:
      config: An optional dict of git config options to be passed with '-c'.

    Args:
      name: The name of the git command to call.  Any '_' characters will
        be replaced with '-'.

    Returns:
      A callable object that will try to call git with the named command.
    """
    name = name.replace('_', '-')
    def runner(*args, **kwargs):
      cmdv = []
      config = kwargs.pop('config', None)
      # Any leftover keyword argument is an error.
      for k in kwargs:
        raise TypeError('%s() got an unexpected keyword argument %r'
                        % (name, k))
      if config is not None:
        if not git_require((1, 7, 2)):
          raise ValueError('cannot set config on command line for %s()'
                           % name)
        for k, v in config.items():
          cmdv.append('-c')
          cmdv.append('%s=%s' % (k, v))
      cmdv.append(name)
      cmdv.extend(args)
      p = GitCommand(self._project,
                     cmdv,
                     bare = self._bare,
                     gitdir=self._gitdir,
                     capture_stdout = True,
                     capture_stderr = True)
      if p.Wait() != 0:
        raise GitError('%s %s: %s' % (
                       self._project.name,
                       name,
                       p.stderr))
      r = p.stdout
      try:
        # Python 3 bytes -> str; Python 2 str lacks decode-to-unicode need.
        r = r.decode('utf-8')
      except AttributeError:
        pass
      # Strip a single trailing newline (but keep multi-line output as-is).
      if r.endswith('\n') and r.index('\n') == len(r) - 1:
        return r[:-1]
      return r
    return runner
class _PriorSyncFailedError(Exception):
def __str__(self):
return 'prior sync failed; rebase still in progress'
class _DirtyError(Exception):
def __str__(self):
return 'contains uncommitted changes'
class _InfoMessage(object):
def __init__(self, project, text):
self.project = project
self.text = text
def Print(self, syncbuf):
syncbuf.out.info('%s/: %s', self.project.relpath, self.text)
syncbuf.out.nl()
class _Failure(object):
def __init__(self, project, why):
self.project = project
self.why = why
def Print(self, syncbuf):
syncbuf.out.fail('error: %s/: %s',
self.project.relpath,
str(self.why))
syncbuf.out.nl()
class _Later(object):
def __init__(self, project, action):
self.project = project
self.action = action
def Run(self, syncbuf):
out = syncbuf.out
out.project('project %s/', self.project.relpath)
out.nl()
try:
self.action()
out.nl()
return True
except GitError:
out.nl()
return False
class _SyncColoring(Coloring):
  # Color scheme for SyncBuffer output; colors come from the user's
  # 'reposync' git config section.
  def __init__(self, config):
    Coloring.__init__(self, config, 'reposync')
    self.project = self.printer('header', attr = 'bold')  # project banner
    self.info = self.printer('info')                      # informational text
    self.fail = self.printer('fail', fg='red')            # failure text
class SyncBuffer(object):
  """Accumulates per-project messages and deferred actions during a sync.

  Messages and failures are queued via info()/fail(); deferred actions via
  later1()/later2().  Finish() prints everything, runs the queues in order,
  and returns whether the whole sync stayed clean.
  """

  def __init__(self, config, detach_head=False):
    self._messages = []
    self._failures = []
    self._later_queue1 = []
    self._later_queue2 = []

    self.out = _SyncColoring(config)
    self.out.redirect(sys.stderr)

    self.detach_head = detach_head
    self.clean = True

  def info(self, project, fmt, *args):
    self._messages.append(_InfoMessage(project, fmt % args))

  def fail(self, project, err=None):
    self._failures.append(_Failure(project, err))
    self.clean = False

  def later1(self, project, what):
    self._later_queue1.append(_Later(project, what))

  def later2(self, project, what):
    self._later_queue2.append(_Later(project, what))

  def Finish(self):
    self._PrintMessages()
    self._RunLater()
    self._PrintMessages()
    return self.clean

  def _RunLater(self):
    # Queue 2 only runs if queue 1 completed without a failure.
    for queue_name in ('_later_queue1', '_later_queue2'):
      if not self._RunQueue(queue_name):
        break

  def _RunQueue(self, queue):
    for item in getattr(self, queue):
      if not item.Run(self):
        self.clean = False
        return False
    setattr(self, queue, [])
    return True

  def _PrintMessages(self):
    # Informational messages first, then failures; both queues drain.
    for message in self._messages + self._failures:
      message.Print(self)

    self._messages = []
    self._failures = []
class MetaProject(Project):
  """A special project housed under .repo.
  """
  def __init__(self, manifest, name, gitdir, worktree):
    # Meta projects keep objects in the gitdir itself and track master.
    Project.__init__(self,
                     manifest = manifest,
                     name = name,
                     gitdir = gitdir,
                     objdir = gitdir,
                     worktree = worktree,
                     remote = RemoteSpec('origin'),
                     relpath = '.repo/%s' % name,
                     revisionExpr = 'refs/heads/master',
                     revisionId = None,
                     groups = None)

  def PreSync(self):
    # Before syncing, follow the currently checked-out branch's upstream
    # (its merge setting) instead of the default revision.
    if self.Exists:
      cb = self.CurrentBranch
      if cb:
        base = self.GetBranch(cb).merge
        if base:
          self.revisionExpr = base
          self.revisionId = None

  def MetaBranchSwitch(self, target):
    """ Prepare MetaProject for manifest branch switch
    """

    # detach and delete manifest branch, allowing a new
    # branch to take over
    syncbuf = SyncBuffer(self.config, detach_head = True)
    self.Sync_LocalHalf(syncbuf)
    syncbuf.Finish()

    return GitCommand(self,
                      ['update-ref', '-d', 'refs/heads/default'],
                      capture_stdout = True,
                      capture_stderr = True).Wait() == 0

  @property
  def LastFetch(self):
    # Approximate the time of the last fetch via FETCH_HEAD's mtime;
    # 0 when the project has never been fetched.
    try:
      fh = os.path.join(self.gitdir, 'FETCH_HEAD')
      return os.path.getmtime(fh)
    except OSError:
      return 0

  @property
  def HasChanges(self):
    """Has the remote received new commits not yet checked out?
    """
    if not self.remote or not self.revisionExpr:
      return False

    all_refs = self.bare_ref.all
    revid = self.GetRevisionId(all_refs)
    head = self.work_git.GetHead()
    if head.startswith(R_HEADS):
      # Resolve symbolic HEAD to a commit id, if the ref exists.
      try:
        head = all_refs[head]
      except KeyError:
        head = None

    if revid == head:
      return False
    elif self._revlist(not_rev(HEAD), revid):
      # Commits reachable from the manifest revision but not from HEAD.
      return True
    return False
| 1379266135/git-repo | project.py | Python | apache-2.0 | 83,899 |
from core import ungzip
from core import ungzip_html
| mutarock/python-utils | compress/__init__.py | Python | mit | 53 |
"""
WSGI config for pelawak project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# Default to the project settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pelawak.settings")
application = get_wsgi_application()
# Wrap the WSGI app so WhiteNoise can serve static files directly.
application = DjangoWhiteNoise(application)
| mclumd/pelawak | pelawak/wsgi.py | Python | mit | 482 |
# coding=utf8
import json
from datetime import date, timedelta, datetime, time
from attivita.stats import statistiche_attivita_persona
from django.db.models import Count, F, Sum
from django.utils import timezone
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import redirect, get_object_or_404
from anagrafica.costanti import NAZIONALE
from anagrafica.models import Sede
from anagrafica.permessi.applicazioni import RESPONSABILE_AREA, DELEGATO_AREA, REFERENTE, REFERENTE_GRUPPO
from anagrafica.permessi.costanti import MODIFICA, GESTIONE_ATTIVITA, ERRORE_PERMESSI, GESTIONE_GRUPPO, \
GESTIONE_AREE_SEDE, COMPLETO, GESTIONE_ATTIVITA_AREA, GESTIONE_REFERENTI_ATTIVITA, GESTIONE_ATTIVITA_SEDE, \
GESTIONE_POTERI_CENTRALE_OPERATIVA_SEDE
from attivita.elenchi import ElencoPartecipantiTurno, ElencoPartecipantiAttivita
from attivita.forms import ModuloStoricoTurni, ModuloAttivitaInformazioni, ModuloModificaTurno, \
ModuloAggiungiPartecipanti, ModuloCreazioneTurno, ModuloCreazioneArea, ModuloOrganizzaAttivita, \
ModuloOrganizzaAttivitaReferente, ModuloStatisticheAttivita, ModuloRipetiTurno, ModuloStatisticheAttivitaPersona
from attivita.models import Partecipazione, Attivita, Turno, Area
from attivita.utils import turni_raggruppa_giorno
from autenticazione.funzioni import pagina_privata, pagina_pubblica
from base.errori import ci_siamo_quasi, errore_generico, messaggio_generico, errore_no_volontario
from base.files import Excel, FoglioExcel
from base.utils import poco_fa, timedelta_ore
from gruppi.models import Gruppo
def attivita(request):
    """Landing page: send the user straight to the activity calendar."""
    destinazione = '/attivita/calendario/'
    return redirect(destinazione)
@pagina_privata
def attivita_aree(request, me):
    """List the Sedi whose intervention areas the user may manage."""
    contesto = {
        "sedi": me.oggetti_permesso(GESTIONE_AREE_SEDE),
    }
    return 'attivita_aree.html', contesto
@pagina_privata
def attivita_aree_sede(request, me, sede_pk=None):
    # List the intervention areas of one Sede and allow creating a new
    # one; after creation, jump straight to the delegate-selection page.
    sede = get_object_or_404(Sede, pk=sede_pk)
    if not sede in me.oggetti_permesso(GESTIONE_AREE_SEDE):
        return redirect(ERRORE_PERMESSI)
    aree = sede.aree.all()
    modulo = ModuloCreazioneArea(request.POST or None)
    if modulo.is_valid():
        # Attach the new area to this Sede before persisting it.
        area = modulo.save(commit=False)
        area.sede = sede
        area.save()
        return redirect("/attivita/aree/%d/%d/responsabili/" % (
            sede.pk, area.pk,
        ))
    contesto = {
        "sede": sede,
        "aree": aree,
        "modulo": modulo,
    }
    return 'attivita_aree_sede.html', contesto
@pagina_privata
def attivita_aree_sede_area_responsabili(request, me, sede_pk=None, area_pk=None):
    """Manage the delegates (responsabili) of an intervention area."""
    area = get_object_or_404(Area, pk=area_pk)
    if not me.permessi_almeno(area, COMPLETO):
        return redirect(ERRORE_PERMESSI)
    contesto = {
        "area": area,
        "delega": DELEGATO_AREA,
        "continua_url": "/attivita/aree/%d/" % (area.sede.pk,)
    }
    return 'attivita_aree_sede_area_responsabili.html', contesto
@pagina_privata
def attivita_aree_sede_area_cancella(request, me, sede_pk=None, area_pk=None):
    # Delete an intervention area, refusing when activities still
    # reference it (they would otherwise be orphaned).
    area = get_object_or_404(Area, pk=area_pk)
    if not me.permessi_almeno(area, COMPLETO):
        return redirect(ERRORE_PERMESSI)
    sede = area.sede
    if area.attivita.exists():
        return errore_generico(request, me, titolo="L'area ha delle attività associate",
                               messaggio="Non è possibile cancellare delle aree che hanno delle "
                                         "attività associate.",
                               torna_titolo="Torna indietro",
                               torna_url="/attivita/aree/%d/" % (sede.pk,))
    area.delete()
    return redirect("/attivita/aree/%d/" % (sede.pk,))
@pagina_privata
def attivita_gestisci(request, me, stato="aperte"):
    # stato = "aperte" | "chiuse"
    # Include suspended delegations too, so the page can offer to
    # re-activate them.
    attivita_tutte = me.oggetti_permesso(GESTIONE_ATTIVITA, solo_deleghe_attive=False)
    attivita_aperte = attivita_tutte.filter(apertura=Attivita.APERTA)
    attivita_chiuse = attivita_tutte.filter(apertura=Attivita.CHIUSA)
    if stato == "aperte":
        attivita = attivita_aperte
    else:  # stato == "chiuse"
        attivita = attivita_chiuse
    attivita_referenti_modificabili = me.oggetti_permesso(GESTIONE_REFERENTI_ATTIVITA)
    attivita = attivita.annotate(num_turni=Count('turni'))
    # Paginate (30 per page), falling back to the first or last page on
    # bad ?pagina= input.
    attivita = Paginator(attivita, 30)
    pagina = request.GET.get('pagina')
    try:
        attivita = attivita.page(pagina)
    except PageNotAnInteger:
        attivita = attivita.page(1)
    except EmptyPage:
        attivita = attivita.page(attivita.num_pages)
    contesto = {
        "stato": stato,
        "attivita": attivita,
        "attivita_aperte": attivita_aperte,
        "attivita_chiuse": attivita_chiuse,
        "attivita_referenti_modificabili": attivita_referenti_modificabili,
    }
    return 'attivita_gestisci.html', contesto
@pagina_privata
def attivita_organizza(request, me):
    # Wizard entry point for organizing a new activity.  Requires at
    # least one intervention area; optionally creates a dedicated group
    # and assigns the referente (me, a chosen person, or via a
    # follow-up page).
    aree = me.oggetti_permesso(GESTIONE_ATTIVITA_AREA)
    sedi = Sede.objects.filter(aree__in=aree)  # NOTE(review): apparently unused — confirm before removing
    deleghe = me.deleghe_attuali()
    from anagrafica.permessi.applicazioni import DELEGATI_NON_SONO_UN_BERSAGLIO
    deleghe_bersaglio = deleghe.filter(tipo__in=DELEGATI_NON_SONO_UN_BERSAGLIO)
    if deleghe_bersaglio:
        # Ensure the special "Non sono un bersaglio" area (obiettivo 4)
        # exists for each such delegation.
        if not aree.filter(obiettivo=4, nome__icontains='non sono un bersaglio'):
            for delega in deleghe.filter(tipo__in=DELEGATI_NON_SONO_UN_BERSAGLIO):
                area = Area(nome='Non sono un bersaglio', obiettivo=4, sede=delega.oggetto)
                area.save()
    if not aree:
        return messaggio_generico(request, me, titolo="Crea un'area di intervento, prima!",
                                  messaggio="Le aree di intervento fungono da 'contenitori' per le "
                                            "attività. Per organizzare un'attività, è necessario creare "
                                            "almeno un'area di intervento. ",
                                  torna_titolo="Gestisci le Aree di intervento",
                                  torna_url="/attivita/aree/")
    modulo_referente = ModuloOrganizzaAttivitaReferente(request.POST or None)
    modulo = ModuloOrganizzaAttivita(request.POST or None)
    modulo.fields['area'].queryset = me.oggetti_permesso(GESTIONE_ATTIVITA_AREA)
    if deleghe_bersaglio:
        modulo_referente.fields['scelta'].choices = ModuloOrganizzaAttivitaReferente.popola_scelta()
    if modulo_referente.is_valid() and modulo.is_valid():
        attivita = modulo.save(commit=False)
        attivita.sede = attivita.area.sede
        attivita.estensione = attivita.sede.comitato
        attivita.save()
        # Create a group for this specific activity if the box was ticked.
        crea_gruppo = modulo.cleaned_data['gruppo']
        if crea_gruppo:
            area = attivita.area
            gruppo = Gruppo.objects.create(nome=attivita.nome, sede=attivita.sede, obiettivo=area.obiettivo,
                                           attivita=attivita, estensione=attivita.estensione.estensione,
                                           area=area)
            gruppo.aggiungi_delegato(REFERENTE_GRUPPO, me)
        if modulo_referente.cleaned_data['scelta'] == modulo_referente.SONO_IO:
            # I am the referente myself.
            attivita.aggiungi_delegato(REFERENTE, me, firmatario=me, inizio=poco_fa())
            return redirect(attivita.url_modifica)
        elif modulo_referente.cleaned_data['scelta'] == modulo_referente.SCEGLI_REFERENTI:
            # The referente is somebody else: pick them on the next page.
            return redirect("/attivita/organizza/%d/referenti/" % (attivita.pk,))
        else:
            # 'scelta' carries the pk of a specific person to delegate.
            from anagrafica.models import Persona
            persona = Persona.objects.get(pk=modulo_referente.cleaned_data['scelta'])
            attivita.aggiungi_delegato(REFERENTE, persona, firmatario=me, inizio=poco_fa())
            return redirect(attivita.url_modifica)
    contesto = {
        "modulo": modulo,
        "modulo_referente": modulo_referente,
    }
    return 'attivita_organizza.html', contesto
@pagina_privata
def attivita_organizza_fatto(request, me, pk=None):
    """Confirmation page shown right after an activity is organized."""
    attivita = get_object_or_404(Attivita, pk=pk)
    if not me.permessi_almeno(attivita, MODIFICA):
        return redirect(ERRORE_PERMESSI)
    testo = ("Abbiamo inviato un messaggio ai referenti che hai "
             "selezionato. Appena accederanno a Gaia, gli chiederemo "
             "di darci maggiori informazioni sull'attività, come "
             "gli orari dei turni e l'indirizzo.")
    return messaggio_generico(request, me, titolo="Attività organizzata",
                              messaggio=testo,
                              torna_titolo="Torna a Gestione Attività",
                              torna_url="/attivita/gestisci/")
@pagina_privata
def attivita_referenti(request, me, pk=None, nuova=False):
    """Manage the referenti (delegates) of an activity.

    When `nuova` is set, the continue link points back to the
    organization wizard's confirmation page.
    """
    attivita = get_object_or_404(Attivita, pk=pk)
    if not me.permessi_almeno(attivita, MODIFICA):
        return redirect(ERRORE_PERMESSI)
    continua_url = ("/attivita/organizza/%d/fatto/" % (attivita.pk,)
                    if nuova else "/attivita/gestisci/")
    contesto = {
        "delega": REFERENTE,
        "attivita": attivita,
        "continua_url": continua_url
    }
    return 'attivita_referenti.html', contesto
@pagina_privata
def attivita_calendario(request, me=None, inizio=None, fine=None, vista="calendario"):
    """
    Show the personalised activity calendar for the logged-in volunteer.

    `inizio` and `fine` are dd-mm-YYYY URL strings delimiting the
    window; when omitted, the window defaults to today plus six days.
    """
    if not me.volontario:
        return errore_no_volontario(request, me)
    # Default and maximum window size, in days.
    DEFAULT_GIORNI = 6
    MASSIMO_GIORNI = 31
    # Date format used in calendar URLs.
    FORMATO = "%d-%m-%Y"
    if inizio is None:
        inizio = date.today().strftime(FORMATO)
    inizio = datetime.strptime(inizio, FORMATO).date()
    if fine is None:
        fine = inizio + timedelta(DEFAULT_GIORNI)
    else:
        fine = datetime.strptime(fine, FORMATO).date()
    # Ensure the range is valid (not negative, not too long).
    # NOTE(review): this recursive call passes `inizio` as a date
    # object, but the function expects a FORMATO string and will call
    # strptime on it — looks like a latent TypeError; confirm and fix.
    differenza = (fine - inizio)
    if differenza.days < 0 or differenza.days > MASSIMO_GIORNI:
        return attivita_calendario(request, me, inizio=inizio, fine=None)
    # Next window of the same width.
    successivo_inizio = inizio + differenza
    successivo_inizio_stringa = successivo_inizio.strftime(FORMATO)
    successivo_fine = fine + differenza
    successivo_fine_stringa = successivo_fine.strftime(FORMATO)
    successivo_url = "/attivita/calendario/%s/%s/" % (successivo_inizio_stringa, successivo_fine_stringa, )
    # "Today" link.
    oggi_url = "/attivita/calendario/"
    # Previous window of the same width.
    precedente_inizio = inizio - differenza
    precedente_inizio_stringa = precedente_inizio.strftime(FORMATO)
    precedente_fine = fine - differenza
    precedente_fine_stringa = precedente_fine.strftime(FORMATO)
    precedente_url = "/attivita/calendario/%s/%s/" % (precedente_inizio_stringa, precedente_fine_stringa, )
    # Shifts in the window, grouped by day for display.
    turni = me.calendario_turni(inizio, fine)
    raggruppati = turni_raggruppa_giorno(turni)
    contesto = {
        "inizio": inizio,
        "fine": fine,
        "successivo_inizio": successivo_inizio,
        "successivo_fine": successivo_fine,
        "successivo_url": successivo_url,
        "oggi_url": oggi_url,
        "precedente_inizio": precedente_inizio,
        "precedente_fine": precedente_fine,
        "precedente_url": precedente_url,
        "turni": turni,
        "raggruppati": raggruppati,
    }
    return 'attivita_calendario.html', contesto
@pagina_privata
def attivita_storico(request, me):
    """
    Show the history of activities the user requested to join or joined,
    together with personal participation statistics.
    """
    storico = Partecipazione.objects.filter(persona=me).order_by('-turno__inizio')
    modulo = ModuloStatisticheAttivitaPersona(request.POST or None)
    statistiche = statistiche_attivita_persona(me, modulo)
    contesto = {
        "storico": storico,
        "statistiche": statistiche,
        "statistiche_modulo": modulo,
    }
    # BUG FIX: this return previously ended with a stray backslash line
    # continuation, which glued it to the following "@pagina_privata"
    # line (parsed as a matmul expression) and stripped the decorator
    # from the next view.
    return 'attivita_storico.html', contesto
@pagina_privata
def attivita_storico_excel(request, me):
    """Download the user's service sheet ("foglio di servizio") as Excel."""
    foglio = me.genera_foglio_di_servizio()
    return redirect(foglio.download_url)
@pagina_pubblica
def attivita_scheda_informazioni(request, me=None, pk=None):
    """
    Show the "Information" tab of an activity (public page).
    """
    attivita = get_object_or_404(Attivita, pk=pk)
    # `me` can be None on public pages; guard before checking permissions.
    puo_modificare = me and me.permessi_almeno(attivita, MODIFICA)
    contesto = {
        "attivita": attivita,
        "puo_modificare": puo_modificare,
        "me": me,
    }
    return 'attivita_scheda_informazioni.html', contesto
@pagina_privata
def attivita_scheda_cancella(request, me, pk):
    # Delete an activity; on the /cancella-gruppo/ URL, delete the
    # associated group as well.
    attivita = get_object_or_404(Attivita, pk=pk)
    if not me.permessi_almeno(attivita, COMPLETO):
        return redirect(ERRORE_PERMESSI)
    if not attivita.cancellabile:
        return errore_generico(request, me, titolo="Attività non cancellabile",
                               messaggio="Questa attività non può essere cancellata.")
    titolo_messaggio = "Attività cancellata"
    testo_messaggio = "L'attività è stata cancellata con successo."
    # The URL path decides whether the linked group must go too.
    if 'cancella-gruppo' in request.path.split('/'):
        try:
            gruppo = Gruppo.objects.get(attivita=attivita)
            gruppo.delete()
            titolo_messaggio = "Attività e gruppo cancellati"
            testo_messaggio = "L'attività e il gruppo associato sono stati cancellati con successo."
        except Gruppo.DoesNotExist:
            # No group linked: still a success, with an explanatory text.
            testo_messaggio = "L'attività è stata cancellata con successo (non esisteva un gruppo associato a quest'attività)."
    attivita.delete()
    return messaggio_generico(request, me, titolo=titolo_messaggio,
                              messaggio=testo_messaggio,
                              torna_titolo="Gestione attività", torna_url="/attivita/gestisci/")
@pagina_pubblica
def attivita_scheda_mappa(request, me=None, pk=None):
    """
    Show the "Map" tab of an activity (public page).
    """
    attivita = get_object_or_404(Attivita, pk=pk)
    # `me` can be None on public pages; guard before checking permissions.
    puo_modificare = me and me.permessi_almeno(attivita, MODIFICA)
    contesto = {
        "attivita": attivita,
        "puo_modificare": puo_modificare,
    }
    return 'attivita_scheda_mappa.html', contesto
@pagina_privata
def attivita_scheda_turni(request, me=None, pk=None, pagina=None):
    """
    Show the "Turni" (shifts) tab of an activity, paginated.

    Without an explicit page number, redirects to the page that
    contains today's shifts.
    """
    # BUG FIX: removed an unreachable `if False: return ci_siamo_quasi(...)`
    # dead-code branch that preceded this lookup.
    attivita = get_object_or_404(Attivita, pk=pk)
    if pagina is None:
        # Land on the page that contains today's shifts.
        pagina = "/attivita/scheda/%d/turni/%d/" % (attivita.pk, attivita.pagina_turni_oggi())
        return redirect(pagina)
    turni = attivita.turni.all()
    puo_modificare = me and me.permessi_almeno(attivita, MODIFICA)
    # Optionally highlight one shift (used by permalink deep links).
    evidenzia_turno = Turno.objects.get(pk=request.GET['evidenzia_turno']) if 'evidenzia_turno' in request.GET else None
    pagina = int(pagina)
    if pagina < 0:
        pagina = 1
    p = Paginator(turni, Turno.PER_PAGINA)
    pg = p.page(pagina)
    contesto = {
        'pagina': pagina,
        'pagine': p.num_pages,
        'totale': p.count,
        'turni': pg.object_list,
        'ha_precedente': pg.has_previous(),
        'ha_successivo': pg.has_next(),
        'pagina_precedente': pagina-1,
        'pagina_successiva': pagina+1,
        "attivita": attivita,
        "puo_modificare": puo_modificare,
        "evidenzia_turno": evidenzia_turno,
    }
    return 'attivita_scheda_turni.html', contesto
@pagina_privata
def attivita_scheda_turni_nuovo(request, me=None, pk=None):
    """
    Creation page for a new shift of an activity.

    Optionally repeats the new shift on selected weekdays for a given
    number of occurrences.
    """
    attivita = get_object_or_404(Attivita, pk=pk)
    if not me.permessi_almeno(attivita, MODIFICA):
        # BUG FIX: the redirect was previously not returned, so the
        # permission check had no effect.
        return redirect(ERRORE_PERMESSI)
    # Sensible defaults: one-hour shift, one week from now.
    tra_una_settimana = timezone.now() + timedelta(days=7)
    tra_una_settimana_e_una_ora = tra_una_settimana + timedelta(hours=1)
    modulo = ModuloCreazioneTurno(request.POST or None, initial={
        "inizio": tra_una_settimana, "fine": tra_una_settimana_e_una_ora,
    })
    modulo_ripeti = ModuloRipetiTurno(request.POST or None, prefix="ripeti")
    if modulo.is_valid():
        turno = modulo.save(commit=False)
        turno.attivita = attivita
        turno.save()
        if request.POST.get('ripeti', default="no") == 'si' \
                and modulo_ripeti.is_valid():
            numero_ripetizioni = modulo_ripeti.cleaned_data['numero_ripetizioni']
            giorni = modulo_ripeti.cleaned_data['giorni']
            giorni_ripetuti = 0
            giorni_nel_futuro = 1
            # Walk forward one day at a time, cloning the shift on the
            # selected weekdays until enough repetitions were created.
            while giorni_ripetuti < numero_ripetizioni:
                ripetizione = Turno(
                    attivita=attivita,
                    inizio=turno.inizio + timedelta(days=giorni_nel_futuro),
                    fine=turno.fine + timedelta(days=giorni_nel_futuro),
                    prenotazione=turno.prenotazione + timedelta(days=giorni_nel_futuro),
                    minimo=turno.minimo,
                    massimo=turno.massimo,
                    nome=turno.nome,
                )
                if str(ripetizione.inizio.weekday()) in giorni:
                    giorni_ripetuti += 1
                    ripetizione.save()
                giorni_nel_futuro += 1
        return redirect(turno.url)
    contesto = {
        "modulo": modulo,
        "modulo_ripeti": modulo_ripeti,
        "attivita": attivita,
        "puo_modificare": True
    }
    return 'attivita_scheda_turni_nuovo.html', contesto
@pagina_privata
def attivita_scheda_turni_turno_cancella(request, me, pk=None, turno_pk=None):
    """
    Delete a shift, then redirect to the previous shift's edit page
    (or to the activity's shift-edit list when none precedes it).
    """
    turno = Turno.objects.get(pk=turno_pk)
    attivita = turno.attivita
    if not me.permessi_almeno(attivita, MODIFICA):
        # BUG FIX: the redirect was previously not returned, so the
        # permission check had no effect and the shift was deleted anyway.
        return redirect(ERRORE_PERMESSI)
    precedente = attivita.turni.all().filter(inizio__lt=turno.inizio).order_by('inizio').last()
    if precedente:
        url_torna = precedente.url_modifica
    else:
        url_torna = attivita.url_turni_modifica
    turno.delete()
    return redirect(url_torna)
@pagina_privata
def attivita_scheda_turni_partecipa(request, me, pk=None, turno_pk=None):
    """
    Request the logged-in user's participation in a shift.
    """
    turno = get_object_or_404(Turno, pk=turno_pk)
    # `stato` describes the user's current relation with this shift.
    stato = turno.persona(me)
    if stato not in turno.TURNO_PUOI_PARTECIPARE:
        return errore_generico(request, me, titolo="Non puoi partecipare a questo turno",
                               messaggio="Siamo spiacenti, ma ci risulta che tu non possa "
                                         "richiedere partecipazione a questo turno. Vai "
                                         "all'elenco dei turni per maggiori informazioni "
                                         "sulla motivazione. ",
                               torna_titolo="Turni dell'attività",
                               torna_url=turno.url,
                               )
    # Create the participation and start the authorization workflow.
    p = Partecipazione(
        turno=turno,
        persona=me,
    )
    p.save()
    p.richiedi()
    return messaggio_generico(request, me, titolo="Ottimo! Richiesta inoltrata.",
                              messaggio="La tua richiesta è stata inoltrata ai referenti di "
                                        "questa attività, che potranno confermarla o negarla. "
                                        "Ti manderemo una e-mail non appena risponderanno alla "
                                        "tua richiesta. Puoi sempre controllare lo stato delle tue"
                                        "richieste di partecipazione da 'Attivita' > 'I miei turni'. ",
                              torna_titolo="Vai a 'I miei turni'",
                              torna_url="/attivita/storico/")
@pagina_privata
def attivita_scheda_turni_ritirati(request, me, pk=None, turno_pk=None):
    # Withdraw the user's own pending participation request for a shift.
    turno = get_object_or_404(Turno, pk=turno_pk)
    stato = turno.persona(me)
    if stato != turno.TURNO_PRENOTATO_PUOI_RITIRARTI:
        return errore_generico(request, me, titolo="Non puoi ritirare la tua partecipazione",
                               messaggio="Una volta che la tua partecipazione è stata confermata, "
                                         "non puoi più ritirarla da Gaia. Se non puoi presentarti, "
                                         "scrivi a un referente dell'attività, che potrà valutare "
                                         "la situazione e rimuoverti dai partecipanti.",
                               torna_titolo="Torna al turno",
                               torna_url=turno.url)
    partecipazione = Partecipazione.con_esito_pending(turno=turno, persona=me).first()
    if not partecipazione:
        # The state said there is a pending participation; missing one
        # indicates an inconsistency upstream.
        raise ValueError("TURNO_PRENOTATO_PUOI_RITIRARTI assegnato, ma nessuna partecipazione"
                         "trovata. ")
    partecipazione.autorizzazioni_ritira()
    return messaggio_generico(request, me, titolo="Richiesta ritirata.",
                              messaggio="La tua richiesta di partecipazione a questo turno "
                                        "è stata ritirata con successo.",
                              torna_titolo="Torna al turno",
                              torna_url=turno.url)
@pagina_privata
def attivita_scheda_turni_partecipanti(request, me, pk=None, turno_pk=None):
    """Show the participant roster of a single shift."""
    turno = get_object_or_404(Turno, pk=turno_pk)
    attivita = turno.attivita
    if not me.permessi_almeno(attivita, MODIFICA):
        return redirect(ERRORE_PERMESSI)
    contesto = {
        "attivita": attivita,
        "turno": turno,
        "elenco": ElencoPartecipantiTurno(turno.queryset_modello()),
        "puo_modificare": True
    }
    return "attivita_scheda_turni_elenco.html", contesto
@pagina_privata
def attivita_scheda_partecipanti(request, me, pk=None):
    """Show the participant roster of a whole activity."""
    attivita = get_object_or_404(Attivita, pk=pk)
    if not me.permessi_almeno(attivita, MODIFICA):
        return redirect(ERRORE_PERMESSI)
    contesto = {
        "attivita": attivita,
        "elenco": ElencoPartecipantiAttivita(attivita.queryset_modello()),
        "puo_modificare": True
    }
    return "attivita_scheda_partecipanti.html", contesto
@pagina_privata
def attivita_scheda_turni_rimuovi(request, me, pk=None, turno_pk=None, partecipante_pk=None):
    # NOTE(review): this view is an exact copy of
    # attivita_scheda_turni_ritirati and never uses `partecipante_pk`;
    # it withdraws the *requesting* user's own pending participation.
    # Presumably it was meant to remove another participant — confirm
    # against the URL configuration.
    turno = get_object_or_404(Turno, pk=turno_pk)
    stato = turno.persona(me)
    if stato != turno.TURNO_PRENOTATO_PUOI_RITIRARTI:
        return errore_generico(request, me, titolo="Non puoi ritirare la tua partecipazione",
                               messaggio="Una volta che la tua partecipazione è stata confermata, "
                                         "non puoi più ritirarla da Gaia. Se non puoi presentarti, "
                                         "scrivi a un referente dell'attività, che potrà valutare "
                                         "la situazione e rimuoverti dai partecipanti.",
                               torna_titolo="Torna al turno",
                               torna_url=turno.url)
    partecipazione = Partecipazione.con_esito_pending(turno=turno, persona=me).first()
    if not partecipazione:
        raise ValueError("TURNO_PRENOTATO_PUOI_RITIRARTI assegnato, ma nessuna partecipazione"
                         "trovata. ")
    partecipazione.autorizzazioni_ritira()
    return messaggio_generico(request, me, titolo="Richiesta ritirata.",
                              messaggio="La tua richiesta di partecipazione a questo turno "
                                        "è stata ritirata con successo.",
                              torna_titolo="Torna al turno",
                              torna_url=turno.url)
@pagina_privata
def attivita_scheda_turni_link_permanente(request, me, pk=None, turno_pk=None):
    """Permanent link for a shift: redirect to its page, highlighted."""
    turno = get_object_or_404(Turno, pk=turno_pk)
    destinazione = "/attivita/scheda/%d/turni/%d/?evidenzia_turno=%d#turno-%d" % (
        turno.attivita.pk, turno.elenco_pagina(), turno.pk, turno.pk,
    )
    return redirect(destinazione)
@pagina_privata
def attivita_scheda_turni_modifica_link_permanente(request, me, pk=None, turno_pk=None):
    """Permanent link for editing a shift: redirect to its edit page."""
    turno = get_object_or_404(Turno, pk=turno_pk)
    destinazione = "/attivita/scheda/%d/turni/modifica/%d/?evidenzia_turno=%d#turno-%d" % (
        turno.attivita.pk, turno.elenco_pagina(), turno.pk, turno.pk
    )
    return redirect(destinazione)
@pagina_privata(permessi=(GESTIONE_ATTIVITA,))
def attivita_scheda_informazioni_modifica(request, me, pk=None):
    """
    Edit page for an activity's information.
    """
    attivita = get_object_or_404(Attivita, pk=pk)
    # Remember the open/closed state so we can detect a change below.
    apertura_precedente = attivita.apertura
    if not me.permessi_almeno(attivita, MODIFICA):
        if me.permessi_almeno(attivita, MODIFICA, solo_deleghe_attive=False):
            # My delegation for this activity is suspended: go to the
            # first page, where it can be re-activated.
            return redirect(attivita.url)
        return redirect(ERRORE_PERMESSI)
    if request.POST and not me.ha_permesso(GESTIONE_POTERI_CENTRALE_OPERATIVA_SEDE):
        # Users without the CO power may not flip the flag: force the
        # posted value back to the stored one.
        request.POST = request.POST.copy()
        request.POST['centrale_operativa'] = attivita.centrale_operativa
    modulo = ModuloAttivitaInformazioni(request.POST or None, instance=attivita)
    modulo.fields['estensione'].queryset = attivita.sede.get_ancestors(include_self=True).exclude(estensione=NAZIONALE)
    if not me.ha_permesso(GESTIONE_POTERI_CENTRALE_OPERATIVA_SEDE):
        modulo.fields['centrale_operativa'].widget.attrs['disabled'] = True
    if modulo.is_valid():
        modulo.save()
        # If the open/closed state of the activity changed, trigger the
        # corresponding workflow.
        attivita.refresh_from_db()
        if attivita.apertura != apertura_precedente:
            if attivita.apertura == attivita.APERTA:
                attivita.riapri()
            else:
                attivita.chiudi(autore=me)
    contesto = {
        "attivita": attivita,
        "puo_modificare": True,
        "modulo": modulo,
    }
    return 'attivita_scheda_informazioni_modifica.html', contesto
@pagina_privata(permessi=(GESTIONE_ATTIVITA,))
def attivita_riapri(request, me, pk=None):
    """
    Reopen a closed activity and notify interested parties.
    """
    attivita = get_object_or_404(Attivita, pk=pk)
    # Suspended delegations are accepted here on purpose: reopening is
    # how a suspended delegate reactivates their delegation.
    if not me.permessi_almeno(attivita, MODIFICA, solo_deleghe_attive=False):
        return redirect(ERRORE_PERMESSI)
    attivita.riapri(invia_notifiche=True)
    return redirect(attivita.url)
@pagina_privata(permessi=(GESTIONE_ATTIVITA,))
def attivita_scheda_turni_modifica(request, me, pk=None, pagina=None):
    """
    Bulk edit page for an activity's shifts (one form per shift, plus a
    participant-adding form and attendance checkboxes).
    """
    attivita = get_object_or_404(Attivita, pk=pk)
    if not me.permessi_almeno(attivita, MODIFICA):
        if me.permessi_almeno(attivita, MODIFICA, solo_deleghe_attive=False):
            # My delegation for this activity is suspended: go to the
            # first page, where it can be re-activated.
            return redirect(attivita.url)
        return redirect(ERRORE_PERMESSI)
    if pagina is None:
        # Land on the page that contains today's shifts.
        pagina = "/attivita/scheda/%d/turni/modifica/%d/" % (attivita.pk, attivita.pagina_turni_oggi())
        return redirect(pagina)
    turni = attivita.turni.all()
    pagina = int(pagina)
    if pagina < 0:
        pagina = 1
    p = Paginator(turni, Turno.PER_PAGINA)
    pg = p.page(pagina)
    moduli = []
    moduli_aggiungi_partecipanti = []
    turni = pg.object_list
    for turno in turni:
        # One edit form and one add-participants form per shift,
        # disambiguated by pk-based prefixes.
        modulo = ModuloModificaTurno(request.POST or None,
                                     instance=turno,
                                     prefix="turno_%d" % (turno.pk,))
        moduli += [modulo]
        modulo_aggiungi_partecipanti = ModuloAggiungiPartecipanti(request.POST or None,
                                                                 prefix="turno_agg_%d" % (turno.pk,))
        moduli_aggiungi_partecipanti += [modulo_aggiungi_partecipanti]
        if modulo.is_valid():
            modulo.save()
        if modulo_aggiungi_partecipanti.is_valid():
            # Add the selected participants to this shift.
            for partecipante in modulo_aggiungi_partecipanti.cleaned_data['persone']:
                turno.aggiungi_partecipante(partecipante, richiedente=me)
            # NOTE(review): this redirect's return value is discarded
            # (no `return`), so the loop continues and the page renders
            # normally — confirm whether a redirect was intended here.
            redirect(turno.url_modifica)
    # Save and update the attendance flags posted as presenza-<pk> fields.
    for chiave, valore in request.POST.items():
        if "presenza-" in chiave:
            p_pk = int(chiave.replace("presenza-", ""))
            p_si = '1' in valore
            pa = Partecipazione.objects.get(pk=p_pk)
            pa.stato = Partecipazione.RICHIESTA if p_si else Partecipazione.NON_PRESENTATO
            pa.save()
    turni_e_moduli = zip(turni, moduli, moduli_aggiungi_partecipanti)
    evidenzia_turno = Turno.objects.get(pk=request.GET['evidenzia_turno']) if 'evidenzia_turno' in request.GET else None
    contesto = {
        'pagina': pagina,
        'pagine': p.num_pages,
        'totale': p.count,
        'turni': turni_e_moduli,
        'ha_precedente': pg.has_previous(),
        'ha_successivo': pg.has_next(),
        'pagina_precedente': pagina-1,
        'pagina_successiva': pagina+1,
        "attivita": attivita,
        "puo_modificare": True,
        "url_modifica": '/modifica',
        "evidenzia_turno": evidenzia_turno,
    }
    return 'attivita_scheda_turni_modifica.html', contesto
@pagina_privata
def attivita_scheda_partecipazione_cancella(request, me, pk, partecipazione_pk):
    """Remove a participation record and return to the shift editor."""
    partecipazione = get_object_or_404(Partecipazione, pk=partecipazione_pk)
    turno = partecipazione.turno
    if not me.permessi_almeno(turno.attivita, MODIFICA):
        return redirect(ERRORE_PERMESSI)
    partecipazione.delete()
    return redirect(turno.url_modifica)
@pagina_privata(permessi=(GESTIONE_ATTIVITA,))
def attivita_scheda_report(request, me, pk=None):
    """Show an activity's report page; a POST downloads the Excel report."""
    # BUG FIX: removed an unreachable `if False: return ci_siamo_quasi(...)`
    # dead-code branch and a commented-out pdf redirect.
    attivita = get_object_or_404(Attivita, pk=pk)
    if not me.permessi_almeno(attivita, MODIFICA):
        return redirect(ERRORE_PERMESSI)
    if request.POST:
        return attivita.genera_report(format=Attivita.REPORT_FORMAT_EXCEL)
    context = {
        "attivita": attivita,
        "puo_modificare": True,
    }
    return 'attivita_scheda_report.html', context
@pagina_privata
def attivita_statistiche(request, me):
    # Activity statistics over the last 12 periods (week / fortnight /
    # month, chosen via the form) for the Sedi the user manages.
    sedi = me.oggetti_permesso(GESTIONE_ATTIVITA_SEDE)
    modulo = ModuloStatisticheAttivita(request.POST or None, initial={"sedi": sedi})
    modulo.fields['sedi'].queryset = sedi
    statistiche = []
    chart = {}
    periodi = 12  # number of periods shown, newest last
    if modulo.is_valid():
        oggi = date.today()
        giorni = int(modulo.cleaned_data['periodo'])
        if giorni == modulo.SETTIMANA:
            etichetta = "sett."
        elif giorni == modulo.QUINDICI_GIORNI:
            etichetta = "fortn."
        elif giorni == modulo.MESE:
            etichetta = "mesi"
        else:
            raise ValueError("Etichetta mancante.")
        # Walk backwards from the oldest period to the most recent one.
        for periodo in range(periodi, 0, -1):
            dati = {}
            fine = oggi - timedelta(days=(giorni*periodo))
            inizio = fine - timedelta(days=giorni-1)
            # Expand the [inizio, fine] dates to full-day datetimes.
            fine = datetime.combine(fine, time(23, 59, 59))
            inizio = datetime.combine(inizio, time(0, 0, 0))
            dati['inizio'] = inizio
            dati['fine'] = fine
            # First, build all the querysets for this window.
            qs_attivita = Attivita.objects.filter(stato=Attivita.VISIBILE, sede__in=sedi)
            qs_turni = Turno.objects.filter(attivita__in=qs_attivita, inizio__lte=fine, fine__gte=inizio)
            qs_part = Partecipazione.con_esito_ok(turno__in=qs_turni)
            # Total shift hours and person-hours of confirmed participants.
            ore_di_servizio = qs_turni.annotate(durata=F('fine') - F('inizio')).aggregate(totale_ore=Sum('durata'))['totale_ore'] or timedelta()
            ore_uomo_di_servizio = qs_part.annotate(durata=F('turno__fine') - F('turno__inizio')).aggregate(totale_ore=Sum('durata'))['totale_ore'] or timedelta()
            # Then, fill in the statistics dictionary.
            dati['etichetta'] = "%d %s fa" % (periodo, etichetta,)
            dati['num_turni'] = qs_turni.count()
            dati['ore_di_servizio'] = ore_di_servizio
            dati['ore_uomo_di_servizio'] = ore_uomo_di_servizio
            try:
                dati['rapporto'] = round(ore_uomo_di_servizio / ore_di_servizio, 3)
            except ZeroDivisionError:
                dati['rapporto'] = 0
            statistiche.append(dati)
        # Serialize the series as JSON for the client-side chart.
        chart['labels'] = json.dumps([x['etichetta'] for x in statistiche])
        chart['num_turni'] = json.dumps([x['num_turni'] for x in statistiche])
        chart['ore_di_servizio'] = json.dumps([timedelta_ore(x['ore_di_servizio']) for x in statistiche])
        chart['ore_uomo_di_servizio'] = json.dumps([timedelta_ore(x['ore_uomo_di_servizio']) for x in statistiche])
        chart['rapporto'] = json.dumps([x['rapporto'] for x in statistiche])
    contesto = {
        "modulo": modulo,
        "statistiche": statistiche,
        "chart": chart,
    }
    return 'attivita_statistiche.html', contesto
| CroceRossaItaliana/jorvik | attivita/viste.py | Python | gpl-3.0 | 33,384 |
import pytest
import numpy as np
import os
import sys
import tempfile
from shutil import rmtree
from pyCGM_Single.pyCGM_Helpers import getfilenames
from pyCGM_Single.pycgmCalc import calcAngles, calcKinetics
from pyCGM_Single.pycgmIO import dataAsDict, loadData, loadVSK, writeResult
from pyCGM_Single.pycgmStatic import getStatic, rotmat
#Define several helper functions used in loading and comparing output CSV files
def convert_to_pycgm_label(label):
    """Convert angle label name to known pycgm angle label.

    Since output from other programs can use slightly different angle
    labels, we convert them to the pycgm format to make it easier to
    compare csv outputs across different formats.

    Parameters
    ----------
    label : string
        String of the label name.

    Returns
    -------
    string
        String of the known pycgm label corresponding to `label`, or
        None when the label is neither a pycgm label nor a known alias.
    """
    pycgm_labels = frozenset([
        'Pelvis', 'R Hip', 'L Hip', 'R Knee', 'L Knee', 'R Ankle',
        'L Ankle', 'R Foot', 'L Foot',
        'Head', 'Thorax', 'Neck', 'Spine', 'R Shoulder', 'L Shoulder',
        'R Elbow', 'L Elbow', 'R Wrist', 'L Wrist',
    ])
    # Angle names commonly used elsewhere, mapped to pycgm angle names.
    aliases = {
        'RPelvisAngles': 'Pelvis',
        'RHipAngles': 'R Hip',
        'LHipAngles': 'L Hip',
        'RKneeAngles': 'R Knee',
        'LKneeAngles': 'L Knee',
        'RAnkleAngles': 'R Ankle',
        'LAnkleAngles': 'L Ankle',
        'RFootProgressAngles': 'R Foot',
        'LFootProgressAngles': 'L Foot',
        'RHeadAngles': 'Head',
        'RThoraxAngles': 'Thorax',
        'RNeckAngles': 'Neck',
        'RSpineAngles': 'Spine',
        'RShoulderAngles': 'R Shoulder',
        'LShoulderAngles': 'L Shoulder',
        'RElbowAngles': 'R Elbow',
        'LElbowAngles': 'L Elbow',
        'RWristAngles': 'R Wrist',
        'LWristAngles': 'L Wrist'
    }
    if label in pycgm_labels:
        return label
    # dict.get returns None for unknown labels, matching the old behavior.
    return aliases.get(label)
def load_output_csv(csv_file, header_line_number=5, first_output_row=7, first_output_col=1, label_prefix_len=0):
    """
    Loads an output csv of angles or markers into a 2d array where each index
    represents a row in the csv.
    This function tests for equality of the 19 angles that pycgm outputs, but allows
    loading of files of different formats. Assumes that each angle has exactly three
    values associated with it (x, y, z).
    Parameters
    ----------
    csv_file : string
        String of the path of the filename to be loaded.
    header_line_number : int
        Index of the line number in which the angle or marker labels are written.
        The default header_line_number of 5 represents the output from pycgmIO.writeResult().
    first_output_row : int
        Index of the line number in which the first row of output begins.
        The default first_output_row of 7 represents the output from pycgmIO.writeResult().
    first_output_col : int
        Index of the column number in which the first column of output begins.
        The default first_output_col of 1 represents the output from pycgmIO.writeResult().
    label_prefix_len : int
        Length of the prefix on each label, if it exists. 0 by default.
    Returns
    -------
    output : 2darray
        2d matrix where each index represents a row of angle data loaded from
        the csv.
    """
    known_labels = ['Pelvis','R Hip','L Hip','R Knee','L Knee','R Ankle',
                    'L Ankle','R Foot','L Foot',
                    'Head','Thorax','Neck','Spine','R Shoulder','L Shoulder',
                    'R Elbow','L Elbow','R Wrist','L Wrist']
    output = []
    infile = open(csv_file, 'r')
    lines = infile.readlines()
    # Create a dict of index to known pycgm label:
    index_to_header = {}
    headers = lines[header_line_number].strip().split(',')[first_output_col:]
    for i in range(len(headers)):
        header = headers[i]
        if header != "":
            # Find which known pycgm header this header corresponds to,
            # trimming prefix length if needed
            header = header.strip()[label_prefix_len:]
            header = convert_to_pycgm_label(header)
            # Record that index i corresponds to this header
            index_to_header[i] = header
    # Loop over all lines starting from the first line of output
    for line in lines[first_output_row:]:
        # 19 angles x 3 components, in known_labels order; unmatched
        # columns leave zeros behind.
        arr = [0 for i in range(19*3)]
        # Convert line in the csv to an array of floats
        formatted_line = line.strip().split(',')[first_output_col:]
        l = []
        for num in formatted_line:
            try:
                l.append(float(num))
            except:
                # Non-numeric cells become 0 placeholders.
                l.append(0)
        # Loop over the array of floats, knowing which indices
        # correspond to which angles from the index_to_header dictionary
        for i in range(len(l)):
            if i in index_to_header:
                label = index_to_header[i]
                if (label != None):
                    index = known_labels.index(label) * 3
                    # NOTE(review): assumes two further columns follow a
                    # labeled one (x, y, z triple); a truncated row
                    # would raise IndexError here — confirm inputs.
                    arr[index] = l[i]
                    arr[index+1] = l[i+1]
                    arr[index+2] = l[i+2]
        output.append(arr)
    infile.close()
    return np.array(output)
def load_center_of_mass(csv_file, row_start, col_start):
    """Load center of mass values into an array, one entry per frame.

    Parameters
    ----------
    csv_file : string
        Filename of the csv file to be loaded.
    row_start : int
        Index of the first row in which center of mass data begins.
    col_start : int
        Index of the first column in which center of mass data begins.

    Returns
    -------
    center_of_mass : 2darray
        Array representation of the center of mass data, three floats
        (x, y, z) per frame.
    """
    with open(csv_file, 'r') as com_file:
        all_lines = com_file.readlines()
    com_frames = []
    for row in all_lines[row_start:]:
        cells = row.strip().split(',')
        triple = [float(value) for value in cells[col_start:col_start + 3]]
        com_frames.append(triple)
    return com_frames
def compare_center_of_mass(result, expected, tolerance):
    """Asserts that two arrays of center of mass coordinates
    are equal with a certain tolerance.
    Assumes that center of mass coordinates are in mm.
    Result and expected must be the same length.
    Parameters
    ----------
    result : array
        Array of result center of mass coordinates.
    expected : array
        Array of expected center of mass coordinates.
    tolerance : int
        Sets how large the absolute difference between any two center of
        mass coordinates can be.
    """
    for i in range(len(expected)):
        for j in range(len(expected[i])):
            # Bug fix: the closing paren of abs() previously wrapped the whole
            # comparison -- abs(result - expected < tolerance) -- so any large
            # NEGATIVE difference evaluated to abs(True) == 1 and passed.
            assert abs(result[i][j] - expected[i][j]) < tolerance
def load_files(dynamic_trial, static_trial, vsk_file):
    """Load dynamic, static, and subject-measurement data with the pycgmIO
    loaders and return them as a (motion, static, vsk) tuple."""
    dynamic = loadData(dynamic_trial)
    static = loadData(static_trial)
    subject_measurements = loadVSK(vsk_file, dict=False)
    return dynamic, static, subject_measurements
def get_columns_to_compare(test_folder):
    """
    Helper function to test the files in SampleData. Gets
    indices of angles that can be compared for equality, depending
    on which file is being compared.
    There are 57 angle coordinates to be compared, with 3 coordinates
    for each of 19 angles.
    If the global coordinate system is unknown for a given file,
    angles affected by the GCS are ignored.
    Ignored angles are Pelvis, R Foot, L Foot, Head, Thorax, with corresponding
    indices 0, 1, 2 and 21 - 32.
    The files in Test_Files also ignore the Neck X coordinate, at
    index 33.
    """
    all_columns = list(range(57))
    if test_folder == 'ROM':
        # ROM trials have a known GCS; every coordinate is comparable.
        return all_columns
    ignored = set(range(21, 33)) | {0, 1, 2}
    if test_folder == 'Test_Files':
        ignored.add(33)
    if test_folder in ('59993_Frame', 'Test_Files'):
        return [column for column in all_columns if column not in ignored]
class TestCSVOutput:
    """Regression tests: regenerate pycgm CSV output for each SampleData
    trial and compare it column-by-column against stored expected results."""
    @classmethod
    def setup_class(self):
        """
        Called once for all tests in TestCSVOutput.
        Sets rounding precision, and sets the current working
        directory to the pyCGM folder. Sets the current python version
        and loads filenames used for testing.
        We also use the pycgm functions to generate and load output CSV data
        and load them into the class.
        """
        self.rounding_precision = 8
        cwd = os.getcwd()
        # Run from the repository root so the SampleData paths below resolve.
        if (cwd.split(os.sep)[-1]=="pyCGM_Single"):
            parent = os.path.dirname(cwd)
            os.chdir(parent)
        self.cwd = os.getcwd()
        self.pyver = sys.version_info.major
        #Create a temporary directory used for writing CSVs to
        if (self.pyver == 2):
            # Python 2 has no TemporaryDirectory; removed in teardown_class.
            self.tmp_dir_name = tempfile.mkdtemp()
        else:
            self.tmp_dir = tempfile.TemporaryDirectory()
            self.tmp_dir_name = self.tmp_dir.name
        #Create file path names for the files being tested
        self.sample_data_directory = os.path.join(self.cwd, "SampleData")
        self.directory_59993_Frame = os.path.join(self.sample_data_directory, '59993_Frame')
        self.directory_ROM = os.path.join(self.sample_data_directory, 'ROM')
        self.directory_test = os.path.join(self.sample_data_directory, 'Test_Files')
        #Load outputs to be tested for SampleData/59993_Frame/
        self.filename_59993_Frame_dynamic = os.path.join(self.directory_59993_Frame, '59993_Frame_Dynamic.c3d')
        self.filename_59993_Frame_static = os.path.join(self.directory_59993_Frame, '59993_Frame_Static.c3d')
        self.filename_59993_Frame_vsk = os.path.join(self.directory_59993_Frame, '59993_Frame_SM.vsk')
        motion_data,static_data,vsk_data = load_files(self.filename_59993_Frame_dynamic, self.filename_59993_Frame_static, self.filename_59993_Frame_vsk)
        cal_SM = getStatic(static_data,vsk_data,flat_foot=False)
        # Only frames 0-500 are computed for this large trial.
        kinematics,joint_centers=calcAngles(motion_data,start=0, end=500,vsk=cal_SM,splitAnglesAxis=False,formatData=False,returnjoints=True)
        outfile = os.path.join(self.tmp_dir_name, 'output_59993_Frame')
        writeResult(kinematics,outfile,angles=True,axis=False)
        # NOTE(review): the doubled '.csv.csv' extension matches the stored
        # expected-results filename on disk -- confirm it is intentional.
        expected_file = os.path.join(self.directory_59993_Frame,'pycgm_results.csv.csv')
        self.result_59993_Frame = load_output_csv(outfile + '.csv')
        self.expected_59993_Frame = load_output_csv(expected_file)
        #Load outputs to be tested for SampleData/ROM/
        self.filename_ROM_dynamic = os.path.join(self.directory_ROM, 'Sample_Dynamic.c3d')
        self.filename_ROM_static = os.path.join(self.directory_ROM, 'Sample_Static.c3d')
        self.filename_ROM_vsk = os.path.join(self.directory_ROM, 'Sample_SM.vsk')
        motion_data,static_data,vsk_data = load_files(self.filename_ROM_dynamic, self.filename_ROM_static, self.filename_ROM_vsk)
        cal_SM = getStatic(static_data,vsk_data,flat_foot=False)
        kinematics,joint_centers=calcAngles(motion_data,vsk=cal_SM,splitAnglesAxis=False,formatData=False,returnjoints=True)
        outfile = os.path.join(self.tmp_dir_name, 'output_ROM')
        writeResult(kinematics,outfile,angles=True,axis=False)
        expected_file = os.path.join(self.directory_ROM,'pycgm_results.csv.csv')
        self.result_ROM = load_output_csv(outfile + '.csv')
        self.expected_ROM = load_output_csv(expected_file)
        #Load outputs to be tested for SampleData/Test_Files/
        self.filename_test_dynamic = os.path.join(self.directory_test, 'Movement_trial.c3d')
        self.filename_test_static = os.path.join(self.directory_test, 'Static_trial.c3d')
        self.filename_test_vsk = os.path.join(self.directory_test, 'Test.vsk')
        motion_data,static_data,vsk_data = load_files(self.filename_test_dynamic, self.filename_test_static, self.filename_test_vsk)
        cal_SM = getStatic(static_data,vsk_data,flat_foot=False)
        kinematics,joint_centers=calcAngles(motion_data,vsk=cal_SM,splitAnglesAxis=False,formatData=False,returnjoints=True)
        outfile = os.path.join(self.tmp_dir_name, 'output_Test_Files')
        writeResult(kinematics,outfile,angles=True,axis=False)
        # Expected file comes from an external pipeline, hence the
        # non-default parsing offsets and 5-character label prefix below.
        expected_file = os.path.join(self.directory_test,'Movement_trial.csv')
        self.result_Test_Files = load_output_csv(outfile + '.csv')
        self.expected_Test_Files = load_output_csv(expected_file, header_line_number=2, first_output_row=5, first_output_col=2, label_prefix_len=5)
    @classmethod
    def teardown_class(self):
        """
        Called once after all tests in TestCSVOutput are finished running.
        If using Python 2, perform cleanup of the previously created
        temporary directory in setup_class(). Cleanup is done automatically in
        Python 3.
        """
        if (self.pyver == 2):
            rmtree(self.tmp_dir_name)
    @pytest.fixture
    def angles_ROM(self, request):
        """Return the (result, expected) column pair for the parametrized
        column index."""
        column = request.param
        return self.result_ROM[:,column], self.expected_ROM[:,column]
    @pytest.mark.parametrize("angles_ROM", get_columns_to_compare("ROM"), indirect=True)
    def test_ROM(self, angles_ROM):
        """
        Tests pycgm output csv files using input files from SampleData/ROM/.
        """
        result_angles, expected_angles = angles_ROM
        np.testing.assert_almost_equal(result_angles, expected_angles, self.rounding_precision)
    @pytest.fixture
    def angles_59993_Frame(self, request):
        """Return the (result, expected) column pair for the parametrized
        column index."""
        column = request.param
        return self.result_59993_Frame[:,column], self.expected_59993_Frame[:,column]
    @pytest.mark.parametrize("angles_59993_Frame", get_columns_to_compare("59993_Frame"), indirect=True)
    def test_59993_Frame(self, angles_59993_Frame):
        """
        Tests pycgm output csv files using input files from
        SampleData/59993_Frame/.
        """
        result_angles, expected_angles = angles_59993_Frame
        np.testing.assert_almost_equal(result_angles, expected_angles, self.rounding_precision)
    @pytest.fixture
    def angles_Test_Files(self, request):
        """Return the (result, expected) column pair for the parametrized
        column index."""
        column = request.param
        return self.result_Test_Files[:,column], self.expected_Test_Files[:,column]
    @pytest.mark.parametrize("angles_Test_Files", get_columns_to_compare("Test_Files"), indirect=True)
    def test_Test_Files(self, angles_Test_Files):
        """
        Tests pycgm output csv files using input files from
        SampleData/Test_Files/.
        """
        result_angles, expected_angles = angles_Test_Files
        # Only 3 decimal places: expected values come from an external tool.
        np.testing.assert_almost_equal(result_angles, expected_angles, 3)
    def test_Test_Files_center_of_mass(self):
        """
        Test center of mass output values using sample files in SampleData/Test_Files/.
        """
        motion_data,static_data,vsk_data = load_files(self.filename_test_dynamic, self.filename_test_static, self.filename_test_vsk)
        cal_SM = getStatic(static_data,vsk_data,flat_foot=False)
        kinematics,joint_centers=calcAngles(motion_data,vsk=cal_SM,splitAnglesAxis=False,formatData=False,returnjoints=True)
        kinetics = calcKinetics(joint_centers, cal_SM['Bodymass'])
        # 30mm tolerance against externally produced expected values.
        expected = load_center_of_mass(os.path.join(self.directory_test,'Movement_trial.csv'), 5, 2)
        compare_center_of_mass(kinetics, expected, 30)
# pylint: disable=no-member
from collections import Counter
from CommonServerUserPython import *
from CommonServerPython import *
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
import uuid
import spacy
import string
from html.parser import HTMLParser
from html import unescape
from re import compile as _Re
import pandas as pd
from langdetect import detect
from langdetect.lang_detect_exception import LangDetectException
# Sentinel language names accepted by the `language` argument: 'Any' disables
# language filtering; 'Other' selects the non-spacy tokenization methods.
ANY_LANGUAGE = 'Any'
OTHER_LANGUAGE = 'Other'
def hash_word(word, hash_seed):
    """Return the seeded djb2 hash of `word` as a decimal string."""
    return str(hash_djb2(word, int(hash_seed)))
# langdetect language codes -> names accepted by the `language` argument;
# only these languages can be positively matched.
CODES_TO_LANGUAGES = {'en': 'English',
                      'de': 'German',
                      'fr': 'French',
                      'es': 'Spanish',
                      'pt': 'Portuguese',
                      'it': 'Italian',
                      'nl': 'Dutch',
                      }
# Applied in order by clean_html_from_text: script/style blocks, HTML
# comments, remaining tags, then whitespace runs.
# NOTE(review): the single-space pattern below looks like a mangled '&nbsp;'
# entity -- confirm against the upstream source.
html_patterns = [
    re.compile(r"(?is)<(script|style).*?>.*?(</\1>)"),
    re.compile(r"(?s)<!--(.*?)-->[\n]?"),
    re.compile(r"(?s)<.*?>"),
    re.compile(r" "),
    re.compile(r" +")
]
# Key under which each incident's detected language is stored.
LANGUAGE_KEY = 'language'
def create_text_result(original_text, tokenized_text, original_words_to_tokens, hash_seed=None):
    """Build the standard preprocessing result dict.

    Always contains the original text, the tokenized text, and the mapping
    from original words to their tokens. When hash_seed is given, hashed
    variants of the tokenized text and the mapping are added as well.
    """
    result = {
        'originalText': original_text,
        'tokenizedText': tokenized_text,
        'originalWordsToTokens': original_words_to_tokens,
    }
    if hash_seed is None:
        return result
    hashed_tokens = [hash_word(token, hash_seed) for token in tokenized_text.split()]
    result['hashedTokenizedText'] = ' '.join(hashed_tokens)
    result['wordsToHashedTokens'] = {
        word: [hash_word(token, hash_seed) for token in tokens]
        for word, tokens in original_words_to_tokens.items()
    }
    return result
def clean_html_from_text(text):
    """Strip scripts, styles, tags and entities, returning trimmed plain text."""
    stripped = text
    for html_pattern in html_patterns:
        stripped = html_pattern.sub(" ", stripped)
    return unescape(stripped).strip()
class Tokenizer:
    """Configurable tokenizer behind the 'nlp' pre-process type.

    Cleans and tokenizes text either with a spacy English pipeline
    ('tokenizer' method) or with simple word/letter splitting, applying the
    filter/replace options chosen at construction. word_tokenize() returns
    the dict produced by create_text_result(), hashed when hash_seed is set.
    """
    def __init__(self, clean_html=True, remove_new_lines=True, hash_seed=None, remove_non_english=True,
                 remove_stop_words=True, remove_punct=True, remove_non_alpha=True, replace_emails=True,
                 replace_numbers=True, lemma=True, replace_urls=True, language=ANY_LANGUAGE,
                 tokenization_method='tokenizer'):
        # Placeholder tokens substituted for numbers, URLs and email addresses.
        self.number_pattern = "NUMBER_PATTERN"
        self.url_pattern = "URL_PATTERN"
        self.email_pattern = "EMAIL_PATTERN"
        self.reserved_tokens = set([self.number_pattern, self.url_pattern, self.email_pattern])
        self.clean_html = clean_html
        self.remove_new_lines = remove_new_lines
        self.hash_seed = hash_seed
        self.remove_non_english = remove_non_english
        self.remove_stop_words = remove_stop_words
        self.remove_punct = remove_punct
        self.remove_non_alpha = remove_non_alpha
        self.replace_emails = replace_emails
        self.replace_urls = replace_urls
        self.replace_numbers = replace_numbers
        self.lemma = lemma
        self.language = language
        self.tokenization_method = tokenization_method
        # Texts at or above this length are not tokenized (handle_long_text).
        self.max_text_length = 10 ** 5
        self.nlp = None
        self.html_parser = HTMLParser()
        # Splits a string into characters while keeping surrogate pairs intact.
        self._unicode_chr_splitter = _Re('(?s)((?:[\ud800-\udbff][\udc00-\udfff])|.)').split
        # The spacy model is rebuilt every spacy_reset_count calls --
        # presumably to bound memory usage; confirm.
        self.spacy_count = 0
        self.spacy_reset_count = 500
    def handle_long_text(self):
        """Fallback for texts over max_text_length: empty tokenization."""
        return '', ''
    def map_indices_to_words(self, text):
        """Map each character index of `text` to the whitespace-delimited word
        containing it (used to align spacy tokens back to source words)."""
        original_text_indices_to_words = {}
        word_start = 0
        while word_start < len(text) and text[word_start].isspace():
            word_start += 1
        for word in text.split():
            for char_idx, char in enumerate(word):
                original_text_indices_to_words[word_start + char_idx] = word
            # find beginning of next word
            word_start += len(word)
            while word_start < len(text) and text[word_start].isspace():
                word_start += 1
        return original_text_indices_to_words
    def remove_line_breaks(self, text):
        """Replace CR/LF characters with spaces."""
        return text.replace("\r", " ").replace("\n", " ")
    def remove_multiple_whitespaces(self, text):
        """Collapse whitespace runs to single spaces and trim the ends."""
        return re.sub(r"\s+", " ", text).strip()
    def handle_tokenizaion_method(self, text):
        """Dispatch to spacy or simple tokenization; return the tokenized
        string and the original-word -> tokens mapping."""
        if self.tokenization_method == 'tokenizer':
            tokens_list, original_words_to_tokens = self.tokenize_text_spacy(text)
        else:
            tokens_list, original_words_to_tokens = self.tokenize_text_other(text)
        tokenized_text = ' '.join(tokens_list).strip()
        return tokenized_text, original_words_to_tokens
    def tokenize_text_other(self, text):
        """Non-spacy tokenization: split into punctuation-stripped words
        ('byWords') or into single characters ('byLetters')."""
        tokens_list = []
        tokenization_method = self.tokenization_method
        if tokenization_method == 'byWords':
            original_words_to_tokens = {}
            for t in text.split():
                token_without_punct = ''.join([c for c in t if c not in string.punctuation])
                if len(token_without_punct) > 0:
                    tokens_list.append(token_without_punct)
                    original_words_to_tokens[token_without_punct] = t
        elif tokenization_method == 'byLetters':
            for t in text:
                tokens_list += [chr for chr in self._unicode_chr_splitter(t) if chr and chr != ' ']
            # NOTE(review): `t` is the last character of the loop above, so
            # every token maps to that final character -- looks suspect;
            # confirm intended behavior.
            original_words_to_tokens = {c: t for c in tokens_list}
        else:
            return_error('Unsupported tokenization method: when language is "Other" ({})'.format(tokenization_method))
        return tokens_list, original_words_to_tokens
    def tokenize_text_spacy(self, text):
        """Tokenize with spacy, applying the configured filters/replacements;
        also returns the original-word -> produced-tokens mapping."""
        if self.nlp is None or self.spacy_count % self.spacy_reset_count == 0:
            self.init_spacy_model()
        doc = self.nlp(text) # type: ignore
        self.spacy_count += 1
        original_text_indices_to_words = self.map_indices_to_words(text)
        tokens_list = []
        original_words_to_tokens = {} # type: ignore
        for word in doc:
            if word.is_space:
                continue
            elif self.remove_stop_words and word.is_stop:
                continue
            elif self.remove_punct and word.is_punct:
                continue
            elif self.replace_emails and '@' in word.text:
                tokens_list.append(self.email_pattern)
            elif self.replace_urls and word.like_url:
                tokens_list.append(self.url_pattern)
            elif self.replace_numbers and (word.like_num or word.pos_ == 'NUM'):
                tokens_list.append(self.number_pattern)
            elif self.remove_non_alpha and not word.is_alpha:
                continue
            elif self.remove_non_english and word.text not in self.nlp.vocab: # type: ignore
                continue
            else:
                # Prefer the lemma unless it is spacy's pronoun placeholder.
                if self.lemma and word.lemma_ != '-PRON-':
                    token_to_add = word.lemma_
                else:
                    token_to_add = word.lower_
                tokens_list.append(token_to_add)
                # Record which original word produced this token.
                original_word = original_text_indices_to_words[word.idx]
                if original_word not in original_words_to_tokens:
                    original_words_to_tokens[original_word] = []
                original_words_to_tokens[original_word].append(token_to_add)
        return tokens_list, original_words_to_tokens
    def init_spacy_model(self):
        """(Re)load the small English spacy model, disabling unused pipes."""
        self.nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner', 'textcat'])
    def word_tokenize(self, text):
        """Tokenize a string or list of strings.

        Returns one result dict per input (see create_text_result); when a
        single string was given, the dict itself is returned rather than a
        one-element list.
        """
        if not isinstance(text, list):
            text = [text]
        result = []
        for t in text:
            original_text = t
            if self.remove_new_lines:
                t = self.remove_line_breaks(t)
            if self.clean_html:
                t = clean_html_from_text(t)
                original_text = t
            t = self.remove_multiple_whitespaces(t)
            if len(t) < self.max_text_length:
                tokenized_text, original_words_to_tokens = self.handle_tokenizaion_method(t)
            else:
                tokenized_text, original_words_to_tokens = self.handle_long_text()
            text_result = create_text_result(original_text, tokenized_text, original_words_to_tokens,
                                             hash_seed=self.hash_seed)
            result.append(text_result)
        if len(result) == 1:
            result = result[0] # type: ignore
        return result
# define global parsers
# Field names written onto each incident: the concatenated raw text and its
# tokenized form; CONTEXT_KEY is the war-room context root for outputs.
DBOT_TEXT_FIELD = 'dbot_text'
DBOT_PROCESSED_TEXT_FIELD = 'dbot_processed_text'
CONTEXT_KEY = 'DBotPreProcessTextData'
# NOTE(review): duplicates `html_patterns` above and appears unused here --
# confirm before removing.
HTML_PATTERNS = [
    re.compile(r"(?is)<(script|style).*?>.*?(</\1>)"),
    re.compile(r"(?s)<!--(.*?)-->[\n]?"),
    re.compile(r"(?s)<.*?>"),
    re.compile(r" "),
    re.compile(r" +")
]
html_parser = HTMLParser()
# Lazily-initialized Tokenizer shared across calls (see pre_process_tokenizer).
tokenizer = None
def read_file(input_data, input_type):
    """Load incidents from `input_data` according to `input_type`.

    `input_type` names a format ('csv', 'json', 'pickle'), optionally
    suffixed with 'string'/'b64string' when the payload is passed inline
    rather than as a war-room file entry id.

    Returns a list of dicts (one per incident); calls return_error on an
    unsupported type.
    """
    data = []  # type: ignore
    if not input_data:
        return data
    if input_type.endswith("string"):
        if 'b64' in input_type:
            input_data = base64.b64decode(input_data)
            file_content = input_data.decode("utf-8")
        else:
            file_content = input_data
    else:
        # Resolve a war-room entry id to a path on disk.
        res = demisto.getFilePath(input_data)
        if not res:
            return_error("Entry {} not found".format(input_data))
        file_path = res['path']
        if input_type.startswith('json'):
            with open(file_path, 'r') as f:
                file_content = f.read()
    # NOTE(review): the 'csv' and 'pickle' branches use file_path, which is
    # only bound in the non-"string" path above -- a 'csvstring' input would
    # raise NameError here; confirm the supported input_type combinations.
    if input_type.startswith('csv'):
        return pd.read_csv(file_path).fillna('').to_dict(orient='records')
    elif input_type.startswith('json'):
        return json.loads(file_content)
    elif input_type.startswith('pickle'):
        return pd.read_pickle(file_path, compression=None)
    else:
        return_error("Unsupported file type %s" % input_type)
def concat_text_fields(data, target_field, text_fields):
    """Concatenate the requested text fields of every incident into
    `target_field`.

    Each entry of `text_fields` may be a "|"-separated group of alternatives;
    the first alternative that yields a non-empty string is used. Dotted
    names are resolved through demisto.dt.
    """
    for incident in data:
        text = ''
        for field_group in text_fields:
            for candidate in field_group.strip().split("|"):
                candidate = candidate.strip()
                if "." in candidate:
                    value = demisto.dt(incident, candidate)
                    if type(value) is list and len(value) > 0:
                        value = value[0]
                else:
                    value = incident.get(candidate) or incident.get(candidate.lower(), '')
                if value and isinstance(value, str):
                    text += value + ' '
                    break
        incident[target_field] = text.strip()
    return data
def remove_line_breaks(text):
    """Collapse CR/LF and whitespace runs into single spaces, trimmed."""
    without_breaks = text.replace("\r", " ").replace("\n", " ")
    return re.sub(r"\s+", " ", without_breaks).strip()
def clean_text_of_single_text(text, remove_html_tags):
    """Optionally strip HTML from `text`, then normalize its whitespace."""
    cleaned = clean_html_from_text(text) if remove_html_tags else text
    return remove_line_breaks(cleaned)
def clean_text_of_incidents_list(data, source_text_field, remove_html_tags):
    """Clean the source text field of every incident in place; return data."""
    for incident in data:
        incident[source_text_field] = clean_text_of_single_text(incident[source_text_field], remove_html_tags)
    return data
def pre_process_batch(data, source_text_field, target_text_field, pre_process_type, hash_seed):
    """Tokenize every incident's source text and store the (optionally
    hashed) tokenized text under `target_text_field`; return data."""
    token_key = 'tokenizedText' if hash_seed is None else 'hashedTokenizedText'
    for incident in data:
        processed = pre_process_single_text(incident[source_text_field], hash_seed, pre_process_type)
        incident[target_text_field] = processed[token_key]
    return data
def pre_process_single_text(raw_text, hash_seed, pre_process_type):
    """Dispatch to the preprocessor registered for `pre_process_type`."""
    return PRE_PROCESS_TYPES[pre_process_type](raw_text, hash_seed)
def pre_process_tokenizer(text, seed):
    """'nlp' preprocessing: tokenize via the shared spacy-backed Tokenizer.

    The module-level Tokenizer is created once per execution, configured from
    the script arguments (tokenizationMethod, language), and reused after.
    """
    global tokenizer
    if tokenizer is None:
        tokenizer = Tokenizer(tokenization_method=demisto.args()['tokenizationMethod'],
                              language=demisto.args()['language'], hash_seed=seed)
    processed_text = tokenizer.word_tokenize(text)
    return processed_text
def pre_process_none(text, seed):
    """'none' preprocessing: strip HTML only; tokens are the raw words."""
    cleaned_text = clean_html_from_text(text)
    # NOTE(review): the tokenized text is the *uncleaned* input while the
    # "original" text is the cleaned one -- looks inverted; confirm intent.
    words_to_tokens = {word: word for word in cleaned_text.split()}
    return create_text_result(original_text=cleaned_text,
                              tokenized_text=text,
                              original_words_to_tokens=words_to_tokens,
                              hash_seed=seed)
# Dispatch table mapping the preProcessType argument to its implementation.
PRE_PROCESS_TYPES = {
    'none': pre_process_none,
    'nlp': pre_process_tokenizer,
}
def remove_short_text(data, text_field, target_text_field, remove_short_threshold):
    """Drop incidents whose raw text has too few words or whose processed
    text is too short; return (kept_data, human-readable description).

    Note: the second check is on character length of the processed text,
    mirroring the original behavior.
    """
    def _long_enough(incident):
        word_count = len(incident[text_field].split(" "))
        return (word_count > remove_short_threshold
                and len(incident[target_text_field]) > remove_short_threshold)

    kept = [incident for incident in data if _long_enough(incident)]
    dropped = len(data) - len(kept)
    description = ""
    if dropped > 0:
        description += "Dropped %d samples shorter than %d words" % (dropped, remove_short_threshold) + "\n"
    return kept, description
def remove_foreign_language(data, text_field, language):
    """Annotate each incident with its detected language and drop those not
    matching `language`; return (kept_data, description)."""
    for incident in data:
        correct, detected_lang = is_text_in_input_language(incident[text_field], language)
        incident['is_correct_lang'] = correct
        incident[LANGUAGE_KEY] = detected_lang
    kept = [incident for incident in data if incident['is_correct_lang']]
    dropped = len(data) - len(kept)
    description = ""
    if dropped > 0:
        lang_counter = Counter(incident[LANGUAGE_KEY] for incident in data).most_common()
        description += "Dropped %d sample(s) that were detected as being in foreign languages. " % dropped
        description += 'Found language counts: {}'.format(', '.join(['{}:{}'.format(lang, count) for lang, count
                                                                     in lang_counter]))
        description += "\n"
    return kept, description
def is_text_in_input_language(text, input_language):
    """Return (matches, detected_code); (True, 'UNK') when no filtering
    applies or the language could not be detected."""
    if input_language in [ANY_LANGUAGE, OTHER_LANGUAGE]:
        return True, 'UNK'
    if '<html' in text:
        text = clean_html_from_text(text)
    try:
        detected = detect(text)
    except LangDetectException:
        # Detection failed (e.g. empty text) -- do not drop the sample.
        return True, 'UNK'
    return CODES_TO_LANGUAGES.get(detected) == input_language, detected
def get_tf_idf_similarity_arr(documents):
    """Return the pairwise TF-IDF dot-product similarity matrix as a dense
    square array (one row/column per document)."""
    doc_vectors = TfidfVectorizer(stop_words="english", min_df=1).fit_transform(documents)
    return (doc_vectors * doc_vectors.T).toarray()
def find_duplicate_indices(texts, dedup_threshold):
    """Return the set of indices of texts that are near-duplicates of an
    earlier text (similarity above dedup_threshold)."""
    similarity = get_tf_idf_similarity_arr(texts)
    doc_count = similarity.shape[0]
    duplicates = set()
    for first in range(doc_count):
        for second in range(first + 1, doc_count):
            if similarity[first][second] > dedup_threshold:
                duplicates.add(second)
    return duplicates
def remove_duplicate_by_indices(data, duplicate_indices):
    """Drop the items at the given positions; return (kept, description)."""
    kept = [item for position, item in enumerate(data) if position not in duplicate_indices]
    dropped = len(duplicate_indices)
    description = ""
    if dropped > 0:
        description += "Dropped %d samples duplicate to other samples" % dropped + "\n"
    return kept, description
def whitelist_dict_fields(data, fields):
    """Keep only whitelisted keys in each dict (exact or lowercase match)."""
    allowed = {name.strip() for name in fields} | {name.strip().lower() for name in fields}
    return [{key: value for key, value in d.items() if key in allowed} for d in data]
def main():
    """Entry point for the DBotPreprocessTextData automation.

    Reads incidents (or a single string), concatenates the requested text
    fields, cleans/filters/tokenizes them, optionally de-duplicates, and
    returns either the single-text preprocessing result or a file entry
    containing the processed incident list.
    """
    text_fields = demisto.args()['textFields'].split(",")
    # Renamed from `input` to avoid shadowing the builtin.
    input_data = demisto.args().get('input')
    input_type = demisto.args()['inputType']
    hash_seed = int(demisto.args().get('hashSeed')) if demisto.args().get('hashSeed') else None
    remove_short_threshold = int(demisto.args().get('removeShortTextThreshold', 1))
    de_dup_threshold = float(demisto.args()['dedupThreshold'])
    pre_process_type = demisto.args()['preProcessType']
    remove_html_tags = demisto.args()['cleanHTML'] == 'true'
    whitelist_fields = demisto.args().get('whitelistFields').split(",") if demisto.args().get(
        'whitelistFields') else None
    language = demisto.args().get('language', ANY_LANGUAGE)
    # If input is a single string (from DBotPredictPhishingWords), preprocess
    # it directly and return the result instead of producing a file entry.
    if input_type == 'string':
        input_str = demisto.args().get('input')
        input_str = clean_text_of_single_text(input_str, remove_html_tags)
        is_correct_lang, actual_language = is_text_in_input_language(input_str, language)
        if not is_correct_lang:
            # Fixed duplicated word ("as as") in the original error message.
            return_error("Input text was detected as being in a different language from {} ('{}' found)."
                         .format(language, actual_language))
        res = pre_process_single_text(raw_text=input_str,
                                      hash_seed=hash_seed, pre_process_type=pre_process_type)
        return res
    output_original_text_fields = demisto.args().get('outputOriginalTextFields', 'false') == 'true'
    description = ""
    # read data
    data = read_file(input_data, input_type)
    # concat text fields
    concat_text_fields(data, DBOT_TEXT_FIELD, text_fields)
    description += "Read initial %d samples" % len(data) + "\n"
    # validate the requested pre-process type before doing any work
    if pre_process_type not in PRE_PROCESS_TYPES:
        return_error('Pre-process type {} is not supported'.format(pre_process_type))
    # clean html and new lines
    data = clean_text_of_incidents_list(data, DBOT_TEXT_FIELD, remove_html_tags)
    # filter incidents not in specified languages
    data, desc = remove_foreign_language(data, DBOT_TEXT_FIELD, language)
    description += desc
    # apply tokenizer
    data = pre_process_batch(data, DBOT_TEXT_FIELD, DBOT_PROCESSED_TEXT_FIELD, pre_process_type,
                             hash_seed)
    # remove short emails
    data, desc = remove_short_text(data, DBOT_TEXT_FIELD, DBOT_PROCESSED_TEXT_FIELD, remove_short_threshold)
    description += desc
    # remove near-duplicates; best-effort -- failures here must not abort the run
    try:
        if 0 < de_dup_threshold < 1:
            duplicate_indices = find_duplicate_indices([x[DBOT_PROCESSED_TEXT_FIELD] for x in data], de_dup_threshold)
            data, desc = remove_duplicate_by_indices(data, duplicate_indices)
            description += desc
    except Exception:
        pass
    if output_original_text_fields:
        # Bug fix: whitelist_fields is None when the whitelistFields argument
        # was omitted, so the += below raised TypeError.
        if whitelist_fields is None:
            whitelist_fields = []
        for field in text_fields:
            whitelist_fields += [x.strip() for x in field.split('|')]
    if whitelist_fields and len(whitelist_fields) > 0:
        whitelist_fields.append(DBOT_PROCESSED_TEXT_FIELD)
        data = whitelist_dict_fields(data, whitelist_fields)
    description += "Done processing: %d samples" % len(data) + "\n"
    # serialize processed data to the requested output format
    file_name = str(uuid.uuid4())
    output_format = demisto.args()['outputFormat']
    if output_format == 'pickle':
        data_encoded = pickle.dumps(data, protocol=2)
    elif output_format == 'json':
        data_encoded = json.dumps(data, default=str)  # type: ignore
    else:
        return_error("Invalid output format: %s" % output_format)
    # Attach the processed data and summary to a war-room file entry.
    entry = fileResult(file_name, data_encoded)
    entry['Contents'] = data
    entry['HumanReadable'] = description
    entry['EntryContext'] = {
        CONTEXT_KEY: {
            'Filename': file_name,
            'FileFormat': output_format,
            'TextField': DBOT_TEXT_FIELD,
            'TextFieldProcessed': DBOT_PROCESSED_TEXT_FIELD,
        }
    }
    return entry
# XSOAR executes scripts with __name__ == 'builtins'; '__main__' supports
# running the script directly.
if __name__ in ['builtins', '__main__']:
    entry = main()
    demisto.results(entry)
| VirusTotal/content | Packs/Base/Scripts/DBotPreprocessTextData/DBotPreprocessTextData.py | Python | mit | 19,856 |
#!python
"""Computes mean of amino-acid preferences or differential preferences.
Written by Jesse Bloom.
"""
import sys
import os
import math
import time
import mapmuts
import mapmuts.io
import mapmuts.sequtils
import mapmuts.bayesian
def Entropy(pi_mean):
    """Computes site entropy in bits from an iterable of probabilities.

    Zero probabilities contribute nothing; a negative probability raises
    ValueError.
    """
    total = 0.0
    for p in pi_mean:
        if p < 0:
            raise ValueError("Negative pi value of %g" % p)
        if p > 0:
            total -= p * math.log(p, 2)
    return total
def main():
    """Main body of script.

    Reads the input file named on the command line, averages per-site
    (differential) preferences across the listed files, and writes the means
    plus a per-site summary statistic (entropy for preferences, RMS for
    differential preferences) to the requested output file.

    Note: this script is Python 2 (uses dict.iteritems and list-returning
    dict.keys).
    """
    # hard-coded variables
    log = sys.stdout
    # read input variables
    args = sys.argv[1 : ]
    if len(args) != 1:
        raise IOError("Script must be called with exactly one argument"\
                + ' specifying the name of the input file.')
    infilename = sys.argv[1]
    if not os.path.isfile(infilename):
        raise IOError("Failed to find infile of %s" % infilename)
    d = mapmuts.io.ParseInfile(open(infilename))
    log.write("Beginning execution of mapmuts_preferencemeans.py"\
            " in directory %s" % (os.getcwd()))
    mapmuts.io.PrintVersions(log)
    log.write("Input data being read from infile %s\n\n" % infilename)
    log.write("Read the following key/value pairs from infile %s:" % infilename)
    for (key, value) in d.iteritems():
        log.write("\n%s %s" % (key, value))
    includestop = mapmuts.io.ParseBoolValue(d, 'includestop')
    # The two input modes are mutually exclusive.
    if 'preferencefiles' in d and 'differentialpreferencefiles' in d:
        raise ValueError("Input file can only specify one of preferencefiles or differentialpreferencefiles, not both")
    elif 'preferencefiles' in d:
        use_diffprefs = False
        preferencefiles = mapmuts.io.ParseFileList(d, 'preferencefiles')
        log.write("\nReading in the preferences...")
        log.flush()
        preferences = [mapmuts.io.ReadEntropyAndEquilFreqs(f) for f in preferencefiles]
        residues = preferences[0].keys()
        residues.sort()
        # Stop codons are kept only when requested AND present in the data.
        if includestop and ('PI_*' in preferences[0][residues[0]]):
            includestop = True
        else:
            includestop = False
        # NOTE(review): this removal loop runs unconditionally, even when
        # includestop is True -- confirm against upstream.
        for x in preferences:
            mapmuts.bayesian.PreferencesRemoveStop(x)
    elif 'differentialpreferencefiles' in d:
        use_diffprefs = True
        preferencefiles = mapmuts.io.ParseFileList(d, 'differentialpreferencefiles')
        log.write("\nReading in the differential preferences...")
        log.flush()
        preferences = [mapmuts.io.ReadDifferentialPreferences(f) for f in preferencefiles]
        residues = preferences[0].keys()
        residues.sort()
        if includestop and ('dPI_*' in preferences[0][residues[0]]):
            includestop = True
        else:
            includestop = False
        # NOTE(review): runs unconditionally, as in the branch above.
        for x in preferences:
            mapmuts.bayesian.DifferentialPreferencesRemoveStop(x)
    else:
        raise ValueError("Input file failed to specify either preferencefiles or differentialpreferencefiles")
    nfiles = len(preferencefiles)
    if nfiles < 2:
        raise ValueError('preferencefiles / differentialpreferencefiles must specify at least 2 files')
    outfile = mapmuts.io.ParseStringValue(d, 'outfile')
    # read the preferences
    residues = preferences[0].keys()
    residues.sort()
    if not residues:
        raise ValueError("No residues specified in the first preference file")
    aas = mapmuts.sequtils.AminoAcids(includestop=includestop)
    log.write("\nNow writing means to %s..." % outfile)
    log.flush()
    out = open(outfile, 'w')
    # Header row: RMS for differential preferences, entropy otherwise.
    if use_diffprefs:
        out.write('#SITE\tWT_AA\tRMS_dPI\t%s\n' % '\t'.join(['dPI_%s' % aa for aa in aas]))
    else:
        out.write('#SITE\tWT_AA\tSITE_ENTROPY\t%s\n' % '\t'.join(['PI_%s' % aa for aa in aas]))
    for r in residues:
        # get wildtype(s) for all files
        wts = [preferences[ifile][r]['WT_AA'] for ifile in range(nfiles)]
        if len(dict([(wt, True) for wt in wts])) == 1:
            wts = wts[0]
        else:
            wts = ','.join(wts)
        # check that all files are consistent with stop codon presence/absence
        for ifile in range(nfiles):
            if includestop != (('PI_*' in preferences[ifile][r]) or ('dPI_*' in preferences[ifile][r])):
                raise ValueError("Not all files and residues are consistent with regard to the presence or absence of a key for stop codons (PI_* or dPI_*). All files and residues must either have or lack this key.")
        # start writing preference sums
        means = {}
        for aa in aas:
            if use_diffprefs:
                means[aa] = sum([preferences[ifile][r]['dPI_%s' % aa] for ifile in range(nfiles)]) / float(nfiles)
            else:
                means[aa] = sum([preferences[ifile][r]['PI_%s' % aa] for ifile in range(nfiles)]) / float(nfiles)
        # Sanity checks: diff preferences sum to ~0, preferences to ~1.
        if use_diffprefs and abs(sum(means.values())) > 1.0e-5:
            raise ValueError("The mean differential preferences do not sum to nearly zero for residue %d. Check that the preferences in the individual files correctly sum to zero." % r)
        elif not use_diffprefs and abs(1.0 - sum(means.values())) > 1.0e-5:
            # NOTE(review): message says 'differential preferences' but this
            # branch handles plain preferences.
            raise ValueError("The mean differential preferences do not sum to nearly one for residue %d. Check that the preferences in the individual files correctly sum to one." % r)
        if use_diffprefs:
            h = math.sqrt(sum([dpi**2 for dpi in means.values()]))
        else:
            h = Entropy(means.values())
        out.write("%d\t%s\t%g\t%s\n" % (r, wts, h, '\t'.join(['%g' % means[aa] for aa in aas])))
    out.close()
    log.write("\n\nExecution completed at %s." % time.ctime())
# Script entry point.
if __name__ == '__main__':
    main() # run the script
| jbloom/mapmuts | scripts/mapmuts_preferencemeans.py | Python | gpl-3.0 | 5,798 |
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file
from flask.ext.login import current_user
from iatidataquality import app
import os
import sys
import markdown
current = os.path.dirname(os.path.abspath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
from iatidq import dqindicators, dqorganisations, dqusers, donorresponse
import iatidq.survey.data as dqsurveys
import iatidq.survey.mapping
import usermanagement
from iatidq import util, ui_support
@app.route("/surveys/admin/")
@usermanagement.perms_required()
def surveys_admin():
    """Admin overview page: all surveys, workflows and published statuses."""
    surveys = dqsurveys.surveys()
    workflows = dqsurveys.workflowsAll()
    publishedstatuses=dqsurveys.publishedStatus()
    admin = usermanagement.check_perms('admin')
    loggedinuser = current_user
    # **locals() passes every local above into the template context, so the
    # variable names here must match what surveys_admin.html expects.
    return render_template("surveys/surveys_admin.html",
                           **locals())
@app.route("/surveys/create/", methods=["GET", "POST"])
@app.route("/surveys/<organisation_code>/create/", methods=["GET", "POST"])
def create_survey(organisation_code=None):
    """Placeholder endpoint for survey creation; not implemented yet."""
    message = "You're trying to create a survey"
    return message
def completion_percentage(survey):
    """Return how far *survey* has progressed through the workflow, as a
    percentage (first stage = 12.5%, last stage = 100%).

    Raises ValueError if the survey's workflow name is not a known stage
    (per the original comment, this used to raise NameError).
    """
    stages = ['researcher', 'send', 'donorreview', 'pwyfreview',
              'cso', 'donorcomments', 'pwyffinal', 'finalised']
    # can ValueError; used to raise NameError
    idx = stages.index(survey.Workflow.name)
    # len(stages) instead of a hard-coded 8 so adding/removing a stage keeps
    # the percentage correct.
    return float(idx + 1) / len(stages) * 100
@app.route("/organisations/<organisation_code>/survey/repair/")
@usermanagement.perms_required('survey', 'view')
def organisation_survey_repair(organisation_code):
    """Attempt to repair an organisation's survey data, flash the outcome,
    and redirect back to the survey page."""
    status = dqsurveys.repairSurveyData(organisation_code)
    # Idiomatic truthiness test instead of '== True'.
    if status['changes']:
        indicators = ", ".join(status['changed_indicators'])
        flash('Survey successfully repaired indicators '+indicators, 'success')
    else:
        flash('Survey could not be repaired', 'error')
    return redirect(url_for('organisation_survey', organisation_code=organisation_code))
@app.route("/organisations/<organisation_code>/survey/")
@usermanagement.perms_required('survey', 'view')
def organisation_survey(organisation_code=None):
    """Show the survey overview page for a single organisation."""
    organisation = dqorganisations.organisations(organisation_code)
    # make sure survey exists
    dqsurveys.getOrCreateSurvey({'organisation_id':organisation.id})
    survey = dqsurveys.getSurvey(organisation_code)
    surveydata = dqsurveys.getSurveyDataAllWorkflows(organisation_code)
    workflows = dqsurveys.workflowsAll()
    pct_complete = completion_percentage(survey)
    users = dqusers.surveyPermissions(organisation_code)
    admin = usermanagement.check_perms('admin')
    loggedinuser = current_user
    checksurveyOK = dqsurveys.checkSurveyData(organisation_code)
    # **locals() exposes every local above to survey.html as template vars.
    return render_template("surveys/survey.html",
                           **locals())
def getTimeRemainingNotice(deadline):
    """Return a human-readable notice about the survey deadline.

    Currently short-circuited to always return "" — everything after the
    first return is intentionally dead code kept for later reinstatement.
    """
    # Skip this for now
    return ""
    # NOTE(review): unreachable. It also relies on `datetime`, which is not
    # imported in this module — re-add the import before reviving this code.
    remaining = deadline.date() - datetime.utcnow().date()
    if remaining.days > 1:
        return "You have %d days to submit your response." % remaining.days
    else:
        return "Today is the last day for making any changes to your survey."
def __survey_process(organisation, workflow, request,
                     organisationsurvey, published_accepted):
    """Persist one survey form submission, one record per indicator.

    published_accepted -- callable mapping an indicator id (str) to the
    'accepted' value to store; the collect/review stages pass `none`
    (always None), the comment/finalreview stages pass `add_agree`.
    """
    indicators = dqindicators.indicators(app.config["INDICATOR_GROUP"])
    # Indicator ids ticked in the submitted form.
    form_indicators = map(int, request.form.getlist('indicator'))
    workflow_id = workflow.Workflow.id
    currentworkflow_deadline = organisationsurvey.currentworkflow_deadline
    for indicator in indicators:
        data = {
            'organisationsurvey_id': organisationsurvey.id,
            'indicator_id': str(indicator.id),
            'workflow_id': workflow_id,
        }
        if indicator.id not in form_indicators:
            # It's an IATI indicator...
            data['published_status'] = dqsurveys.publishedStatusByName('always').id
            data['published_format'] = dqsurveys.publishedFormatByName('iati').id
        else:
            # Non-IATI indicator: everything comes from the submitted form,
            # keyed as '<indicator-id>-<field>'.
            # NOTE(review): the grouping of the comment/source/accepted
            # fields under this branch follows the original layout — confirm.
            data['published_status'] = request.form.get(str(indicator.id)+"-published")
            if indicator.indicator_noformat:
                data['published_format'] = dqsurveys.publishedFormatByName('document').id
            else:
                data['published_format'] = request.form.get(str(indicator.id) + "-publishedformat")
            if indicator.indicator_ordinal:
                data['ordinal_value'] = request.form.get(str(indicator.id) + "-ordinal_value")
            else:
                data['ordinal_value'] = None
            data['published_comment'] = request.form.get(str(indicator.id)+"-comments")
            data['published_source'] = request.form.get(str(indicator.id)+"-source")
            data['published_accepted'] = published_accepted(str(indicator.id))
        surveydata = dqsurveys.addSurveyData(data)
    if 'submit' in request.form:
        if workflow.Workflow.id == organisationsurvey.currentworkflow_id:
            # save data, change currentworkflow_id to leadsto
            dqsurveys.advanceSurvey(organisationsurvey)
            flash('Successfully submitted survey data', 'success')
        else:
            flash("Your survey data was updated.", 'warning')
    else:
        time_remaining_notice = getTimeRemainingNotice(
            organisationsurvey.currentworkflow_deadline)
        flash('Note: your survey has not yet been submitted. '
              + time_remaining_notice, 'warning')
def none(i):
    """Ignore *i* and return None; used where no 'accepted' value applies."""
    return None


def add_agree(indicator):
    """Read the submitted '<indicator>-agree' form field (may be None)."""
    return request.form.get(indicator + "-agree")
def _survey_process_collect(organisation, workflow,
                            request, organisationsurvey):
    """Researcher data-collection stage: no 'accepted' value is recorded."""
    return __survey_process(organisation, workflow, request,
                            organisationsurvey, none)
def _survey_process_review(organisation, workflow,
                           request, organisationsurvey):
    """Review stage: stores form data, still without 'accepted' values."""
    return __survey_process(organisation, workflow, request,
                            organisationsurvey, none)
def _survey_process_finalreview(organisation, workflow,
                                request, organisationsurvey):
    """Final review stage: records per-indicator agree/disagree responses."""
    return __survey_process(organisation, workflow, request,
                            organisationsurvey, add_agree)
def _survey_process_comment(organisation, workflow,
                            request, organisationsurvey):
    """Comment stage: records per-indicator agree/disagree responses."""
    return __survey_process(organisation, workflow, request,
                            organisationsurvey, add_agree)
def _survey_process_send(organisation, workflow, request, organisationsurvey):
    """Advance the survey to the next workflow stage ('send to donor')."""
    # NOTE(review): 'indicators' is collected but unused until the FIXME
    # below is implemented.
    indicators = request.form.getlist('indicator')
    #FIXME: need to actually send
    dqsurveys.advanceSurvey(organisationsurvey)
    flash('Successfully sent survey to donor.', 'success')
def get_old_publication_status():
    """Map legacy publication-status codes to display text and CSS class.

    Both the numeric legacy codes ('1'..'4', '') and the newer named codes
    ('iati', 'always', ...) are included.
    """
    pses = [
        ('4', 'Always published', 'success'),
        ('3', 'Sometimes published', 'warning'),
        ('2', 'Collected', 'important'),
        ('1', 'Not collected', 'inverse'),
        ('', 'Unknown', ''),
        ('iati', 'Published to IATI', 'success'),
        ('always', 'Always published', 'success'),
        ('sometimes', 'Sometimes published', 'warning'),
        ('not published', 'Not published', 'important'),
    ]
    return dict((code, {"text": text, "class": css})
                for code, text, css in pses)
def get_ordinal_values_years():
    """Return (years-ahead, display-info) pairs for forward-looking data.

    Returned via map(): callers apply dict() and .pop() to the result (a
    list under Python 2's eager map).
    """
    years = [
        (3, '3 years ahead', 'success'),
        (2, '2 years ahead', 'warning'),
        (1, '1 year ahead', 'important'),
        (0, 'No forward data', 'inverse'),
        (None, 'Unknown', '')
    ]
    return map(lambda yr: (yr[0], {"text": yr[1], "class": yr[2]}), years)
def id_tuple(p):
    """Return (p.id, p) — convenient for building id-keyed dicts via map()."""
    return (p.id, p)
def organisation_survey_view(organisation_code, workflow,
                             workflow_name, organisationsurvey,
                             allowed_to_edit):
    """Render the survey template for one workflow stage of a survey."""
    organisation = ui_support.organisation_by_code(organisation_code)
    if organisation == None:
        abort(404)
    # the next line may be being called for its side effects
    dqsurveys.getSurveyData(organisation_code, workflow_name)
    surveydata = dqsurveys.getSurveyDataAllWorkflows(organisation_code)
    try:
        print surveydata['cso']  # NOTE(review): leftover Python-2 debug output
    except Exception:
        pass
    indicators = dqindicators.indicators(app.config["INDICATOR_GROUP"])
    org_indicators = dqorganisations._organisation_indicators_split(
        organisation, 2)
    twentytwelvedata = iatidq.survey.mapping.get_organisation_results(
        organisation_code,
        [i[1]["indicator"]["name"] for i in org_indicators["zero"].items()]
    )
    publishedstatuses = dict(map(id_tuple, dqsurveys.publishedStatus()))
    publishedformats = dict(map(id_tuple, dqsurveys.publishedFormatAll()))
    years = get_ordinal_values_years()
    year_data = dict(years)
    # Drop the last (None, 'Unknown') entry from the editable year choices.
    years.pop()
    donorresponses = donorresponse.RESPONSE_IDS
    old_publication_status = get_old_publication_status()
    admin = usermanagement.check_perms('admin')
    loggedinuser = current_user
    org_indicators['commitment'] = util.resort_sqlalchemy_indicator(org_indicators['commitment'])
    org_indicators['zero'] = util.resort_dict_indicator(org_indicators['zero'])
    # Template is picked per workflow *type*; **locals() is the context.
    return render_template(
        "surveys/_survey_%s.html" % workflow.WorkflowType.name,
        **locals())
@app.route("/organisations/<organisation_code>/survey/<workflow_name>/", methods=["GET", "POST"])
def organisation_survey_edit(organisation_code=None, workflow_name=None):
    """View (GET) or submit (POST) one workflow stage of a survey."""
    workflow = dqsurveys.workflowByName(workflow_name)
    if not workflow:
        flash('That workflow does not exist.', 'error')
        return abort(404)
    organisation = dqorganisations.organisations(organisation_code)
    organisationsurvey = dqsurveys.getOrCreateSurvey({
        'organisation_id': organisation.id
    })
    def allowed(method):
        # Permission names look like 'survey_<workflow>'.
        permission_name = "survey_" + workflow_name
        permission_value = {'organisation_code': organisation_code}
        return usermanagement.check_perms(permission_name,
                                          method,
                                          permission_value)
    allowed_to_edit = allowed("edit")
    allowed_to_view = allowed("view")
    def no_permission():
        # If not logged in, redirect to login page
        if not current_user.is_authenticated():
            flash('You must log in to access that page.', 'error')
            return redirect(url_for('login', next=request.path))
        # Otherwise, redirect to previous page and warn user
        # they don't have permissions to access the survey.
        flash("Sorry, you do not have permission to view that survey", 'error')
        if request.referrer is not None:
            redir_to = request.referrer
        else:
            redir_to = url_for('home')
        return redirect(redir_to)
    if not allowed_to_view:
        return no_permission()
    if request.method != 'POST':
        return organisation_survey_view(
            organisation_code, workflow,
            workflow_name, organisationsurvey, allowed_to_edit)
    if not allowed_to_edit:
        return no_permission()
    # POST: dispatch to the handler for this workflow *type*.
    handlers = {
        "collect": _survey_process_collect,
        "send": _survey_process_send,
        "review": _survey_process_review,
        "comment": _survey_process_comment,
        "finalreview": _survey_process_finalreview
    }
    # NOTE(review): workflow_name is rebound here from the route arg to the
    # workflow type name — intentional but easy to misread.
    workflow_name = workflow.WorkflowType.name
    if workflow_name == "send":
        if workflow.Workflow.id == organisationsurvey.currentworkflow_id:
            _survey_process_send(
                organisation_code, workflow, request, organisationsurvey)
        else:
            flash("Not possible to send survey to donor because it's "
                  "not at the current stage in the workflow. "
                  "Maybe you didn't submit the data, or maybe you "
                  "already sent it to the donor?", 'error')
    elif workflow_name in handlers:
        handlers[workflow_name](
            organisation, workflow, request, organisationsurvey)
    elif workflow_name == 'finalised':
        return "finalised"
    return redirect(url_for("organisations",
                            organisation_code=organisation_code))
def render_markdown(filename):
    """Render a markdown file from the local docs/ directory inside the
    generic 'about' page template."""
    path = os.path.join(os.path.dirname(__file__), 'docs', filename)
    # open() instead of the Python-2-only file() builtin; the context
    # manager also guarantees the handle is closed.
    with open(path) as f:
        plaintext = f.read()
    def _wrapped():
        # locals() of this inner function (content, loggedinuser, plus the
        # plaintext closure variable) become the template context, so keep
        # these names stable.
        content = Markup(markdown.markdown(plaintext))
        loggedinuser = current_user
        return render_template('about_generic.html', **locals())
    return _wrapped()
@app.route('/info/datacol')
def about_data_collection():
    """Serve the 2013 data-collection guide as a rendered markdown page."""
    return render_markdown('2013_data_collection_guide.md')
@app.route('/info/independent')
def about_independent():
    """Serve the independent-reviewer guide as a rendered markdown page."""
    return render_markdown('independent_guide.md')
@app.route('/info/donor')
def about_donor():
    """Serve the donor guide as a rendered markdown page."""
    return render_markdown('donor_guide.md')
| mk270/iatidq_ui | iatidataquality/surveys.py | Python | agpl-3.0 | 13,490 |
import numpy as np
import geopy
from io import BytesIO
from matplotlib import image as img
import requests
class Map(object):
    """A Google Static Maps tile centred on (lat, long), with helpers for
    analysing how 'green' its pixels are."""

    def __init__(self, lat, long, satellite=True,
                 zoom=10, size=(400, 400), sensor=False):
        """Fetch the static-map image for the given centre point.

        lat, long -- map centre in decimal degrees
        satellite -- request satellite imagery instead of the road map
        zoom      -- Google zoom level
        size      -- (width, height) in pixels
        sensor    -- value for the API's 'sensor' flag
        """
        base = "http://maps.googleapis.com/maps/api/staticmap?"
        params = dict(
            sensor=str(sensor).lower(),
            zoom=zoom,
            size="x".join(map(str, size)),
            center=",".join(map(str, (lat, long))),
            style="feature:all|element:labels|visibility:off"
        )
        if satellite:
            params["maptype"] = "satellite"
        # Fetch our PNG image data
        self.image = requests.get(base, params=params).content
        # Parse our PNG image as a numpy array
        self.pixels = img.imread(BytesIO(self.image))

    def green(self, threshold):
        """Boolean mask of pixels whose green channel exceeds both the red
        and blue channels by a factor of *threshold*."""
        # Use NumPy to build an element-by-element logical array
        greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]
        greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]
        return np.logical_and(greener_than_red, greener_than_blue)

    def count_green(self, threshold=1.1):
        """Number of green pixels in the image."""
        return np.sum(self.green(threshold))

    def show_green(self, threshold=1.1):
        """Render the green mask as a green-on-black PNG and return its bytes.

        Bug fixes vs the original: first parameter was 'data' instead of
        'self'; 'array' and 'StringIO' were undefined names (now np.array and
        BytesIO); imsave returns None, so the buffer is read afterwards.
        """
        green = self.green(threshold)
        out = green[:, :, np.newaxis] * np.array([0, 1, 0])[np.newaxis, np.newaxis, :]
        buffer = BytesIO()
        img.imsave(buffer, out, format='png')
        return buffer.getvalue()
| jscott6/greengraph | greengraph/map.py | Python | mit | 1,522 |
import os
import jinja2
# Shared Jinja2 environment; templates are loaded relative to this file.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True
)
# NOTE(review): credentials hard-coded in source — move them to environment
# variables or an untracked config file before deploying.
TRIPIT_USERNAME = 'yourmail'
TRIPIT_PASSWORD = 'yourpassword'
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.SCADA.RemotePoint import RemotePoint
class RemoteControl(RemotePoint):
    """Remote controls are ouputs that are sent by the remote unit to actuators in the process.
    """

    def __init__(self, actuatorMinimum=0.0, remoteControlled=False, actuatorMaximum=0.0, Control=None, *args, **kw_args):
        """Initialises a new 'RemoteControl' instance.

        @param actuatorMinimum: The minimum set point value accepted by the remote control point.
        @param remoteControlled: Set to true if the actuator is remotely controlled.
        @param actuatorMaximum: The maximum set point value accepted by the remote control point.
        @param Control: The Control for the RemoteControl point.
        """
        #: The minimum set point value accepted by the remote control point.
        self.actuatorMinimum = actuatorMinimum
        #: Set to true if the actuator is remotely controlled.
        self.remoteControlled = remoteControlled
        #: The maximum set point value accepted by the remote control point.
        self.actuatorMaximum = actuatorMaximum
        # One-to-one reverse reference; maintained via the Control property.
        self._Control = None
        self.Control = Control
        super(RemoteControl, self).__init__(*args, **kw_args)

    # CIM reflection metadata consumed by the (de)serialisation machinery.
    _attrs = ["actuatorMinimum", "remoteControlled", "actuatorMaximum"]
    _attr_types = {"actuatorMinimum": float, "remoteControlled": bool, "actuatorMaximum": float}
    _defaults = {"actuatorMinimum": 0.0, "remoteControlled": False, "actuatorMaximum": 0.0}
    _enums = {}
    _refs = ["Control"]
    _many_refs = []

    def getControl(self):
        """The Control for the RemoteControl point.
        """
        return self._Control

    def setControl(self, value):
        # Detach the old Control's back-reference, if any.
        if self._Control is not None:
            self._Control._RemoteControl = None
        self._Control = value
        if self._Control is not None:
            # NOTE(review): clears the new Control's RemoteControl property
            # before installing the back-reference — presumably this detaches
            # any prior owner via the property setter; confirm against the
            # Control class.
            self._Control.RemoteControl = None
            self._Control._RemoteControl = self

    Control = property(getControl, setControl)
| rwl/PyCIM | CIM14/IEC61970/SCADA/RemoteControl.py | Python | mit | 3,097 |
#!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context("poster")
# Earlier manual-parsing/plotting approach, kept for reference:
#with open("traj.dat") as f:
# data = f.read()
#
# data = data.split('\n')
#
# x = [row.split(' ')[0] for row in data]
# y = [row.split(' ')[1] for row in data]
#
# fig = plt.figure()
#
# ax1 = fig.add_subplot(111)
#
# ax1.set_title("Plot title...")
# ax1.set_xlabel('your x label..')
# ax1.set_ylabel('your y label...')
#
# ax1.plot(x,y, c='r', label='the data')
#
# leg = ax1.legend()
#fig = plt.figure()
# Left-hand panel: plot every data column of q.dat against time (column 0).
plt.subplot(121)
#plt.ylim(-8,8)
data = np.genfromtxt(fname='q.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[1]):
    plt.plot(data[:,0],data[:,x])
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
plt.xlabel('time')
#plt.ylabel('position')
#plt.title('traj')
#ax2 = plt.subplot(122)
#data = np.genfromtxt(fname='c.dat')
#data = np.loadtxt('traj.dat')
#for x in range(1,data.shape[1]):
#    plt.plot(data[:,0],data[:,x])
#plt.xlabel('time')
#ax2.yaxis.tick_right()
#ax2.yaxis.set_ticks_position('both')
#plt.ylim(-0.2,5)
#plt.subplot(2,2,3)
#data = np.genfromtxt(fname='norm')
#plt.plot(data[:,0],data[:,1],'r-',linewidth=2)
#plt.ylabel('Norm')
#plt.ylim(0,2)
# NOTE(review): none of the plotted lines carry a label, so legend() will
# warn/produce an empty legend.
plt.legend()
plt.savefig('traj.pdf')
plt.show()
| binghongcha08/pyQMD | GWP/QTGB/resample/traj.py | Python | gpl-3.0 | 1,297 |
# -*- coding: utf-8 -*-
# test basic sqlite interactions
# based on http://qiita.com/mas9612/items/a881e9f14d20ee1c0703
# cf also https://docs.python.org/3/library/sqlite3.html
import sqlite3
dbname = "sqlite-dbs/yktest.sqlite3"
conn = sqlite3.connect(dbname)
# sqlite3.Row gives dict-like rows with .keys() as well as index access.
conn.row_factory = sqlite3.Row
c = conn.cursor()
# drop test table if it exists for testing purposes
c.execute("DROP TABLE IF EXISTS pytest")
# test execute
create_table = "create table pytest (id INTEGER PRIMARY KEY NOT NULL, name VARCHAR(64))"
c.execute(create_table)
# test setting variables into sql with ? (placeholder) values
sql = "insert into pytest (id, name) values (?,?)"
pydata = (1, "Name-A")
c.execute(sql, pydata)
# test above with multiple values using executemany
pydatalist = [
    (3, "Name-B"),
    (7, "Name-V"),
    (11, "Name-C")
]
c.executemany(sql, pydatalist)
conn.commit()
# test :name placeholders
sql = "insert into pytest (id, name) values (:id, :name)"
pydatadict = {"id": 123, "name": "Name-名前"}
c.execute(sql, pydatadict)
conn.commit()
# test select and see
select_sql = "select * from pytest"
res = c.execute(select_sql)
print("full data is:")
print(res)
print("data by rows:")
ret = []
ret2 = []
# Iterate the cursor: collect rows as tuples and as {column: value} dicts.
for row in res:
    ret.append(tuple(row))
    print(row.keys())
    print(tuple(row))
    ret2.append(dict(zip(row.keys(), tuple(row))))
print("data packed for return:")
print(ret)
print("list of dicts: ")
print(ret2)
# drop test table if it exists at end of test
c.execute("DROP TABLE IF EXISTS pytest")
# make sure to close any open connections
conn.close()
| yk0242/incubation | incubation-python/sqlite/sqlite-test1.py | Python | mit | 1,572 |
#!env /usr/bin/python3
import sys
import urllib.parse
import urllib.request
def main():
    """Search rarbg for the first command-line argument, sorted by seeders,
    and fetch the results page."""
    search = sys.argv[1]
    base = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
    # Bug fix: quote the user-supplied term so spaces and special characters
    # survive in the query string (urllib.parse was imported but unused).
    url = base + urllib.parse.quote_plus(search)
    print(url)
    # Some sites reject the default urllib user agent, hence the override.
    req = urllib.request.Request(url, headers={'User-Agent': "Magic Browser"})
    resp = urllib.request.urlopen(req)
    # Return the page body instead of discarding it (respData was unused).
    return resp.read()

if __name__ == '__main__':
    main()
| jadams/rarbg-get | rarbg-get.py | Python | mit | 409 |
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.http import require_GET
import requests
import json
from models import PdfFile, MediaFile
from annoying.decorators import render_to
from os.path import join
from django.conf import settings
@render_to('router/index.html')
def index(request):
    """Render the API landing page with the project README contents."""
    # NOTE(review): the file handle is never closed explicitly; acceptable in
    # CPython for a short-lived request, but a 'with' block would be tidier.
    return {
        'readme': open(join(settings.PROJECT_DIR, 'README.md'), 'r').read()
    }
def check_url_exists(url):
    """
    Check if a URL exists by HEAD request.

    Follows redirects; returns the final URL on a 200 OK response,
    otherwise None (including when *url* itself is None).
    """
    if url is None:
        return None
    r = requests.head(url, allow_redirects=True)
    if r.status_code == requests.codes.ok:
        return r.url
    # Removed an unreachable duplicate 'return None' that followed the
    # if/else here — both branches already returned.
    return None
def json_response(data, status=200):
    """Wrap *data* as a pretty-printed JSON HTTP response."""
    payload = json.dumps(data, indent=4)
    return HttpResponse(payload, content_type="application/json", status=status)
@require_GET
def hello_world(request):
    """Trivial liveness endpoint."""
    greeting = {"message": "Hello, world!"}
    return json_response(greeting)
@require_GET
def example_route(request, arg1, arg2):
    """
    This text description for this API call
    arg1 -- A first argument
    arg2 --- A second argument
    """
    # Demonstrates redirecting to an external URL built from route args.
    return HttpResponseRedirect('http://example.org/%s/%s/' % (arg1, arg2))
@require_GET
def pdf(request, doi, type = None):
    """
    Get PDF file locations in JSON format.
    """
    pdf_types = ['figures','article']
    # A single explicit type restricts the lookup; otherwise try all types.
    if type:
        types = [type]
    else:
        types = pdf_types
    data = []
    notes = []
    for pdf_type in types:
        try:
            pdf = PdfFile(doi, pdf_type)
            # Check if URL exists
            if check_url_exists(pdf.get_url()) is not None:
                # Add data from the object
                item = {}
                item['doi'] = pdf.get_doi()
                item['doi_id'] = pdf.get_doi_id()
                item['file_type'] = pdf.file_type
                item['url'] = pdf.get_url()
                item['size'] = pdf.get_size_from_s3()
                item['type'] = pdf.type
                data.append(item)
            else:
                # Append notes
                #notes.append('%s does not exist' % pdf.get_url())
                pass
        except Exception:
            # Was a bare 'except:'; Exception still maps any lookup failure
            # to 404 but no longer swallows SystemExit/KeyboardInterrupt.
            return HttpResponse(status=404)
    response_list = {}
    response_list['data'] = data
    # Add metadata
    if len(notes) > 0:
        response_list['notes'] = notes
    response_list['results'] = len(data)
    return json_response(response_list)
@require_GET
def pdf_by_type(request, doi, type):
    """
    Get a PDF file URI
    The pdf_type can be 'figures' or 'article'
    """
    # Thin wrapper: same as pdf() but with the type made mandatory.
    return pdf(request, doi, type)
@require_GET
def media(request, doi, xlink = None, type = None, redirect = None):
    """
    Get media file locations in JSON format.

    When redirect is requested (via the *redirect* argument or a
    ?redirect query parameter) and exactly one file matched, issue an
    HTTP redirect to it instead; 404 when nothing matched.
    """
    data = []
    # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
    file = None
    # Given a DOI, xlink and type
    if doi is not None and xlink is not None and type is not None:
        file = MediaFile(doi, xlink, type)
        if check_url_exists(file.get_url()) is not None:
            # Add data
            item = {}
            item['type'] = file.type
            item['doi'] = file.get_doi()
            item['doi_id'] = file.get_doi_id()
            item['url'] = file.get_url()
            data.append(item)
    response_list = {}
    response_list['data'] = data
    response_list['results'] = len(data)
    query_params = request.GET
    if (
        (redirect is True or query_params.get('redirect') is not None)
        and len(response_list['data']) == 1):
        return HttpResponseRedirect(response_list['data'][0]['url'])
    elif ((redirect is True or query_params.get('redirect') is not None)
        and len(response_list['data']) < 1):
        # No URL return and redirect, error 404
        return HttpResponse(status=404)
    else:
        # Default with no redirect, return all the data
        return json_response(response_list)
@require_GET
def media_file(request, doi, filename):
    """
    Get a specific media file
    and redirect to the file after locating the file at the third-party media provider
    Specifically this is useful in displaying videos in eLife Lens
    filename includes the name and file extension, e.g. 'elife00007v001.jpg' or 'elife00007v001.mp4'
    """
    # Split into base name and extension; when there is no extension at all,
    # fall back to (None, None) exactly as the original try/except did.
    parts = filename.split(".")
    if len(parts) >= 2:
        xlink, type = parts[0], parts[1]
    else:
        xlink = None
        type = None
    return media(request, doi, xlink, type, redirect=True)
@require_GET
def media_xlink_format(request, doi, xlink, type):
    """
    Get a specific media file by specifying the xlink and type
    type for videos can be 'jpg', 'mp4', 'ogv', 'webm'
    redirect -- If set (to any value) redirect to the URL
    """
    # Delegates to media(); the optional ?redirect query arg is read there.
    return media(request, doi, xlink, type)
| elifesciences/elife-api | src/router/views.py | Python | gpl-3.0 | 4,870 |
"""
sentry.utils.samples
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os.path
import random
from datetime import datetime, timedelta
import six
from sentry.constants import DATA_ROOT
from sentry.event_manager import EventManager
from sentry.interfaces.user import User as UserInterface
from sentry.utils import json
epoch = datetime.utcfromtimestamp(0)
def milliseconds_ago(now, milliseconds):
    """Return *now* minus *milliseconds*, expressed as seconds since the
    Unix epoch (float)."""
    moment = now - timedelta(milliseconds=milliseconds)
    return (moment - epoch).total_seconds()
def random_ip():
    """Generate a random public-looking IPv4 address as text.

    The first octet avoids common private/reserved prefixes
    (10, 127, 169, 172, 192).
    """
    reserved_firsts = [10, 127, 169, 172, 192]
    first = random.randrange(1, 256)
    while first in reserved_firsts:
        first = random.randrange(1, 256)
    octets = (first,
              random.randrange(1, 256),
              random.randrange(1, 256),
              random.randrange(1, 256))
    return '.'.join(six.text_type(octet) for octet in octets)
def random_username():
    """Pick one of a fixed set of sample usernames."""
    candidates = [
        'jess',
        'david',
        'chris',
        'eric',
        'katie',
        'ben',
        'armin',
        'saloni',
        'max',
        'meredith',
        'matt',
        'sentry',
    ]
    return random.choice(candidates)
def name_for_username(username):
    """Return the display name for a known sample username; otherwise
    title-case the username with underscores as spaces."""
    known = {
        'ben': 'Ben Vinegar',
        'chris': 'Chris Jennings',
        'david': 'David Cramer',
        'matt': 'Matt Robenolt',
        'jess': 'Jess MacQueen',
        'katie': 'Katie Lundsgaard',
        'saloni': 'Saloni Dudziak',
        'max': 'Max Bittker',
        'meredith': 'Meredith Heller',
        'eric': 'Eric Feng',
        'armin': 'Armin Ronacher',
    }
    if username in known:
        return known[username]
    return username.replace('_', ' ').title()
def generate_user(username=None, email=None, ip_address=None, id=None):
    """Build a serialized Sentry user-interface payload.

    When neither a username nor an email is supplied, a random sample
    username is picked and an @example.com address derived from it.
    """
    if username is None and email is None:
        username = random_username()
        email = '{}@example.com'.format(username)
    payload = {
        'id': id,
        'username': username,
        'email': email,
        'ip_address': ip_address or random_ip(),
        'name': name_for_username(username),
    }
    return UserInterface.to_python(payload).to_json()
def load_data(platform, default=None, timestamp=None, sample_name=None):
    """Load a canned sample event payload for *platform* (falling back to
    *default*) and fill in fixed user/request/extra data.

    Returns None when no sample JSON exists for either name. 'csp'
    samples are returned untouched. Breadcrumb timestamps are rebased
    relative to now (or *timestamp*).
    """
    # NOTE: Before editing this data, make sure you understand the context
    # in which its being used. It is NOT only used for local development and
    # has production consequences.
    # * bin/load-mocks to generate fake data for local testing
    # * When a new project is created, a fake event is generated as a "starter"
    # event so it's not an empty project.
    # * When a user clicks Test Configuration from notification plugin settings page,
    # a fake event is generated to go through the pipeline.
    sample_name = sample_name or platform
    data = None
    for platform in (platform, default):
        if platform is None:
            continue
        json_path = os.path.join(DATA_ROOT, 'samples', '%s.json' % (sample_name.encode('utf-8'), ))
        if not os.path.exists(json_path):
            continue
        with open(json_path) as fp:
            data = json.loads(fp.read())
        break
    if data is None:
        return
    if platform == 'csp':
        return data
    data['platform'] = platform
    data['message'] = 'This is an example %s exception' % (sample_name, )
    data['sentry.interfaces.User'] = generate_user(
        ip_address='127.0.0.1',
        username='sentry',
        id=1,
        email='sentry@example.com',
    )
    data['extra'] = {
        'session': {
            'foo': 'bar',
        },
        'results': [1, 2, 3, 4, 5],
        'emptyList': [],
        'emptyMap': {},
        'length': 10837790,
        'unauthorized': False,
        'url': 'http://example.org/foo/bar/',
    }
    data['modules'] = {
        'my.package': '1.0.0',
    }
    data['sentry.interfaces.Http'] = {
        "cookies": 'foo=bar;biz=baz',
        "url": "http://example.com/foo",
        "headers": {
            "Referer":
            "http://example.com",
            "Content-Type":
            "application/json",
            "User-Agent":
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36"
        },
        "env": {
            'ENV': 'prod',
        },
        "query_string": "foo=bar",
        "data": '{"hello": "world"}',
        "method": "GET"
    }
    start = datetime.utcnow()
    if timestamp:
        try:
            start = datetime.utcfromtimestamp(timestamp)
        except TypeError:
            pass
    # Make breadcrumb timestamps relative to right now so they make sense
    breadcrumbs = data.get('sentry.interfaces.Breadcrumbs')
    if breadcrumbs is not None:
        duration = 1000
        values = breadcrumbs['values']
        for value in reversed(values):
            value['timestamp'] = milliseconds_ago(start, duration)
            # Every breadcrumb is 1s apart
            duration += 1000
    return data
def create_sample_event(project, platform=None, default=None, raw=True, sample_name=None, **kwargs):
    """Create and save a sample event for *project* from canned data.

    Returns None when no platform/default is given or no sample payload
    exists; otherwise the saved event.
    """
    if not (platform or default):
        return None
    data = load_data(platform, default, kwargs.get('timestamp'), sample_name)
    if not data:
        return None
    data.update(kwargs)
    manager = EventManager(data)
    manager.normalize()
    return manager.save(project.id, raw=raw)
| jean/sentry | src/sentry/utils/samples.py | Python | bsd-3-clause | 5,584 |
#!/usr/bin/env python
import roslib
roslib.load_manifest('turtle_mix')
import rospy
import tf
import turtlesim.msg
def handle_turtle_pose(msg, turtlename):
    """Re-publish a turtlesim Pose as a TF transform from 'world' to the
    turtle's frame (2D pose: z=0, yaw only)."""
    # NOTE(review): a new TransformBroadcaster per message works, but
    # creating it once at node scope would avoid repeated construction.
    br = tf.TransformBroadcaster()
    br.sendTransform((msg.x, msg.y, 0),
                     tf.transformations.quaternion_from_euler(0, 0, msg.theta),
                     rospy.Time.now(),
                     turtlename,
                     "world")
if __name__ == '__main__':
    rospy.init_node('turtle_tf_broadcaster')
    # '~turtle' is a private node parameter naming the turtle to broadcast.
    turtlename = rospy.get_param('~turtle')
    rospy.loginfo(turtlename)
    # Forward every pose update to the TF broadcaster callback.
    rospy.Subscriber('/%s/pose' % turtlename,
                     turtlesim.msg.Pose,
                     handle_turtle_pose,
                     turtlename)
    rospy.spin()
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pybloom import ScalableBloomFilter
from scrapy.exceptions import DropItem
from scrapy_redis.pipelines import RedisPipeline
class WishPipeline(object):
    """Drops items whose URL has already been seen.

    De-duplication uses a scalable Bloom filter, so memory stays bounded at
    the cost of a small false-positive rate (occasionally dropping a
    genuinely new URL).
    """

    def __init__(self):
        self.urls = ScalableBloomFilter(mode=ScalableBloomFilter.LARGE_SET_GROWTH)

    def process_item(self, item, spider):
        url = item['url'] if item is not None else None
        if url is None or url in self.urls:
            raise DropItem("Duplicate item found.")
        self.urls.add(url)
        return item
class ToRedisPipeline(RedisPipeline):
    """Pushes each scraped item's URL onto a per-spider Redis list."""

    def _process_item(self, item, spider):
        # List key is '<spidername>:products'.
        key = '{}:products'.format(spider.name)
        self.server.rpush(key, item['url'])
        return item
| yangxue088/wish | pipelines.py | Python | mit | 883 |
import collections
# this will generate the emulator .s file
####### HELPERS/OPCODE HELPERS ##########################################
# Expands macro/alias placeholders in a string (deliberately shadows the
# builtin format()); 'override' entries win over macros and aliases.
# Returns the expanded string with its comments column-aligned.
def format(string, **override):
    mapping = dict(_macros, **_aliases)
    mapping.update(override)
    return alignComments(string.format(**mapping))
# A 16-bit opcode of the emulated instruction set.
class Opcode(object):
    def __init__(self, opcode):
        assert 2**16 > opcode >= 0
        self.opcode = opcode
        self.so = opcode & 0xff   # secondary opcode byte (low 8 bits)
        self.po = opcode >> 8     # primary opcode byte (high 8 bits)
        self.r = self.po & 0b111  # assumes that lower 3 bits of the primary opcode are a register

    @staticmethod
    def fromBytes(po, so):
        """Build an Opcode from separate primary/secondary bytes."""
        assert 0 <= po < 256 and 0 <= so < 256
        return Opcode(po << 8 | so)

    # defines the asm code for this opcode
    def define(self, code, **kwargs):
        prologue = """ .thumb_func
handler_{opcode:04x}: """.format(opcode=self.opcode)
        body = format(code.strip("\n"), **kwargs).lstrip(' ')
        handlers[self.opcode] = prologue + body
# Yields all 8*256 opcodes whose primary byte is byte+r, for r in 0..7.
def byteOpcodesWithRegister(byte):
    for reg in range(8):
        primary = byte + reg
        for secondary in range(256):
            yield Opcode.fromBytes(primary, secondary)
# Yields all 256 opcodes whose primary byte is exactly *byte*.
def byteOpcodes(byte):
    for secondary in range(256):
        yield Opcode.fromBytes(byte, secondary)
# append rodata: format *code* (macro/alias expansion) and queue it for the
# generated .rodata section
def rodata(code, **kwargs):
    _rodata.append(format(code, **kwargs))
_rodata = []  # accumulated .rodata assembly fragments
# append data: format *code* and queue it for the generated .data section
def data(code, **kwargs):
    _data.append(format(code, **kwargs))
_data = []  # accumulated .data assembly fragments
# append text: format *code* and queue it for the generated .text section
def text(code, **kwargs):
    _text.append(format(code, **kwargs))
_text = []  # accumulated .text assembly fragments
def function(text="", rodata="", data=""):
    """Emit a text/rodata/data triple in one call.

    The parameters shadow the module-level helpers of the same names,
    hence the globals() lookups to reach the real functions.
    """
    globals()['text'](text)
    globals()['rodata'](rodata)
    globals()['data'](data)
# returns a new label with uniq name, with the given optional suffix
def label(suffix=""):
    # Bug fix: without the global declaration, '_currentLabel += 1' made the
    # name local and raised UnboundLocalError on first use. The local result
    # is also renamed so it no longer shadows the function itself.
    global _currentLabel
    name = "L%d" % _currentLabel
    _currentLabel += 1
    if len(suffix) > 0:
        name += "_" + suffix
    return name
_currentLabel = 0
# adds a macro with the given name to the list of macros
# will format the given code
# macros must currently be a single line (enforced by the assert below)
# comments the macro's name at the end of the line
# TODO - use asm macro definitions instead?
def macro(name, code):
    code = format(code)
    assert code.count("\n") == 0  # TODO: support multi-line macros
    # Append the macro name as a trailing comment, continuing an existing
    # '@' comment with ' - ' or starting a new one with ' @ '.
    code += " - " if '@' in code else " @ "
    code += name
    assert name not in _macros
    _macros[name] = code
_macros = dict()  # macro name -> expanded, name-tagged code line
# adds an expression macro (usually name -> value)
# aliases will also be added to the global name space
def alias(name, exp):
    globals()[name] = exp  # expose the alias as a module-level name too
    _aliases[name] = exp
_aliases = collections.OrderedDict()  # alias name -> expression/value
def shared_constant(name, value):
    """Record a named constant to be shared with the emulator output."""
    _shared_constants[name] = value
_shared_constants = collections.OrderedDict()  # constant name -> value
# Realigns trailing '@' comments in asm code so they all start in the same
# column. Comment-only lines are normalised to ' @ <text>'; lines without a
# comment pass through untouched. If no line has both code and a comment,
# the input is returned unchanged.
def alignComments(code):
    def has_code(line):
        before = line.split('@', 1)[0] if '@' in line else line
        return len(before.strip()) > 0

    def has_comment(line):
        return '@' in line

    lines = code.split("\n")
    widths = [len(line.split('@', 1)[0].rstrip()) + 1
              for line in lines
              if has_code(line) and has_comment(line)]
    if not widths:
        return code
    width = max(widths)

    realigned = []
    for line in lines:
        if not has_comment(line):
            realigned.append(line)
        elif not has_code(line):
            realigned.append(" @ " + line.split('@', 1)[1].lstrip())
        else:
            before, _, after = line.partition('@')
            realigned.append(before.rstrip().ljust(width) + '@' + after)
    return "\n".join(realigned)
####### DEFINITIONS #####################################################
handlers = [None]*(1<<16) # opcode -> handler code
alias('handlerShift', 5) # number of bits per handler
# x86 register -> ARM register assignment; the names are injected into the
# module namespace by the globals().update() call below
registerAliases = collections.OrderedDict([
    ('eax', 'r7'), # mapping matches x86 and arm syscall registers
    ('ebx', 'r0'),
    ('ecx', 'r1'),
    ('edx', 'r2'),
    ('esi', 'r3'),
    ('edi', 'r4'),
    ('ebp', 'r5'),
    ('esp', 'r13'),
    ('scratch', 'r8'), # scratch register
    ('result', 'r9'), # last result as flag helper
    ('aux', 'r10'), # auxiliary flag helper
    ('eip', 'r6'), # holds address of current instruction
    ('word', 'r12'), # opcode word
    ('handlerBase','r11'), # set all handler bits to 1 to override
    ('nextHandler','r14'),
])
# exit codes shared with the C side (emitted by generateSharedConstantsHeader)
shared_constant('FEMU_EXIT_SUCCESS', 0)
shared_constant('FEMU_EXIT_FAILURE', 1)
shared_constant('FEMU_EXIT_UNIMPLEMENTED_OPCODE', 2)
shared_constant('FEMU_EXIT_INT3', 3)
globals().update((k, k) for k in registerAliases) # insert registers into global scope
# index -> ereg, in x86 register-encoding order (eax=0 .. edi=7)
eregs = [eax, ecx, edx, ebx, esp, ebp, esi, edi] # index -> ereg
# dispatch helpers: nextHandler1_* extracts the next opcode from `word`,
# nextHandler2 turns it into a handler address, nextWord_* advances eip
macro('nextHandler1_0Byte', "uxth nextHandler, word") # extract lower bytes
macro('nextHandler1_1Byte', "ubfx nextHandler, word, 8, 16") # extract middle bytes
macro('nextHandler1_2Byte', "lsr nextHandler, word, #16") # extract upper bytes
macro('nextHandler2', "orr nextHandler, handlerBase, nextHandler, lsl {handlerShift}")
macro('nextWord_1Byte', "ldr word, [eip, 1]!")
macro('nextWord_2Byte', "ldr word, [eip, 2]!")
macro('nextWord_5Byte', "ldr word, [eip, 5]!")
macro('branchNext', "bx nextHandler")
####### HANDLERS ########################################################
# default: every 16-bit opcode word traps to notImplementedFunction; the
# loops below overwrite the slots of the implemented opcodes
for opcode in range(2**16):
    Opcode(opcode).define("""@ missing opcode handler 0x{opcode:04x}
b notImplementedFunction
""", opcode=opcode)
# 0x58+r: pop reg -- esp (r == 4) needs a detour through scratch because
# the emulated esp is the real ARM sp
for op in byteOpcodesWithRegister(0x58): # pop reg
    instr = ('pop {{ scratch }}\n mov esp, scratch'
             if op.r == 4 else # esp
             'pop {{ {reg} }}')
    op.define("""@ pop reg {reg}
{nextHandler1_1Byte}
{nextHandler2}
{nextWord_1Byte}
"""+instr+"""
{branchNext}
""", reg=eregs[op.r])
# 0x50+r: push reg -- same esp special case as pop above
for op in byteOpcodesWithRegister(0x50): # push reg
    instr = ('mov scratch, esp\n push {{ scratch }}'
             if op.r == 4 else # esp
             'push {{ {reg} }}')
    op.define("""@ push reg {reg}
{nextHandler1_1Byte}
{nextHandler2}
{nextWord_1Byte}
"""+instr+"""
{branchNext}
""", reg=eregs[op.r])
# 0x68: push imm32
# NOTE(review): the template below does not use {reg}; the reg=eregs[op.r]
# keyword looks like a copy-paste leftover from the handlers above -- confirm
for op in byteOpcodes(0x68): # push imm32
    op.define("""@ push imm32
{nextWord_5Byte}
ldr scratch, [eip, -4]
{nextHandler1_0Byte}
{nextHandler2}
push {{ scratch }}
{branchNext}
""", reg=eregs[op.r])
# 0xB8+r: mov reg, imm32
for op in byteOpcodesWithRegister(0xB8): # mov reg, imm32
    op.define("""@ mov {reg}, imm32
{nextWord_5Byte}
ldr scratch, [eip, -4]
{nextHandler1_0Byte}
{nextHandler2}
mov {reg}, scratch
{branchNext}
""", reg=eregs[op.r])
# 0xA3: mov [imm32], eax -- a STORE, matching the "str eax" below.
# NOTE(review): the "@ mov eax, [imm32]" comment inside the template has
# its operands reversed (runtime string, left unchanged here)
for op in byteOpcodes(0xA3): # mov [imm32], eax (store)
    op.define("""@ mov eax, [imm32]
{nextWord_5Byte}
ldr scratch, [eip, -4]
{nextHandler1_0Byte}
{nextHandler2}
str eax, [scratch]
{branchNext}
""", reg=eregs[op.r])
# 0xA1: mov eax, [imm32] -- a LOAD, matching the "ldr eax" below; the
# template's "@" comment has reversed operands here as well
for op in byteOpcodes(0xA1): # mov eax, [imm32] (load)
    op.define("""@ mov [imm32], eax
{nextWord_5Byte}
ldr scratch, [eip, -4]
{nextHandler1_0Byte}
{nextHandler2}
ldr eax, [scratch]
{branchNext}
""", reg=eregs[op.r])
# 0x90: nop -- just advance to the next instruction
for op in byteOpcodes(0x90): # nop
    op.define("""@ nop
{nextHandler1_1Byte}
{nextHandler2}
{nextWord_1Byte}
{branchNext}
""")
# 0xCC: int3 -- terminate emulation with the INT3 exit code
for op in byteOpcodes(0xCC): # int3
    op.define("""@ int3
{nextWord_1Byte}
mov word, FEMU_EXIT_INT3
b femuEnd
""")
## FLAGS ############################
## ARM X86
# Sign/N 31 7
# Zero 30 6
# Carry 29 0
# Overflow 28 11
# parity - 2 (result)
# Adjust - 4 (aux)
# 0x9C: pushfd -- too long to inline in a handler slot, so it tail-calls
# into pushfd_impl (emitted below as a regular .text function)
for op in byteOpcodes(0x9C): # pushfd
    op.define("""@ pushfd
b pushfd_impl
""")
function(text="""
.thumb_func
.type pushfd_impl, %function
pushfd_impl: @ pushfd
{nextHandler1_1Byte}
{nextHandler2}
{nextWord_1Byte}
ldr scratch, =mem_arm_sp @ load arm stack at scratch
ldr scratch, [scratch]
stmfd scratch!, {{r0-r2}} @ push registers (store multiple - full descending)
get_eflags
push {{ r0 }}
ldmfd scratch, {{r0-r2}} @ restore regs
{branchNext}
""")
# 0x9D: popfd -- tail-calls into popfd_impl.
# NOTE(review): the "@ pushfd" text inside this template should read
# "popfd" (runtime string, left unchanged here)
for op in byteOpcodes(0x9D): # popfd
    op.define("""@ pushfd
b popfd_impl
""")
## ARM X86
# Sign/N 31 7
# Zero 30 6
# Carry 29 0
# Overflow 28 11
# parity - 2 (result)
# Adjust - 4 (aux)
function(text="""
.global popfd_impl
.thumb_func
.type popfd_impl, %function
popfd_impl: @ popfd
{nextHandler1_1Byte}
{nextHandler2}
{nextWord_1Byte}
ldr scratch, =mem_arm_sp @ load arm stack at scratch
ldr scratch, [scratch]
stmfd scratch!, {{r0-r3}} @ push registers (store multiple - full descending)
pop {{ r0 }} @ get flags from stack in r0
@ register-stored flags
ubfx result, r0, 2, 1 @ p-flag -> result, by copying bit 2 into result
and aux, r0, 0x10 @ aux-flag -> aux (since result has 0 bit result xor aux will be aux)
@ memory-stored flags
and r3, r0, 0x0600 @ only DF, IF may be set (see line below for bit(1))
orr r3, r3, 0x0002 @ bit(1) will be set always
orr r3, r3, 0x0200 @ IF will be set always
ldr r1, =mem_eflags @ store in memory-stored flags
str r3, [r1]
@ NZCo flags -> CSPR
ubfx r2, r0,11, 1 @ r2 holds 0b000o
ubfx r1, r0, 6, 2 @ r1 holds 0b00NZ
bfi r2, r0, 1, 1 @ r2 holds 0b00Co
lsl r1, r1, 30 @ r1 is 0bNZ00...
orr r1, r1, r2, lsl 28 @ r1 is 0bNZCo0...
msr cpsr_f, r1 @ place r1 in status
ldmfd scratch, {{r0-r3}} @ restore regs
{branchNext}
""")
# 0xCD 0x80: int 0x80 -- Linux syscall; register mapping was chosen so the
# x86 syscall registers already sit in the ARM syscall registers
Opcode(0xcd80).define( # int 0x80
    """
{nextHandler1_2Byte}
svc 0 @ the registers are already in the right place, no mapping necessary
{nextHandler2}
{nextWord_2Byte}
{branchNext}
""")
####### GENERATOR FUNCTIONS #############################################
# one string of all the handlers
def generateOpcodeHandlers():
    """Concatenate all 65536 handler snippets, each aligned to
    2**handlerShift bytes.

    The table is ordered by the byte-swapped opcode so that the
    little-endian 16-bit fetch of the instruction stream indexes it
    directly.

    NOTE(review): the `opcode=opcode` format argument is unused by the
    template (only {handlerAlign} and {code} appear) -- harmless.
    """
    assert len(handlers) == (1<<16)
    opcodes = [(((i & 0xff) << 8) | ((i & 0xff00) >> 8))
               for i in range(len(handlers))] # shuffle bytes for endianness
    return "\n".join("""
.align {handlerAlign}
{code}""".format(opcode=opcode,
                 handlerAlign=handlerShift,
                 code=handlers[opcode])
                 for i,opcode in enumerate(opcodes))
####### ASSEMBLER HEADER #############################################
def generateHeader():
    """Build the assembler prologue.

    Emits register aliases (.req), shared constants (.equ), the exported
    symbols, and the state-switching macros store_state / restore_state /
    get_eflags used by the generated handlers.
    """
    aliases = "\n".join(
        f" {name:11} .req {reg}"
        for name, reg in registerAliases.items()
    )
    comma = ','
    sharedConstants = "\n".join(
        f" .equ {name+comma:20} {value}"
        for name, value in _shared_constants.items()
    )
    return f"""
.syntax unified
.thumb
.extern writeHexByte
@ aliases
{aliases}
@ shared constants
{sharedConstants}
.global femuHandlers
.global fb @ breakpoint
@ register state
.global stored_eip
.global stored_eax
.global stored_ebx
.global stored_ecx
.global stored_edx
.global stored_esi
.global stored_edi
.global stored_ebp
.global stored_esp
.global stored_eflags
@ other state
.global stored_unimplemented_opcode
.align 15
@ macros
@ place (x86 registers, esp, eip) in store_(reg), stores cspr flags
@ frees up at least registers r0-r7 as well as cspr
@ switch to arm stack - uses scratch. note: does not write stored_eflags
@ intention: switch to C-code, running code that uses a lot of registers
.macro store_state
@ store state
ldr scratch, =stored_eip; str eip, [scratch]
ldr scratch, =stored_eax; str eax, [scratch]
ldr scratch, =stored_ebx; str ebx, [scratch]
ldr scratch, =stored_ecx; str ecx, [scratch]
ldr scratch, =stored_edx; str edx, [scratch]
ldr scratch, =stored_esi; str esi, [scratch]
ldr scratch, =stored_edi; str edi, [scratch]
ldr scratch, =stored_ebp; str ebp, [scratch]
ldr scratch, =stored_esp; str esp, [scratch]
mrs scratch, cpsr @ get status-register stored flag bits
ldr sp, =mem_cspr @ address where to store them
str scratch, [sp]
@ restore stack and return
ldr sp, =mem_arm_sp; ldr sp, [sp]
.endm
@ undoes store_state
@ load (x86 registers, esp, eip) from store_reg, load cspr flags
@ implied: switch to x86 stack) - uses scratch
.macro restore_state
ldr sp, =mem_cspr @ address where status is stored
ldr scratch, [sp]
msr cpsr_f, scratch @ get status-register stored flag bits
ldr scratch, =stored_eip; ldr eip, [scratch]
ldr scratch, =stored_eax; ldr eax, [scratch]
ldr scratch, =stored_ebx; ldr ebx, [scratch]
ldr scratch, =stored_ecx; ldr ecx, [scratch]
ldr scratch, =stored_edx; ldr edx, [scratch]
ldr scratch, =stored_esi; ldr esi, [scratch]
ldr scratch, =stored_edi; ldr edi, [scratch]
ldr scratch, =stored_ebp; ldr ebp, [scratch]
ldr scratch, =stored_esp; ldr esp, [scratch]
.endm
@ TODO macro store_flag_state? - restore_flag_state
@ macro reads eflags from cspr, aux, result, memory
@ and stores it in r0
@ uses r0-r2
.macro get_eflags
@ interleave the following:
@ r0: get memory-stored flags
@ r1: get aux flag
@ r2: get p flag
eor r2, result, result, lsr #4
ldr r0, =mem_eflags
eor r1, aux, result @ r1 holds ...0b???A ????
eor r2, r2, r2, lsl #2
ldr r0, [r0]
and r1, r1, 0x10 @ r1 holds 0b000A 0000
eor r2, r2, r2, lsr #1 @ r2 holds 0b?P??
@ add status-register stored flags
addMI r0, 1<<7 @ minus - get sign flag
addEQ r1, 1<<6 @ equal - get zero flag
and r2, r2, 0x4 @ -interleaved from before: r2 holds 0b0P00
addCS r0, 1<<0 @ carry set - get carry flag
addVS r1, 1<<11 @ v set - get overflow flag
@ combine the various results
orr r0, r2
orr r0, r1 @ r0 holds eflags
.endm
"""
# TODO: use 'addMI r0, 1<<7', 'addEQ r1, 1<<6', 'addCS r2, 1', 'addVS r3, 1<<11', interleave
## ARM X86
# Sign/N 31 7
# Zero 30 6
# Carry 29 0
# Overflow 28 11
# parity - 2 (result)
# Adjust - 4 (aux)
####### ASSEMBLER FUNCTIONS #############################################
# femuRun: entry point of the emulator. Sets up the emulated register
# state and starts dispatching; femuEnd stores the state back and returns
# the exit status left in `word`. The {{...}} braces are escaped because
# the strings pass through the module-level `format` helper.
function(
    rodata = r"""
msg:
.ascii "starting emulation...\n"
""",
    data = r"""
@ emulator state
mem_arm_sp: .word 0
mem_eflags: .word 0x0202 @ the direction flag and reserved bit 1
mem_cspr: .word 0 @ to store/restore cspr flag registers
@ register state
stored_eip: .word 0
stored_eax: .word 0
stored_ebx: .word 0
stored_ecx: .word 0
stored_edx: .word 0
stored_esi: .word 0
stored_edi: .word 0
stored_ebp: .word 0
stored_esp: .word 0
stored_eflags: .word 0
@other public values
stored_unimplemented_opcode: .word 0
""",
    text = r"""
# femuRun(void* pc, void* sp) -> result status
.global femuRun
.thumb_func
.type femuRun, %function
femuRun:
push {{lr}}
push {{r4-r11}}
@ print message
push {{r0}}
push {{r1}}
mov r0, 1 @ stdout
ldr r1, =msg @ write buffer
mov r2, 22 @ size
mov r7, 4 @ write syscall
svc 0
pop {{r1}}
pop {{r0}}
@ store arm stack pointer
ldr r2, =mem_arm_sp
str sp, [r2]
@ set up eip, x86 stack pointer (esp)
mov eip, r0
mov esp, r1
@ set emulator helper registers
mov scratch, 0
mov result, 0
mov aux, 0
ldr handlerBase, =handler_0000
@ set up word, nextHandler
ldr word, [eip]
{nextHandler1_0Byte}
{nextHandler2}
@ set up emulated registers
mov eax, 0
mov ecx, 0
mov edx, 0
mov ebx, 0
mov ebp, 0
mov esi, 0
mov edi, 0
@ set up flags
msr cpsr_f, eax @ clear status flags
fb:
@ start emulation
{branchNext}
@ femuEnd expects return value to be stored in register 'word'
femuEnd:
store_state
get_eflags
ldr r1, =stored_eflags
str r0, [r1] @ store flags
mov r0, word @ store return value in r0
pop {{r4-r11}}
pop {{pc}}
""")
# notImplementedFunction: trap target for all opcodes without a handler.
# Prints the offending opcode bytes (via writeHexByte) and exits the
# emulation with FEMU_EXIT_UNIMPLEMENTED_OPCODE.
function(
    rodata = r"""
unimplemented_msg:
.ascii "Unimplemented opcode: "
newline:
.ascii "\n"
""",
    text = r"""
.thumb_func
notImplementedFunction:
store_state
mov r0, 1 @ stdout
ldr r1, =unimplemented_msg
mov r2, 22 @ size
mov r7, 4 @ write syscall
svc 0
ldr r0, =stored_unimplemented_opcode
ubfx r1, word, 0, 16
str r1, [r0]
mov r0, 1
and r1, word, 0xff
bl writeHexByte
mov r0, 1
ubfx r1, word, 8, 8
bl writeHexByte
mov r0, 1 @ stdout
ldr r1, =newline
mov r2, 1 @ size
mov r7, 4 @ write syscall
svc 0
restore_state
mov word, FEMU_EXIT_UNIMPLEMENTED_OPCODE
b femuEnd
""")
def generateSource(outputFile):
    """Assemble the complete generated assembler source and write it out.

    Layout: header (aliases, constants, macros), the 64K opcode handler
    table, the collected .text fragments, then the .data and .rodata
    sections, finally switching back to the standard code section.

    :param outputFile: path of the .s file to write
    """
    # bug fix: the final directive used to read ".section text", which
    # creates/switches to a section literally named "text"; the standard
    # code section is ".text"
    code = r"""
{header}
{opcodeHandlers}
{text}
.section .data
{data}
.section .rodata
{rodata}
.section .text
""".format(header = generateHeader(),
           opcodeHandlers = generateOpcodeHandlers(),
           text = "\n".join(_text),
           data = "\n".join(_data),
           rodata = "\n".join(_rodata))
    print("write", code.count("\n"), "lines to", outputFile)
    with open(outputFile, "w") as f:
        f.write(code)
##### GENERATE OTHER FILES #########################################################################
def generateGDBRegisterPrint(outputFilename):
    """Write a gdb script defining a `reg` command that prints the emulated
    register table, plus `sr` (step + reg)."""
    def replace(name):
        # shorten the longer alias names so every column fits in 8 chars
        return (
            name
            .replace("handlerBase", "hBas")
            .replace("nextHandler", "nxtH")
            .replace("scratch", "scrat")
            .replace("result", "reslt")
        )
    items = [(replace(name), reg) for name, reg in registerAliases.items()]
    names = ["%s-%s" % (name, reg) for name, reg in items]
    lengths = [max(8, len(name)) for name in names]
    prints = []
    # header row: "name-reg" column titles
    prints.append('printf "' + '|'.join(name.rjust(lengths[i])
                  for i,name in enumerate(names)) + r'\n"' )
    # hex row: one %08X per register
    # NOTE(review): the rjust(length + 6 - 10) padding presumably lines the
    # 8-digit hex values up under the headers -- confirm against gdb output
    prints.append('printf "' + '|'.join("%08X".rjust(length + 6 - 10)
                  for length in lengths)
                  + r'\n", ' + ', '.join('$' + reg for _, reg in items) + '\n')
    # decimal row: values too wide for 8 digits are shown as "#####..."
    for i, (name, reg) in enumerate(items):
        length = lengths[i]
        if i != 0:
            prints.append("echo |")
        prints.append("""if ${reg} >= 100000000
printf "{hash}"
else
printf "{space}%8d", ${reg}
end""".format(reg=reg, hash=length*'#', space=' '*(length-8)))
    prints.append(r'echo \n%s\n' % ('-'*(sum(lengths) + 1*(len(lengths)-1))))
    code = r"""
echo __ define print reg function ____\n
define reg
{printStatements}
refresh
end
define sr
si
reg
end
""".format(printStatements="\n ".join(prints))
    #print(code)
    print("write", code.count("\n"), "lines to", outputFilename)
    with open(outputFilename, "w") as f:
        f.write(code)
def generateSharedConstantsHeader(outputFilename):
    """Write a C header exposing the shared constants registered via
    shared_constant(), wrapped in a classic include guard."""
    define_lines = "\n".join(
        f"#define {name:20} {value}" for name, value in _shared_constants.items()
    )
    code = (
        """
#ifndef __shared_constants_h__
#define __shared_constants_h__
"""
        + define_lines
        + """
#endif /* defined(__shared_constants_h__) */
""")
    print("write", code.count("\n"), "lines to", outputFilename)
    with open(outputFilename, "w") as f:
        f.write(code)
##### MAIN #########################################################################################
if __name__ == "__main__":
    # emit the assembler source, the gdb helper script and the shared C header
    # NOTE(review): assumes the "gen" directory already exists -- confirm
    generateSource("gen/opcode-handlers.s")
    generateGDBRegisterPrint("gen/register-gdb-print")
    generateSharedConstantsHeader("gen/shared_constants.h")
| ant6n/femu | source/genemu.py | Python | gpl-2.0 | 20,025 |
"""
This file is part of geneticmonkeys.
Copyright (C) 2016 Yazan Obeidi
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from evolution import meosis, reproduce, mutate
__author__ = 'Yazan Obeidi'
__copyright__ = 'Copyright 2016, Yazan Obeidi'
__license__ = 'GPLv3'
__version__ = '0.0.2'
__maintainer__ = 'Yazan'
__status__ = 'development' | yazanobeidi/geneticmonkeys | geneticmonkeys/evolution/__init__.py | Python | gpl-3.0 | 959 |
def make_header(**attrs):
    """Return the opening of a GraphViz `graph` document, followed by one
    `name="value";` line per graph-level attribute."""
    attribute_lines = '\n'.join('%s="%s";' % (key, value) for (key, value) in attrs.items())
    return """graph G {
forcelabels=false;
node [label="", shape=plaintext];
""" + attribute_lines


# closing brace terminating the graph document
footer = "}"
def node_string(node, **kwargs):
    """Return a dot node statement `"node" [k=v, ...]`.

    A default fontsize of 14 is supplied when the caller does not set one;
    the caller's kwargs dict is never mutated.
    """
    options = dict(kwargs)
    options.setdefault('fontsize', 14)
    rendered = ", ".join("%s=%s" % (key, value) for key, value in options.items())
    return '"%s" [%s]\n' % (node, rendered)
def pair_string(name1, name2, **kwargs):
    """Return a dot edge statement `"name1" -- "name2"[k=v, ...]`."""
    options = ", ".join("%s=%s" % (key, value) for key, value in kwargs.items())
    return '"%s" -- "%s"[%s]\n' % (name1, name2, options)
def safe_string(tpl):
    """Join the words of phrase tuple `tpl` and escape backslashes and
    double quotes for embedding in a quoted dot string.

    NOTE(review): written for Python 2 -- there, .encode('utf8', 'ignore')
    on a byte string implicitly decodes as ASCII first, and the except
    clause catches non-ASCII phrases. Under Python 3 encode() returns
    `bytes` (and UnicodeDecodeError is never raised here), so callers
    would interpolate a b'...' repr into the graph; confirm the intended
    interpreter before porting.
    """
    try:
        return ' '.join(tpl).replace('\\', '\\\\').replace('"', '\\"').encode('utf8', 'ignore')
    except UnicodeDecodeError:
        return 'UNICODE ERROR'
def output_pairs(labels, dist_matrix, dist_filter=lambda x: x != 1):
    """Render a dot graph with one edge per label pair passing the filter.

    labels -- a hash of indices for the array: integer -> phrase tuple
    dist_matrix -- a (possibly numpy) matrix of pairwise distances
    dist_filter -- predicate called on each distance; the edge is emitted
        only when it returns true
    """
    parts = [make_header()]
    count = len(labels)
    for i in range(count):
        for j in range(i + 1, count):
            distance = dist_matrix[i][j]
            if dist_filter(distance):
                parts.append(pair_string(safe_string(labels[i]),
                                         safe_string(labels[j]),
                                         len=distance, weight=1))
    parts.append(footer)
    return "".join(parts)
def create_font_size_function(phrase_frequencies, min_size=12, max_size=30):
    """Return a function that maps a phrase frequency to an integer font
    size, scaling linearly from min_size (at the rarest phrase) to
    max_size (at the most frequent one)."""
    lowest = min(phrase_frequencies.values())
    highest = max(phrase_frequencies.values())
    span = float(highest - lowest)
    def font_size_from_frequency(freq):
        return int((freq - lowest) / span * (max_size - min_size) + min_size)
    return font_size_from_frequency
def output_pairs_dict(pair_similarity, enlarge_primary=False, heatmap_vals=None, true_scaling=False, phrase_frequencies=None, similarities=None, phrase_scores=None, n_layers=0, graph_attrs=None):
    """Render a dot graph from a phrase-similarity mapping.

    pair_similarity -- dict: phrase -> list of (neighbour, distance)
    enlarge_primary -- render key phrases with a larger font
    heatmap_vals -- optional dict: phrase -> heat level
    true_scaling -- scale fonts by phrase_frequencies instead of fixed sizes
    phrase_frequencies -- dict: phrase -> frequency (needed for true_scaling
        and for the freq/layer node attributes)
    similarities -- optional dict: phrase -> list of (neighbour, similarity)
    phrase_scores -- optional dict: phrase -> importance score
    n_layers -- number of dot layers to declare and assign nodes to
    graph_attrs -- extra graph-level attributes

    Returns the dot source as a string.
    """
    if not graph_attrs:
        graph_attrs = {}
    if n_layers:
        graph_attrs['layers'] = ':'.join(map(str, range(1, n_layers+1)))
    graph_rep = make_header(**graph_attrs)
    # collect every phrase appearing either as a key or as a neighbour
    graph_terms = set()
    for term, lst in pair_similarity.items():
        graph_terms.add(term)
        graph_terms.update(term for term, val in lst)
    if true_scaling and phrase_frequencies is not None:
        font_size_from_frequency = create_font_size_function(phrase_frequencies)
        min_freq, max_freq = min(phrase_frequencies.values()), max(phrase_frequencies.values())
        def level_from_freq(freq):
            level = (freq - min_freq) * n_layers / (max_freq - min_freq)
            return max(min(level, n_layers - 1), 0) + 1
    # NOTE(review): font_size_from_frequency / level_from_freq are only
    # defined when true_scaling AND phrase_frequencies are given, yet the
    # branches below reference them under true_scaling (or n_layers) alone
    # -- confirm callers always pass phrase_frequencies in those modes.
    for term in graph_terms:
        if true_scaling:
            fontsize = font_size_from_frequency(phrase_frequencies[term])
        elif enlarge_primary and term in pair_similarity:
            fontsize = 18
        else:
            fontsize = 14
        if heatmap_vals and term in heatmap_vals:
            level = heatmap_vals[term]
        else:
            level = 0
        kwargs = {'level': level, 'fontsize': fontsize}
        if phrase_frequencies:
            kwargs['freq'] = phrase_frequencies[term]
        if n_layers:
            kwargs['layer'] = level_from_freq(phrase_frequencies[term])
        if phrase_scores:
            kwargs['imp'] = phrase_scores[term]
        graph_rep += node_string(safe_string(term), **kwargs)
    # bug fix: this loop used dict.iteritems(), which does not exist on
    # Python 3 and was inconsistent with the .items() call above
    for phrase1, pairs in pair_similarity.items():
        if similarities:
            similarity_pairs = dict(similarities[phrase1])
        for phrase2, distance in pairs:
            kwargs = {'sim': similarity_pairs[phrase2]} if similarities else {}
            graph_rep += pair_string(safe_string(phrase1), safe_string(phrase2), len=distance, weight=1, **kwargs)
    graph_rep += footer
    return graph_rep
| dpfried/mocs | lib/write_dot.py | Python | mit | 3,901 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroup(Model):
    """Effective network security group.

    NOTE: this model is auto-generated by AutoRest (see the file header);
    manual changes will be lost when the code is regenerated.

    :param network_security_group: The ID of network security group that is
     applied.
    :type network_security_group:
     ~azure.mgmt.network.v2016_12_01.models.SubResource
    :param association:
    :type association:
     ~azure.mgmt.network.v2016_12_01.models.EffectiveNetworkSecurityGroupAssociation
    :param effective_security_rules: A collection of effective security rules.
    :type effective_security_rules:
     list[~azure.mgmt.network.v2016_12_01.models.EffectiveNetworkSecurityRule]
    """

    # maps python attribute -> REST wire key and msrest (de)serialization type
    _attribute_map = {
        'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
        'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
        'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
    }

    def __init__(self, network_security_group=None, association=None, effective_security_rules=None):
        super(EffectiveNetworkSecurityGroup, self).__init__()
        self.network_security_group = network_security_group
        self.association = association
        self.effective_security_rules = effective_security_rules
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/effective_network_security_group.py | Python | mit | 1,777 |
# -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create an argparser from a Schema
>>>
"""
import argparse
import logging
from functools import singledispatch
from ..config import Config
from ..schema_section import SchemaSection
from ..times import DateTime, Date, Time, TimeDelta
from ..toolbox.unrepr import unrepr
from ..utils import get_validator_argument
from .. import validator as _validator
from .undefined import UNDEFINED
__author__ = "Simone Campagna"
__copyright__ = 'Copyright (c) 2015 Simone Campagna'
__license__ = 'Apache License Version 2.0'

# public API of this module
__all__ = [
    'argument_name',
    'argument_dest',
    'update_argparser',
    'create_argparser',
    'read_namespace',
]

# module-level logger; handlers/levels are configured by the application
LOG = logging.getLogger(__name__)
def _make_fqname(fqname):
"""Make an fqname tuple"""
if isinstance(fqname, tuple):
return fqname
elif fqname is None:
return ()
elif isinstance(fqname, str):
return tuple(fqname.split("."))
else:
raise TypeError("invalid fqname {!r}".format(fqname))
def argument_name(fqname):
"""Returns the argument name"""
return "--" + "-".join(_make_fqname(fqname))
def argument_dest(fqname):
"""Returns the argument dest"""
return "_".join(_make_fqname(fqname))
def _argdata(validator, defaults, fqname):
    """Build the standard (args, kwargs) pair for parser.add_argument().

    The help text falls back to a generic message when the validator has
    no "doc" argument; the default is UNDEFINED unless `defaults` is true
    and the validator declares one.
    """
    fqname = _make_fqname(fqname)
    dest = argument_dest(fqname)
    has_doc, doc = get_validator_argument(validator, "doc")
    help_text = doc if has_doc else "set option {!r}".format(dest)
    kwargs = {
        "dest": dest,
        "help": help_text,
        "metavar": fqname[-1].upper(),
        "default": UNDEFINED,
    }
    if defaults:
        has_default, default = get_validator_argument(validator, "default")
        if has_default:
            kwargs["default"] = default
    return (argument_name(fqname),), kwargs
def boolean(value):
    """Convert a string to a bool.

    'on'/'true' (any case) are True, 'off'/'false' are False; otherwise
    the string must parse as an integer, whose truthiness is returned.
    Raises ValueError for anything else.
    """
    value = value.lower()
    if value in {'on', 'true'}:
        return True
    if value in {'off', 'false'}:
        return False
    try:
        return bool(int(value))
    except ValueError:
        raise ValueError("invalid bool value {!r}".format(value)) from None
class _ArgType(object):
    """Argparse `type=` callable that parses an option string with `unrepr`
    and validates the resulting value with the wrapped validator.

    The globals dict exposes the date/time helper classes so option values
    such as `DateTime(...)` can be written on the command line.
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, validator, name):
        self._validator = validator    # validator applied after parsing
        self._name = name              # option name, reported on validation
        # names available to unrepr when evaluating the option string
        self._gdict = {
            'DateTime': DateTime,
            'Date': Date,
            'Time': Time,
            'TimeDelta': TimeDelta,
        }
    def __call__(self, string):
        # argparse only surfaces the ValueError message, so log the real
        # failure (with traceback) before converting it.
        # NOTE(review): bare `except:` also swallows KeyboardInterrupt /
        # SystemExit -- consider `except Exception:` (code left unchanged).
        try:
            value = unrepr(string, globals_d=self._gdict)
            return self._validator.validate(name=self._name, value=value, defined=True)
        except:
            LOG.exception("argument parsing error:")
            raise ValueError(string)
    def __repr__(self): # pragma: no cover
        return type(self._validator).__name__
@singledispatch
def _argtype(validator, name):
    """Return the argparse `type=` callable for `validator`.

    Default: wrap the validator in _ArgType (parse with unrepr, then
    validate). The registrations below map simple validator kinds to plain
    constructors so argparse converts directly.
    """
    return _ArgType(validator, name)
@_argtype.register(_validator.Str)
@_argtype.register(_validator.StrChoice)
def _(validator, name): # flake8: noqa
    # pylint: disable=function-redefined
    # pylint: disable=unused-argument
    """Overrides argtype for Str validators: strings pass through as-is."""
    return str
@_argtype.register(_validator.DateTimeOption)
def _(validator, name): # flake8: noqa
    # pylint: disable=function-redefined
    # pylint: disable=unused-argument
    """Overrides argtype for DateTimeOption validators."""
    return DateTime
@_argtype.register(_validator.DateOption)
def _(validator, name): # flake8: noqa
    # pylint: disable=function-redefined
    # pylint: disable=unused-argument
    """Overrides argtype for DateOption validators."""
    return Date
@_argtype.register(_validator.TimeOption)
def _(validator, name): # flake8: noqa
    # pylint: disable=function-redefined
    # pylint: disable=unused-argument
    """Overrides argtype for TimeOption validators."""
    return Time
@_argtype.register(_validator.TimeDeltaOption)
def _(validator, name): # flake8: noqa
    # pylint: disable=function-redefined
    # pylint: disable=unused-argument
    """Overrides argtype for TimeDeltaOption validators."""
    return TimeDelta
@_argtype.register(_validator.Bool)
def _(validator, name): # flake8: noqa
    # pylint: disable=function-redefined
    # pylint: disable=unused-argument
    """Overrides argtype for Bool validators (accepts on/off/true/false/int)."""
    return boolean
@singledispatch
def _add_argument(validator, argparser, defaults, fqname):
    # pylint: disable=unused-argument
    """Add the argparse argument(s) corresponding to `validator`.

    Default: silently skip validator types without a registration (see the
    commented-out raise below for the stricter alternative).
    """
    pass # raise TypeError("cannot compile validator {!r}".format(validator))
@_add_argument.register(_validator.Int)
@_add_argument.register(_validator.IntList)
@_add_argument.register(_validator.IntTuple)
@_add_argument.register(_validator.Float)
@_add_argument.register(_validator.FloatList)
@_add_argument.register(_validator.FloatTuple)
@_add_argument.register(_validator.Str)
@_add_argument.register(_validator.StrList)
@_add_argument.register(_validator.StrTuple)
@_add_argument.register(_validator.Bool)
@_add_argument.register(_validator.BoolList)
@_add_argument.register(_validator.BoolTuple)
@_add_argument.register(_validator.DateTimeOption)
@_add_argument.register(_validator.DateOption)
@_add_argument.register(_validator.TimeOption)
@_add_argument.register(_validator.TimeDeltaOption)
def _(validator, argparser, defaults, fqname): # flake8: noqa
    # pylint: disable=function-redefined
    """Standard _add_argument: derive args/kwargs and the type converter."""
    args, kwargs = _argdata(validator, defaults, fqname)
    kwargs["type"] = _argtype(validator, name=kwargs["dest"])
    argparser.add_argument(
        *args,
        **kwargs)
@_add_argument.register(_validator.IntChoice)
@_add_argument.register(_validator.FloatChoice)
@_add_argument.register(_validator.StrChoice)
@_add_argument.register(_validator.BoolChoice)
def _(validator, argparser, defaults, fqname): # flake8: noqa
    # pylint: disable=function-redefined
    """_add_argument for *Choice validators: also restrict `choices`."""
    args, kwargs = _argdata(validator, defaults, fqname)
    kwargs["choices"] = get_validator_argument(validator, "choices")[1]
    kwargs["type"] = _argtype(validator, name=kwargs["dest"])
    argparser.add_argument(
        *args,
        **kwargs)
def update_argparser(schema, argparser, *, defaults=True, prefix=None, group_depth=1):
    """Update an argparse parser based on a schema

    Parameters
    ----------
    schema: :class:`SchemaSection`
        the schema instance
    argparser: :class:`argparse.ArgumentParser`
        the argument parser
    defaults: bool, optional
        if True, set the validator default (defaults to True)
    prefix: str, optional
        a prefix for options (defaults to None)
    group_depth: int, optional
        maximum depth for argument grouping (defaults to 1)

    Raises
    ------
    TypeError
        if `schema` is not a SchemaSection
    """
    if not isinstance(schema, SchemaSection):
        raise TypeError("{!r}: not a schema".format(schema))
    fqname = _make_fqname(prefix)
    # the helper mutates `argparser` in place and returns None
    return _impl_update_argparser(
        schema=schema,
        argparser=argparser,
        defaults=defaults,
        fqname=fqname,
        group_depth=group_depth)
def _impl_update_argparser(schema, argparser, fqname, *, defaults=True, group_depth=1, group=None):
    """Update an argparse parser based on a schema (implementation)

    Parameters
    ----------
    schema: :class:`SchemaSection`
        the schema instance
    argparser: :class:`argparse.ArgumentParser`
        the argument parser
    fqname: tuple
        fully qualified name
    defaults: bool, optional
        if True, set the validator default (defaults to True)
    group_depth: int, optional
        maximum depth for argument grouping (defaults to 1)
    group: object, optional
        argument group to be used
    """
    subgroup_depth = group_depth
    if group_depth == 0 and group is None: # pragma: no cover
        # grouping depth exhausted at the top of the recursion: add
        # arguments directly on the parser itself
        group = argparser
    if group_depth > 0:
        # one grouping level consumed per recursion step
        subgroup_depth -= 1
    for key, value in schema.items():
        # skip the schema's defaulting pseudo-entries
        if key in {SchemaSection.__default_option__, SchemaSection.__default_section__}:
            continue
        elif isinstance(value, SchemaSection):
            if group is None or group_depth != 0:
                # still within the grouping depth: open a fresh argument group
                group_name = ".".join(fqname + (key,))
                subgroup = argparser.add_argument_group("configuration options for package {!r}".format(group_name))
            else:
                subgroup = group
            _impl_update_argparser(
                value,
                argparser,
                defaults=defaults,
                fqname=fqname + (key,),
                group_depth=subgroup_depth,
                group=subgroup)
        else:
            if group is None:
                argument_group = argparser
            else:
                argument_group = group
            _add_argument(value, argparser=argument_group, defaults=defaults, fqname=fqname + (key,))
def create_argparser(schema, *, defaults=True, prefix=None, group_depth=1, **kwargs):
    r"""Create an argparse parser from `schema`.

    Parameters
    ----------
    schema: :class:`Schema`
        the schema instance or source
    defaults: bool, optional
        if True, set the validator default (defaults to True)
    prefix: str, optional
        a prefix for options
    group_depth: int, optional
        maximum depth for argument grouping (defaults to 1)
    \*\*kwargs: dict, optional
        forwarded to the :class:`argparse.ArgumentParser` constructor

    Returns
    -------
    :class:`argparse.ArgumentParser`
        the populated argument parser
    """
    parser = argparse.ArgumentParser(**kwargs)
    update_argparser(schema=schema, argparser=parser, defaults=defaults,
                     prefix=prefix, group_depth=group_depth)
    return parser
def read_namespace(schema, namespace, *, config=None, prefix=None, validate=True):
    """Read config data from a namespace

    Parameters
    ----------
    schema: :class:`Schema`
        the schema instance or source
    namespace: :class:`argparse.Namespace`
        the parsed arguments
    config: :class:`Config`, optional
        the config object to be modified and returned (defaults to None,
        which means that a new empty Config is created)
    prefix: str, optional
        a prefix for options (defaults to None)
    validate: bool, optional
        if True performs validation

    Returns
    -------
    tuple
        a 2-tuple (config, validation_result); validation_result is None
        if validate == False
    """
    if not isinstance(schema, SchemaSection):
        raise TypeError("{!r}: not a Schema".format(schema))
    # consistency fix: normalize the prefix exactly like update_argparser
    # does, so a dotted prefix ("a.b") yields the same argument dests on
    # both sides (the previous ad-hoc handling kept "a.b" as one component)
    fqname = _make_fqname(prefix)
    if config is None:
        config = Config()
    _read_namespace_impl(schema=schema, namespace=namespace, config=config, fqname=fqname)
    if validate:
        validation_result = schema.validate(config)
    else:
        validation_result = None
    return config, validation_result
def _read_namespace_impl(schema, namespace, *, config, fqname):
    """read_namespace implementation (recursive).

    Walks the schema; for each subsection recurses into a nested config
    dict, for each option copies the corresponding namespace attribute
    into `config` unless it is UNDEFINED.
    """
    for key, value in schema.items():
        if key in {SchemaSection.__default_option__, SchemaSection.__default_section__}:
            continue
        elif isinstance(value, SchemaSection):
            subconfig = config.setdefault(key, {})
            _read_namespace_impl(schema=value, namespace=namespace, config=subconfig, fqname=fqname + (key,))
        else:
            dest = argument_dest(fqname + (key,))
            # bug fix: default to UNDEFINED rather than None -- a dest that
            # is absent from the namespace must count as "not given" instead
            # of storing a spurious None in the config
            val = getattr(namespace, dest, UNDEFINED)
            if val is not UNDEFINED:
                config[key] = val
| simone-campagna/daikon | zirkon/toolbox/argparser.py | Python | apache-2.0 | 12,587 |
import os
import json
import subprocess
from abc import ABCMeta, abstractmethod
class Task:
    """Abstract base for a calculation task operating on a structure.

    Concrete subclasses implement run/plot/report/load; this base class
    provides construction, JSON persistence (save), lifecycle flags and
    the report-writing helper (report_end).

    NOTE(review): `__metaclass__ = ABCMeta` is Python 2 syntax and is
    ignored on Python 3, so the @abstractmethod decorators are not
    enforced there. Left unchanged to avoid altering instantiation
    behavior; consider `class Task(metaclass=ABCMeta)` if Python 3-only.
    """
    __metaclass__ = ABCMeta

    def __init__(self, structure, task_params=None, workdir='.', executable=None):
        """Create the task and ensure its working directory exists.

        :param structure: the structure the task operates on
        :param task_params: optional dict of task parameters
        :param workdir: working directory (created if missing)
        :param executable: external executable used by the task, if any
        """
        self.structure = structure
        self.workdir = workdir
        if not os.path.isdir(self.workdir):
            os.mkdir(self.workdir)
        # None-safe default avoids a shared mutable default argument
        if task_params is not None:
            self.task_params = task_params
        else:
            self.task_params = {}
        self.executable = executable
        # lifecycle flags
        self.finished = False
        self.success = False
        self.started = False
        self.output = {}
        self.report_dir = self.workdir + os.sep + 'REPORT'

    @abstractmethod
    def run(self):
        """Execute the task (subclass responsibility)."""
        pass

    @abstractmethod
    def plot(self):
        """Produce plots of the task results (subclass responsibility)."""
        pass

    @abstractmethod
    def report(self):
        """Generate a report of the task results (subclass responsibility)."""
        pass

    @abstractmethod
    def load(self):
        """Load a previously saved task (subclass responsibility)."""
        pass

    def save(self, filename=None):
        """Serialize task_params and output as pretty-printed JSON.

        :param filename: target path; defaults to <workdir>/task.json
        """
        if filename is None:
            filename = self.workdir + os.sep + 'task.json'
        ret = {'task_params': self.task_params, 'output': self.output}
        # context manager guarantees the handle is closed even on errors
        with open(filename, 'w') as wf:
            json.dump(ret, wf, sort_keys=True, indent=4, separators=(',', ': '))

    def status(self):
        """Print a human-readable summary of the lifecycle flags."""
        if self.finished:
            print('Task finished')
        if self.started:
            print('Task started')
        if self.success:
            print('Task completed successfully')

    def report_end(self, html, file_format):
        """Write the lxml tree `html` to REPORT/index.html and optionally
        convert it with pandoc.

        :param html: lxml element (the report document)
        :param file_format: 'html' for no conversion, otherwise the pandoc
            output format (e.g. 'pdf')
        :return: the pandoc subprocess when a conversion was started,
            otherwise None
        """
        from lxml import etree
        doctype = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
        result = etree.tostring(html,
                                xml_declaration=True,
                                doctype=doctype,
                                encoding='utf-8',
                                standalone=False,
                                with_tail=False,
                                method='xml',
                                pretty_print=True)
        # bug fix: etree.tostring() returns bytes when an explicit encoding
        # is given, so the file must be opened in binary mode (text-mode
        # 'w' raised TypeError on Python 3)
        with open(self.report_dir + os.sep + 'index.html', 'wb') as wf:
            wf.write(result)
        if file_format != 'html':
            cwd = os.getcwd()
            os.chdir(self.report_dir)
            stderr = open('pandoc_out.log', 'w')
            stdout = open('pandoc_err.log', 'w')
            sp = subprocess.Popen(['pandoc', 'index.html', '-o', 'visual.' + file_format], stderr=stderr, stdout=stdout)
            os.chdir(cwd)
            # the child process inherited its own copies of the descriptors
            stderr.close()
            stdout.close()
            return sp
| MaterialsDiscovery/PyChemia | pychemia/code/tasks.py | Python | mit | 2,543 |
#!/usr/bin/env python
# Post-process an OpenFOAM FSI run: extract lift/drag from the forces
# function object and x/y displacement from the solid probe, then plot
# all four signals into forces-displacements.pdf.

import matplotlib

# select a non-interactive backend before pyplot is imported (headless runs)
matplotlib.use('Agg')

import os
import numpy as np
import matplotlib.pyplot as plt
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile

fileName = 'fluid/forces/0/forces.dat'
liftList = []
dragList = []

controlDict = ParsedParameterFile( 'fluid/system/controlDict' )
deltaT = controlDict['deltaT']

# Each data line looks like "t (((px py pz) (vx vy vz) ...))"; the first
# line is a header and is skipped (index == 0).
index = 0
with open ( fileName ) as FILE:
    for line in FILE:
        if index > 0:
            subString = line[ line.find( '(((' ) + 3 :-1 ]
            # pressure force vector is the first parenthesized triple
            pressureForce = np.array( subString[ : subString.find( ')' ) ].split(' '), dtype = np.float64 )
            subString = subString[ subString.find( '(' ) + 1 : ]
            # viscous force vector is the following triple
            viscousForce = np.array( subString[ : subString.find( ')' ) ].split(' '), dtype = np.float64 )
            # lift = y component, drag = x component of the total force
            liftList.append( pressureForce[1] + viscousForce[1] )
            dragList.append( pressureForce[0] + viscousForce[0] )
        index += 1

# Probe file: "time (ux uy uz)" per line, '#' marks comment/header lines.
fileName = "fluid/probesSolid/solid/0/U"
Ux = []
Uy = []
with open( fileName, 'r' ) as probeFile:
    for line in probeFile:
        if "#" in line:
            continue
        line = line.strip()
        time = float( line[ : line.find(" ") ] )
        disp = [ float( u ) for u in line[ line.find(" ") : ].strip(" ()").split( " " ) ]
        Ux.append( disp[0] )
        Uy.append( disp[1] )

timeList = np.arange( 1, len(liftList)+1 ) * deltaT

def addSubplot( position, values, ylabel ):
    """Draw one gridded subplot of *values* vs. time, clipping the x-axis
    so it starts at the first sample."""
    plt.subplot( position )
    plt.grid()
    plt.plot( timeList, values )
    plt.xlabel( "Time [s]" )
    plt.ylabel( ylabel )
    (x1, x2, y1, y2) = plt.axis()
    plt.axis( (timeList[0], x2, y1, y2) )

plt.figure( figsize=(20, 10) )
addSubplot( 221, liftList, "Lift [N]" )
addSubplot( 222, dragList, "Drag [N]" )
addSubplot( 223, Ux, "Displacement x [m]" )
addSubplot( 224, Uy, "Displacement y [m]" )
plt.savefig( "forces-displacements.pdf", bbox_inches = 'tight' )
plt.close()
| eterhofstede/FOAM-FSI | tutorials/fsi/cylinderFlap_FSI3/postprocess.py | Python | gpl-2.0 | 2,375 |
# -*- Mode: Python; py-indent-offset: 4 -*-
# this file contains code for loading up an override file. The override file
# provides implementations of functions where the code generator could not
# do its job correctly.
import fnmatch
import os
import re
import string
import sys
def class2cname(klass, method):
    """Turn a CamelCase class name plus a method name into a C-style
    lower_snake identifier, e.g. ('GtkWidget', 'show') -> 'gtk_widget_show'.
    """
    pieces = []
    for ch in klass:
        if ch.isupper():
            # uppercase letters start a new underscore-separated word
            pieces.append('_')
            pieces.append(ch.lower())
        else:
            pieces.append(ch)
    # drop the leading separator produced by the first capital letter
    return ''.join(pieces)[1:] + '_' + method
import_pat = re.compile(r'\s*import\s+(\S+)\.([^\s.]+)\s+as\s+(\S+)')
class Overrides:
    """Parsed representation of a code-generator override file.

    An override file is a sequence of sections separated by lines that
    contain only '%%'.  The first word of each section is a command
    (ignore, override, define, headers, body, init, include, import,
    modulename, ...) that decides how the rest of the section is stored.

    NOTE(review): this module is Python 2 code (``raise Exception, msg``,
    ``dict.has_key``, the ``string`` module) and will not run under
    Python 3 unmodified.
    """

    def __init__(self, filename=None, path=[]):
        # NOTE(review): mutable default for ``path`` -- benign here because
        # the list is only iterated, never mutated.
        self.modulename = None
        self.ignores = {}
        self.glob_ignores = []
        self.type_ignores = {}
        self.overrides = {}
        self.overridden = {}
        self.kwargs = {}
        self.noargs = {}
        self.onearg = {}
        self.staticmethod = {}
        self.classmethod = {}
        self.startlines = {}
        self.override_attrs = {}
        self.override_slots = {}
        self.headers = ''
        self.body = ''
        self.init = ''
        self.imports = []
        self.defines = {}
        self.functions = {}
        self.newstyle_constructors = {}
        self.path = [os.path.abspath(x) for x in path]
        if filename:
            self.handle_file(filename)

    def handle_file(self, filename):
        """Locate *filename* on ``self.path``, split it into '%%'-separated
        sections and feed each section to ``__parse_override``.

        The current working directory is temporarily changed while
        searching/parsing and restored at the end.
        """
        oldpath = os.getcwd()
        fp = None
        for path in self.path:
            os.chdir(oldpath)
            os.chdir(path)
            try:
                fp = open(filename, 'r')
                break
            except:
                os.chdir(oldpath)
        if not fp:
            raise Exception, "Couldn't find file %s" % filename
        dirname = path
        if dirname != oldpath:
            os.chdir(dirname)
        # read all the components of the file ...
        bufs = []
        startline = 1
        lines = []
        line = fp.readline()
        linenum = 1
        while line:
            if line == '%%\n' or line == '%%':
                # '%%' terminates the current section; remember where the
                # next one starts so error messages can point at it
                if lines:
                    bufs.append((string.join(lines, ''), startline))
                startline = linenum + 1
                lines = []
            else:
                lines.append(line)
            line = fp.readline()
            linenum = linenum + 1
        if lines:
            bufs.append((string.join(lines, ''), startline))
        if not bufs: return
        for buf, startline in bufs:
            self.__parse_override(buf, startline, filename)
        os.chdir(oldpath)

    def __parse_override(self, buffer, startline, filename):
        """Parse one '%%' section: the first word of the first line is the
        command, the rest of the first line its arguments, and the remaining
        lines (``rest``) its payload."""
        pos = string.find(buffer, '\n')
        if pos >= 0:
            line = buffer[:pos]
            rest = buffer[pos+1:]
        else:
            line = buffer ; rest = ''
        words = string.split(line)
        command = words[0]
        if (command == 'ignore' or
            command == 'ignore-' + sys.platform):
            "ignore/ignore-platform [functions..]"
            for func in words[1:]:
                self.ignores[func] = 1
            for func in string.split(rest):
                self.ignores[func] = 1
        elif (command == 'ignore-glob' or
              command == 'ignore-glob-' + sys.platform):
            "ignore-glob/ignore-glob-platform [globs..]"
            for func in words[1:]:
                self.glob_ignores.append(func)
            for func in string.split(rest):
                self.glob_ignores.append(func)
        elif (command == 'ignore-type' or
              command == 'ignore-type-' + sys.platform):
            "ignore-type/ignore-type-platform [typenames..]"
            for typename in words[1:]:
                self.type_ignores[typename] = 1
            for typename in string.split(rest):
                self.type_ignores[typename] = 1
        elif command == 'override':
            "override function/method [kwargs|noargs|onearg] [staticmethod|classmethod]"
            func = words[1]
            if 'kwargs' in words[1:]:
                self.kwargs[func] = 1
            elif 'noargs' in words[1:]:
                self.noargs[func] = 1
            elif 'onearg' in words[1:]:
                self.onearg[func] = True
            if 'staticmethod' in words[1:]:
                self.staticmethod[func] = True
            elif 'classmethod' in words[1:]:
                self.classmethod[func] = True
            if func in self.overrides:
                raise RuntimeError("Function %s is being overridden more than once" % (func,))
            self.overrides[func] = rest
            # startline + 1: payload begins on the line after the command
            self.startlines[func] = (startline + 1, filename)
        elif command == 'override-attr':
            "override-slot Class.attr"
            attr = words[1]
            self.override_attrs[attr] = rest
            self.startlines[attr] = (startline + 1, filename)
        elif command == 'override-slot':
            "override-slot Class.slot"
            slot = words[1]
            self.override_slots[slot] = rest
            self.startlines[slot] = (startline + 1, filename)
        elif command == 'headers':
            "headers"
            # embed a C '#line' directive so compiler errors point back at
            # the override file
            self.headers = '%s\n#line %d "%s"\n%s' % \
                           (self.headers, startline + 1, filename, rest)
        elif command == 'body':
            "body"
            self.body = '%s\n#line %d "%s"\n%s' % \
                        (self.body, startline + 1, filename, rest)
        elif command == 'init':
            "init"
            self.init = '%s\n#line %d "%s"\n%s' % \
                        (self.init, startline + 1, filename, rest)
        elif command == 'modulename':
            "modulename name"
            self.modulename = words[1]
        elif command == 'include':
            "include filename"
            for filename in words[1:]:
                self.handle_file(filename)
            for filename in string.split(rest):
                self.handle_file(filename)
        elif command == 'import':
            "import module1 [\n module2, \n module3 ...]"
            for line in string.split(buffer, '\n'):
                match = import_pat.match(line)
                if match:
                    self.imports.append(match.groups())
        elif command == 'define':
            "define funcname [kwargs|noargs|onearg] [classmethod|staticmethod]"
            "define Class.method [kwargs|noargs|onearg] [classmethod|staticmethod]"
            func = words[1]
            klass = None
            if func.find('.') != -1:
                # 'Class.method' form: store under the class bucket
                klass, func = func.split('.', 1)
                if not self.defines.has_key(klass):
                    self.defines[klass] = {}
                self.defines[klass][func] = rest
            else:
                self.functions[func] = rest
            if 'kwargs' in words[1:]:
                self.kwargs[func] = 1
            elif 'noargs' in words[1:]:
                self.noargs[func] = 1
            elif 'onearg' in words[1:]:
                self.onearg[func] = 1
            if 'staticmethod' in words[1:]:
                self.staticmethod[func] = True
            elif 'classmethod' in words[1:]:
                self.classmethod[func] = True
            self.startlines[func] = (startline + 1, filename)
        elif command == 'new-constructor':
            "new-constructor GType"
            gtype, = words[1:]
            self.newstyle_constructors[gtype] = True

    # --- query API used by the code generator -------------------------------

    def is_ignored(self, name):
        """True if *name* was listed in an ignore or matches an ignore-glob."""
        if self.ignores.has_key(name):
            return 1
        for glob in self.glob_ignores:
            if fnmatch.fnmatchcase(name, glob):
                return 1
        return 0

    def is_type_ignored(self, name):
        """True if *name* appeared in an ignore-type section."""
        return name in self.type_ignores

    def is_overriden(self, name):
        """True if an 'override' section exists for *name*."""
        return self.overrides.has_key(name)

    def is_already_included(self, name):
        """True if override() or define() was already emitted for *name*."""
        return self.overridden.has_key(name)

    def override(self, name):
        """Mark *name* as emitted and return its override body."""
        self.overridden[name] = 1
        return self.overrides[name]

    def define(self, klass, name):
        """Mark Class.method as emitted and return its define body."""
        self.overridden[class2cname(klass, name)] = 1
        return self.defines[klass][name]

    def function(self, name):
        """Return the body of a free-standing 'define' function."""
        return self.functions[name]

    def getstartline(self, name):
        """Return (line number, filename) where *name*'s body starts."""
        return self.startlines[name]

    def wants_kwargs(self, name):
        return self.kwargs.has_key(name)

    def wants_noargs(self, name):
        return self.noargs.has_key(name)

    def wants_onearg(self, name):
        return self.onearg.has_key(name)

    def is_staticmethod(self, name):
        return self.staticmethod.has_key(name)

    def is_classmethod(self, name):
        return self.classmethod.has_key(name)

    def attr_is_overriden(self, attr):
        return self.override_attrs.has_key(attr)

    def attr_override(self, attr):
        return self.override_attrs[attr]

    def slot_is_overriden(self, slot):
        return self.override_slots.has_key(slot)

    def slot_override(self, slot):
        return self.override_slots[slot]

    def get_headers(self):
        return self.headers

    def get_body(self):
        return self.body

    def get_init(self):
        return self.init

    def get_imports(self):
        return self.imports

    def get_defines_for(self, klass):
        """Return the {method: body} dict for *klass* (empty if none)."""
        return self.defines.get(klass, {})

    def get_functions(self):
        return self.functions
"""
KeepNote
Listener (Observer) pattern
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@alum.mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
class Listeners (object):
    """Observer registry: ``notify`` invokes every registered listener whose
    suppression count is zero, in registration order.
    """

    def __init__(self):
        # listeners in registration order; per-listener suppression counters
        self._callbacks = []
        self._muted = {}

    def add(self, listener):
        """Register a callable to receive future notifications."""
        self._callbacks.append(listener)
        self._muted[listener] = 0

    def remove(self, listener):
        """Unregister a previously added callable."""
        self._callbacks.remove(listener)
        del self._muted[listener]

    def clear(self):
        """Drop every registered listener."""
        self._callbacks = []
        self._muted = {}

    def notify(self, *args, **kargs):
        """Invoke each non-suppressed listener with the given arguments."""
        for callback in self._callbacks:
            if self._muted[callback] == 0:
                callback(*args, **kargs)

    def suppress(self, listener=None):
        """Raise the suppression count of one listener, or of all of them."""
        targets = [listener] if listener is not None else list(self._muted)
        for target in targets:
            self._muted[target] += 1

    def resume(self, listener=None):
        """Lower the suppression count of one listener, or of all of them."""
        targets = [listener] if listener is not None else list(self._muted)
        for target in targets:
            self._muted[target] -= 1
| brotchie/keepnote | keepnote/listening.py | Python | gpl-2.0 | 2,204 |
def format_date(dt):
    """Render *dt* as 'YYYY-MM-DD HH:MM:SS'."""
    fmt = '%Y-%m-%d %H:%M:%S'
    return dt.strftime(fmt)
| aabed/mhn | server/mhn/common/templatetags.py | Python | lgpl-2.1 | 65 |
#!/usr/bin/env python
# Build and print a "!log" IRC message recording who deployed which git
# revision, with any extra words from the command line appended.
import sys
import os
import subprocess

# current HEAD as "<sha> <subject>" (rev-list --format=oneline, one entry)
head_revision = subprocess.check_output(
    ["git", "rev-list", "HEAD", "--max-count=1", "--format=oneline"]
).decode('utf-8').strip()

message = '!log {user} {sudo_user}: Deployed {rev} {msg}'.format(
    user=os.environ['USER'],
    sudo_user=os.environ['SUDO_USER'],
    rev=head_revision,
    msg=' '.join(sys.argv[1:])
)
print(message)
| wikimedia/labs-tools-wikibugs2 | log_to_irc.py | Python | mit | 434 |
# -*- coding:utf-8 -*-
from nsnqtlib.strategies.strategy import basestrategy
import pandas as pd
import tushare as ts
class privatestrategy(basestrategy):
    '''
    Override the buy conditions and sell conditions of the base strategy.
    '''
    def __init__(self,startdate=(2011, 1, 1),enddate=[],emafast=12,emaslow=26,demday=9):
        # pre_MA/curr_MA track the moving-average alignment of the previous
        # and current bar so MA_judge_result can detect a False->True flip
        self.pre_MA = False
        self.curr_MA = False
        super(privatestrategy, self).__init__(startdate, enddate)
    # Fetch the list of stocks that should be traded
    def import_stocklist(self, stocklistname):
        """Read <stocklistname>.csv and left-pad numeric stock codes to the
        6-digit exchange format (e.g. '455' -> '000455')."""
        df = pd.read_csv(str(stocklistname) + '.csv')
        #df = pd.read_csv(str(stocklistname) + '.csv', parse_dates=['startdate'])
        df['code'] = df['code'].astype('str')
        count = 0
        df_len = len(df.index)
        while (count < df_len):
            stock_name = str(df.iat[count, 0])
            if len(stock_name) == 1:
                stock_name = '00000' + stock_name
                df.iat[count, 0] = stock_name
            elif len(stock_name) == 2:
                stock_name = '0000' + stock_name
                df.iat[count, 0] = stock_name
            elif len(stock_name) == 3:
                stock_name = '000' + stock_name
                df.iat[count, 0] = stock_name
            elif len(stock_name) == 4:
                stock_name = '00' + stock_name
                df.iat[count, 0] = stock_name
            elif len(stock_name) == 5:
                stock_name = '0' + stock_name
                df.iat[count, 0] = stock_name
            count = count + 1
        return df
    '''
    def _getdata(self,collection="600455.SH",db="ml_security_table"):
        if db == "ml_security_table":
            query = self.m.read_data(db,collection,filt={"date":{"$gt": self.startdate}})
            out = self.formatlist
        elif db == "tushare":
            query = ts.get_hist_data(collection, start='2005-01-01', end='2016-11-18', )
            out = self.formatlist
        return self.formatquery(query,out)
    '''
    '''
    def _getdata(self,collection="600455.SH",db="ml_security_table"):
        #query = pd.read_csv(str(collection) + '.csv', parse_dates=['date'])
        #print(query)
        query = self.m.read_data(db,collection,filt={"date":{"$gt": self.startdate}})
        out = self.formatlist
        return self.formatquery(query,out)
    '''
    def historyreturn(self, collection, par):
        """Backtest one stock: walk its bars, open positions when buy()
        fires and close them when sell() fires.

        Returns (trading_record, holding_record): closed trades and
        positions still open at the end of the data.
        """
        trading_record = []
        holding_record = []
        #print(collection)
        data = self._getdata(collection)
        df = pd.DataFrame(data)
        df.to_csv(collection+'.csv')
        #print(data)
        # keep only rows with non-zero volume (fillna(0) marks missing bars)
        lst = [l for l in data[self.formatlist].fillna(0).values if l[1] != 0]
        count = 0
        for line in lst[:]:
            isbuy = self.buy(lst, count, par)
            for b in holding_record[:]:
                issell, traderecord = self.sell(lst, count, b)
                if issell:
                    holding_record.remove(b)
                    trading_record.append(traderecord)
                    print (traderecord)
            if isbuy:
                #holding_record.append((line, count, collection))
                holding_record.append(([i for i in line], count, collection))
            count += 1
        return trading_record, holding_record
    def looplist_historyreturn(self, df):
        """Run historyreturn for every row of the stock-list DataFrame;
        extra columns become the per-stock parameter list ``par``.
        Stocks whose backtest raises are collected in ``error_list``."""
        error_list = []
        count = 0
        df_len = len(df.index)
        column_num = len(df.count())
        while (count < df_len):
            columncount = 1
            par = []
            while (columncount < column_num):
                par.append(df.iat[count, columncount])
                columncount = columncount + 1
            print(par)
            stock_name = str(df.iat[count, 0])
            try:
                tr,hr = self.historyreturn(stock_name, par)
                self.trading_records.extend(tr)
                self.holding_records.extend(hr)
            except:
                # NOTE(review): bare except -- any failure for this stock is
                # silently recorded and the loop continues
                error_list.append(stock_name)
            count = count + 1
        print(error_list)
        return self.trading_records,self.holding_records
    def buy(self, lst, count, par):
        ''' input:
                line: [] ,row data in every stock data,default is in self.formatlist = ["date","volume","close","high","low","open","pre_close"]
                count: float, the number to the row since first row
            ouput:
                bool, can buy or not buy
                [], buy record,if can't buy,is empty list
        '''
        vol_day = 10
        price_day = 60
        vol_weight = 1.2
        dat = lst[count][0]
        vol = lst[count][1]
        close = lst[count][2]
        high = lst[count][3]
        low = lst[count][4]
        # NOTE(review): 'open' shadows the builtin within this method
        open = lst[count][5]
        pre_close = lst[count][6]
        # need at least 60 prior bars for the lookback windows below
        if count <= 60: return False
        vol_data = [i[1] for i in lst[count - vol_day:count]]
        maxprice = max([i[3]] for i in lst[count - price_day:count])[0]
        minprice = min([i[4]] for i in lst[count - price_day:count])[0]
        maxindex = [i for i in range(count - price_day, count) if lst[i][3] == maxprice][0]
        '''
        if self.buy_condition1(vol, vol_data, vol_weight) and \
                self.buy_condition2(close, lst[count - 1][3]) and \
                self.buy_condition3(close, maxprice) and \
                self.buy_condition4(close, minprice) and \
                self.buy_condition5(count, maxindex):
            return True
        '''
        #and self.condition7(close, par[0])
        # buy when: past the per-stock start date, the MAs just aligned
        # bullishly, and the bar is not (nearly) limit-up
        if self.condition6(dat, par[1]) and self.MA_judge_result(lst, count) and self.condition9(close, pre_close):
            #print(dat)
            return True
        return False
        #print(self.waitbuy)
        #and self.condition9(close, pre_close)
        '''
        if self.waitbuy == True: # and self.bought == False:
            if self.condition8(close,low, pre_close):
                self.bought = True
                self.waitbuy = False
                return True
            return False
        '''
    def waitforbuy(self, dat, close, par):
        """True once the date is past par[1] and close is below par[0]."""
        if self.condition6(dat, par[1]) and \
                self.condition7(close, par[0]):
            return True
        return False
    def sell(self, lst, count, buyrecord):
        """Decide whether to close *buyrecord* on bar *count*.

        Exits on: +20% intraday gain, -5% intraday loss, 30-day holding
        limit, or a new 30-day high with a weak close.
        Returns (issell, traderecord) where traderecord is
        [collection, buy_date, sell_date, hold_days, return, ''].
        """
        currentday_high = lst[count][3]
        gain_grads = 0.2
        loss_grads = -0.05
        dayout = 30
        currentday_low = lst[count][4]
        sell_date = lst[count][0]
        close = lst[count][2]
        high = lst[count][3]
        low = lst[count][4]
        buy_price = buyrecord[0][2]
        hold_days = count - buyrecord[1]
        buy_date = buyrecord[0][0]
        collection = buyrecord[2]
        if self.stopgain_condition(buy_price, currentday_high, gain_grads):
            self.bought = False
            gain_grads = (currentday_high - buy_price) / buy_price
            sell_date = sell_date.strftime('%Y-%m-%d')
            buy_date = buy_date.strftime('%Y-%m-%d')
            #sell_date = changedateformat(sell_date)
            return True, [collection, buy_date, sell_date, hold_days, gain_grads, '']
        elif self.stoploss_condition(buy_price, currentday_low, loss_grads):
            sell_date = sell_date.strftime('%Y-%m-%d')
            buy_date = buy_date.strftime('%Y-%m-%d')
            return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
        elif self.holdingtime_condition(hold_days, dayout):
            sell_date = sell_date.strftime('%Y-%m-%d')
            buy_date = buy_date.strftime('%Y-%m-%d')
            return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
        elif self.Sellcondition1(lst,high, count, 30) and self.Sellcondition2(lst,high, low, close):
            sell_date = sell_date.strftime('%Y-%m-%d')
            buy_date = buy_date.strftime('%Y-%m-%d')
            return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
        return False, None
    '''
    elif self.holdingtime_condition(hold_days, dayout):
        sell_date = sell_date.strftime('%Y-%m-%d')
        buy_date = buy_date.strftime('%Y-%m-%d')
        return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
    '''
    def stopgain_condition(self, buy_price, current_price, grads=0.1):
        """Take-profit: gain since entry reached *grads*."""
        if (current_price - buy_price) / buy_price >= grads:
            return True
        return False
    def stoploss_condition(self, buy_price, current_price, grads=-0.05):
        """Stop-loss: loss since entry reached *grads*."""
        if (current_price - buy_price) / buy_price <= grads:
            return True
        return False
    def holdingtime_condition(self, hold_days, dayout=10):
        """Time exit: position held at least *dayout* bars."""
        if hold_days >= dayout:
            return True
        return False
    def mean_volume(self, data):
        """Arithmetic mean of a volume list."""
        m_vol = sum(data) / len(data)
        return m_vol
    def buy_condition1(self, vol, vol_data, vol_weight=1.2):
        # volume spike vs. the recent average
        if vol >= vol_weight * self.mean_volume(vol_data):
            return True
        return False
    def buy_condition2(self, close, last_high):
        # close breaks the previous bar's high
        if close >= last_high:
            return True
        return False
    def buy_condition3(self, close, high, grads=0.2):
        # still at least *grads* below the lookback high
        if (high - close) / high >= grads:
            return True
        return False
    def buy_condition4(self, close, low, grads=0.05):
        # within *grads* of the lookback low
        if (close - low) / low <= grads:
            return True
        return False
    def buy_condition5(self, currentday, highday, grads=60):
        # the lookback high is at least *grads* bars old
        if currentday - highday >= grads:
            return True
        return False
    def condition6(self, dat, startdate):
        """True when bar date *dat* is strictly after *startdate*
        (compared as 'YYYY-MM-DD' strings)."""
        newdat = pd.to_datetime(dat)
        newdat = newdat.strftime('%Y-%m-%d')
        newstartdate = pd.to_datetime(startdate)
        newstartdate = newstartdate.strftime('%Y-%m-%d')
        # print(newdat)
        # print(newstartdate)
        if newdat > newstartdate:
            #print(newdat)
            return True
        return False
    def condition7(self, close, cashprice):
        # close below the per-stock reference price
        if close < cashprice:
            return True
        return False
    def condition8(self, close, low, pre_close):
        # gap up: today's low above yesterday's close
        if low > pre_close:
            return True
        return False
    def condition9(self, close, pre_close):
        # not (nearly) limit-up: daily gain below ~9.9%
        if (close - pre_close) / pre_close < 0.099:
            return True
        return False
    def MA_judge_result(self, lst, count):
        """True only on the bar where the bullish MA alignment first
        appears (False on the previous bar, True now)."""
        self.curr_MA = self.MA_condition(lst,count)
        if self.pre_MA == False and self.curr_MA == True:
            self.pre_MA = self.curr_MA
            return True
        self.pre_MA = self.curr_MA
        return False
    def MA_condition(self,lst,count):
        """Bullish alignment: MA5 > MA10 > MA20 > MA30 of the close."""
        if self.MA_result(lst,count,5) > self.MA_result(lst,count, 10) and \
                self.MA_result(lst, count, 10) > self.MA_result(lst, count, 20) and \
                self.MA_result(lst, count, 20) > self.MA_result(lst, count, 30):
            #print(count)
            return True
        return False
    def MA_result(self, lst,count, meanday):
        """Simple moving average of the close over the last *meanday* bars
        (inclusive of the current bar)."""
        meanlist = [i[2] for i in lst[count - meanday + 1:count + 1]]
        return sum(meanlist) / meanday
    def Sellcondition1(self, lst, high, count, maxday):
        # today's high exceeds the max close of the last *maxday* bars
        meanlist = [i[2] for i in lst[count - maxday + 1:count + 1]]
        if high > max(meanlist):
            return True
        return False
    def Sellcondition2(self, lst, high, low, close):
        # weak close: close sits in the bottom ~1/6 of the day's range
        if high - low > 0:
            if (close-low)/(high-close) < 0.2:
                return True
        return False
if __name__ == '__main__':
    # Backtest every stock listed in privatestock.csv and dump the resulting
    # trade/holding records to CSV (save methods come from basestrategy).
    s = privatestrategy()
    df_stocklist = s.import_stocklist("privatestock")
    print(df_stocklist)
    #s.setlooplist()
    s.looplist_historyreturn(df_stocklist)
    s.savetrading2csv()
    s.saveholding2csv()
    # report = reportforms(df)
    # report.cumulative_graph()
    # report.positiongain(100)
| harry0519/nsnqt | nsnqtlib/strategies/privatestrategy.py | Python | bsd-2-clause | 11,881 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.aws_hook import AwsHook
class RedshiftHook(AwsHook):
    """
    Interact with AWS Redshift, using the boto3 library
    """

    def get_conn(self):
        """Return a boto3 client for the Redshift service."""
        return self.get_client_type('redshift')

    # TODO: Wrap create_cluster_snapshot

    def cluster_status(self, cluster_identifier):
        """
        Return status of a cluster, or 'cluster_not_found' if it does not exist

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        """
        conn = self.get_conn()
        try:
            response = conn.describe_clusters(
                ClusterIdentifier=cluster_identifier)['Clusters']
            return response[0]['ClusterStatus'] if response else None
        except conn.exceptions.ClusterNotFoundFault:
            return 'cluster_not_found'

    def delete_cluster(
            self,
            cluster_identifier,
            skip_final_cluster_snapshot=True,
            final_cluster_snapshot_identifier=''):
        """
        Delete a cluster and optionally create a final snapshot

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        :param skip_final_cluster_snapshot: determines cluster snapshot creation
        :type skip_final_cluster_snapshot: bool
        :param final_cluster_snapshot_identifier: name of final cluster snapshot
        :type final_cluster_snapshot_identifier: str
        """
        response = self.get_conn().delete_cluster(
            ClusterIdentifier=cluster_identifier,
            SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
            FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier
        )
        return response['Cluster'] if response['Cluster'] else None

    def describe_cluster_snapshots(self, cluster_identifier):
        """
        Gets a list of snapshots for a cluster, newest first

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        """
        response = self.get_conn().describe_cluster_snapshots(
            ClusterIdentifier=cluster_identifier
        )
        if 'Snapshots' not in response:
            return None
        # BUGFIX: filter() returns a lazy iterator on Python 3, which has no
        # .sort() method -- materialize the truthy-status snapshots first.
        snapshots = [snapshot for snapshot in response['Snapshots']
                     if snapshot['Status']]
        snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True)
        return snapshots

    def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier):
        """
        Restores a cluster from its snapshot

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        :param snapshot_identifier: unique identifier for a snapshot of a cluster
        :type snapshot_identifier: str
        """
        response = self.get_conn().restore_from_cluster_snapshot(
            ClusterIdentifier=cluster_identifier,
            SnapshotIdentifier=snapshot_identifier
        )
        return response['Cluster'] if response['Cluster'] else None

    def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier):
        """
        Creates a snapshot of a cluster

        :param snapshot_identifier: unique identifier for a snapshot of a cluster
        :type snapshot_identifier: str
        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        """
        response = self.get_conn().create_cluster_snapshot(
            SnapshotIdentifier=snapshot_identifier,
            ClusterIdentifier=cluster_identifier,
        )
        return response['Snapshot'] if response['Snapshot'] else None
| zack3241/incubator-airflow | airflow/contrib/hooks/redshift_hook.py | Python | apache-2.0 | 4,181 |
# Resolver Library of Universal Mathematics System
# Copyright (C) 2016 Zhang Chang-kai #
# Contact via: phy.zhangck@gmail.com #
# General Public License version 3.0 #
'''Resolver Library of UnivMaths System'''
class Formulation(object):
    '''Formula Information for Resolver'''

    __slots__ = ('Formula', 'Truth', 'Tendency')

    # registry of every distinct formula seen so far (interning pool)
    instances = list()

    def __new__(cls, Formula, Truth, Tendency):
        # reuse the existing instance when this formula was seen before;
        # __init__ then re-runs and refreshes Truth/Tendency on it
        for existing in Formulation.instances:
            if existing.Formula == Formula:
                return existing
        return object.__new__(cls)

    def __init__(self, Formula, Truth, Tendency):
        self.Formula = Formula
        self.Truth = Truth
        self.Tendency = Tendency
        if self not in Formulation.instances:
            Formulation.instances.append(self)

    def __repr__(self):
        tendency_text = "Positive" if self.Tendency is True else "Negative"
        return "Formula: " + self.Formula + \
               " Truth: " + str(self.Truth) + \
               " Tendency: " + tendency_text

    def __str__(self):
        return self.__repr__()

    @classmethod
    def Reset(cls):
        '''Forget every interned formula.'''
        cls.instances = list()
# Regex building blocks for the resolver's formula matching.
# BelongTo_True / BelongTo_False match a whole "x \in S" / "x \notin S"
# formula and capture it in a named group; BelongTo_Op captures the element
# and set identifiers for either operator.
BelongTo_True = r'(' + r'?P<TRUEFORMULA>' + \
    r'[a-zA-Z\_][0-9a-zA-Z\_]*\s*' + r'\\in\s+' + \
    r'[a-zA-Z\_][0-9a-zA-Z\_]*' + r')'
BelongTo_False = r'(' + r'?P<FALSEFORMULA>' + \
    r'[a-zA-Z\_][0-9a-zA-Z\_]*\s*' + r'\\notin\s+' + \
    r'[a-zA-Z\_][0-9a-zA-Z\_]*' + r')'
BelongTo_Op = r'([a-zA-Z\_][0-9a-zA-Z\_]*\s*)' + \
    r'(?:\\in|\\notin)\s*' + \
    r'([a-zA-Z\_][0-9a-zA-Z\_]*)'
# grouped pattern lists consumed elsewhere in the resolver
BasicFormulation = \
    [
        BelongTo_True, BelongTo_False
    ]
BasicOperation = \
    [
        BelongTo_Op
    ]
# End of Resolver Library of UnivMathSys
| Phy-David-Zhang/UnivMathSys | Interpreter/rslvlib.py | Python | gpl-3.0 | 1,725 |
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Read metadata from RAR archives
'''
import os
from calibre.ptempfile import PersistentTemporaryFile, TemporaryDirectory
from calibre.libunrar import extract_member, names
from calibre import CurrentDir
def get_metadata(stream):
    """Extract ebook metadata from a RAR archive stream.

    Comic archives are delegated to the 'cbr' handler; otherwise the first
    member with a recognized ebook extension is extracted and parsed.
    Raises ValueError when no ebook member is found.
    """
    from calibre.ebooks.metadata.archive import is_comic
    from calibre.ebooks.metadata.meta import get_metadata

    path = getattr(stream, 'name', False)
    if not path:
        # stream has no backing file: spool it to a temporary .rar on disk
        pt = PersistentTemporaryFile('_rar-meta.rar')
        pt.write(stream.read())
        pt.close()
        path = pt.name
    path = os.path.abspath(path)

    file_names = list(names(path))
    if is_comic(file_names):
        return get_metadata(stream, 'cbr')

    ebook_extensions = ('lit', 'opf', 'prc', 'mobi', 'fb2', 'epub',
                        'rb', 'imp', 'pdf', 'lrf', 'azw', 'azw1', 'azw3')
    for member in file_names:
        ext = os.path.splitext(member)[1].lower().lstrip('.')
        if ext in ebook_extensions:
            with TemporaryDirectory() as tdir:
                with CurrentDir(tdir):
                    extracted = extract_member(path, match=None, name=member,
                                               as_file=True)[1]
                    return get_metadata(extracted, ext)
    raise ValueError('No ebook found in RAR archive')
| yeyanchao/calibre | src/calibre/ebooks/metadata/rar.py | Python | gpl-3.0 | 1,451 |
# Copyright 2013 Sascha Peilicke
# Copyright 2016 Thomas Bechtold
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from distutils.core import Command
class DocCommand(Command):
    """Generate manpage and HTML documentation with xsltproc.

    Best-effort: missing tools are silently tolerated so packaging does
    not fail on systems without the DocBook toolchain.
    """

    description = "Generate manpage, HTML and PDF documentation"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        stylesheet_root = "/usr/share/xml/docbook/stylesheet/nwalsh/current"
        try:
            subprocess.call(["xsltproc", "--output", "doc/py2pack.html",
                             stylesheet_root + "/html/docbook.xsl",
                             "doc/src/py2pack.xml.in"])
            subprocess.call(["xsltproc", "--output", "doc/py2pack.1",
                             stylesheet_root + "/manpages/docbook.xsl",
                             "doc/src/py2pack.xml.in"])
        except OSError:
            # narrowed from a bare except: only "xsltproc not installed"
            # (and similar OS-level failures) are deliberately ignored
            pass
class SPDXUpdateCommand(Command):
    """Fetch the published SPDX license-mapping spreadsheet and pickle it
    to LICENSE_FILE for use by py2pack at runtime."""

    description = "Update SDPX license map"
    user_options = []

    # pickled {old-license-string: SPDX-identifier} mapping
    LICENSE_FILE = 'py2pack/spdx_license_map.p'

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Not part of any requirements, could happen through setup(setup_requires=...)
        import pickle
        import lxml.html
        import requests

        response = requests.get('https://docs.google.com/spreadsheet/pub?key=0AqPp4y2wyQsbdGQ1V3pRRDg5NEpGVWpubzdRZ0tjUWc')
        html = lxml.html.fromstring(response.text)
        licenses = {}
        # each table row holds (row header, new SPDX id, old license string)
        for tr in html.cssselect('table.waffle > tbody > tr'):
            _, td_new, td_old = tr.getchildren()
            licenses[td_old.text] = td_new.text
            # also add the spdx license as key (i.e. key/value "Apache-2.0"->"Apache-2.0")
            # Otherwise licenses for packages which already have a SPDX compatible license
            # are not correctly recognized
            licenses[td_new.text] = td_new.text
        # context manager: previously the pickle file handle was never closed
        with open(SPDXUpdateCommand.LICENSE_FILE, 'wb') as license_file:
            pickle.dump(licenses, license_file)
def get_cmdclass():
    """Extra distutils command classes exposed by this module.

    Note: only the SPDX license-map update command is registered here;
    DocCommand is defined above but deliberately not included.
    """
    return {
        "spdx_update": SPDXUpdateCommand,
    }
| toabctl/py2pack | py2pack/setup.py | Python | apache-2.0 | 2,578 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-08-26 19:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: adds the choice field
    # Profile.reason_for_registration, defaulting to 'Facilitate'.
    # Generated code -- do not restyle by hand.

    dependencies = [
        ('studygroups', '0096_delete_activity'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='reason_for_registration',
            field=models.CharField(choices=[('Learn', 'Learn'), ('Facilitate', 'Facilitate'), ('Join the community', 'Join the community')], default='Facilitate', max_length=30),
        ),
    ]
| p2pu/learning-circles | studygroups/migrations/0097_profile_reason_for_registration.py | Python | mit | 596 |
# Copyright (C) 2006, Red Hat, Inc.
# Copyright (C) 2007, One Laptop Per Child
# Copyright (C) 2009, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
import hulahop
# this is for storing the gecko stuff (cache, cookies, plugins etc.)
gecko_path = os.environ.get('HOME', '.')
gecko_path = os.path.join(gecko_path, ".pyjd")
hulahop.startup(gecko_path)
from hulahop.webview import WebView
import gtk
import gtk.gdk
import gobject
import xpcom
from xpcom.nsError import *
from xpcom import components
from xpcom.components import interfaces
from progresslistener import ProgressListener
class ContentInvoker:
    """Adapt a Python callback to the XPCOM nsIDOMEventListener interface.

    XPCOM delivers DOM events to ``handleEvent``; the wrapped callback is
    invoked with the originating node, the event, and a fixed ``False``
    capture flag.
    """
    _com_interfaces_ = interfaces.nsIDOMEventListener

    def __init__(self, node, event_fn):
        self._target = node
        self._callback = event_fn

    def handleEvent(self, event):
        # Trailing False mirrors the capture-flag convention of callers.
        self._callback(self._target, event, False)
class Browser(WebView):
    """Gecko-backed browser widget that hosts a pyjamas application.

    Extends hulahop's WebView, wires progress callbacks, and exposes the
    XPCOM/DOM helpers (event listeners, XMLHttpRequest, DOMParser, window
    and document accessors) expected by the ``__pyjamas__`` bridge.
    """
    def __init__(self, application, appdir):
        WebView.__init__(self)
        self.platform = 'hulahop'
        self.progress = ProgressListener()
        self.application = application  # URI or file path of the app to load
        self.appdir = appdir  # optional dir prepended to sys.path on load
        self.already_initialised = False  # guards one-time setup in _loaded
        io_service_class = components.classes[ \
            "@mozilla.org/network/io-service;1"]
        io_service = io_service_class.getService(interfaces.nsIIOService)
        # Use xpcom to turn off "offline mode" detection, which disables
        # access to localhost for no good reason. (Trac #6250.)
        io_service2 = io_service_class.getService(interfaces.nsIIOService2)
        io_service2.manageOfflineStatus = False
        self.progress.connect('loading-stop', self._loaded)
        self.progress.connect('loading-progress', self._loading)
    def _alert(self, txt):
        # Modal alert built from raw GTK widgets; the XPCOM prompt-service
        # variant below is left disabled.
        print "_alert", txt
        #self.get_prompt_svc().alert(None, "Alert", txt)
        def close(w):
            dialog.destroy()
        dialog = gtk.Dialog("Alert", None, gtk.DIALOG_DESTROY_WITH_PARENT)
        label = gtk.Label(txt)
        dialog.vbox.add(label)
        label.show()
        button = gtk.Button("OK")
        dialog.action_area.pack_start (button, True, True, 0)
        button.connect("clicked", close)
        button.show()
        dialog.run ()
    def get_prompt_svc(self):
        # XPCOM prompt service (currently unused -- see _alert).
        prompt_svc_cls = components.classes[ \
            "@mozilla.org/embedcomp/prompt-service;1"]
        return prompt_svc_cls.createInstance(interfaces.nsIPromptService)
    def load_app(self):
        """Normalise self.application to a URI and begin loading it."""
        uri = self.application
        if uri.find(":") == -1:
            # assume file
            uri = 'file://'+os.path.abspath(uri)
            self.application = uri
        self.load_uri(uri)
    def do_setup(self):
        WebView.do_setup(self)
        self.progress.setup(self)
    def _addXMLHttpRequestEventListener(self, node, event_name, event_fn):
        # Capture flag False here, unlike addEventListener below.
        listener = xpcom.server.WrapObject(ContentInvoker(node, event_fn),
                                           interfaces.nsIDOMEventListener)
        print event_name, listener
        node.addEventListener(event_name, listener, False)
        return listener
    def addEventListener(self, node, event_name, event_fn):
        """Attach event_fn to node for event_name; returns the XPCOM
        listener wrapper (keep a reference to avoid premature GC)."""
        listener = xpcom.server.WrapObject(ContentInvoker(node, event_fn),
                                           interfaces.nsIDOMEventListener)
        node.addEventListener(event_name, listener, True)
        return listener
    def mash_attrib(self, attrib_name):
        # Identity on this platform; other pyjamas backends rewrite names.
        return attrib_name
    def _addWindowEventListener(self, event_name, event_fn, win=None):
        # Like addEventListener but defaults to the root window.
        if win is None:
            win = self.window_root
        listener = xpcom.server.WrapObject(ContentInvoker(win, event_fn),
                                           interfaces.nsIDOMEventListener)
        win.addEventListener(event_name, listener, True)
        return listener
    def getDOMParser(self):
        xml_svc_cls = components.classes[ \
            "@mozilla.org/xmlextras/domparser;1"]
        return xml_svc_cls.createInstance(interfaces.nsIDOMParser)
    def getXmlHttpRequest(self):
        xml_svc_cls = components.classes[ \
            "@mozilla.org/xmlextras/xmlhttprequest;1"]
        return xml_svc_cls.createInstance(interfaces.nsIXMLHttpRequest)
    def getUri(self):
        return self.application
    def getDomWindow(self):
        return self.get_dom_window()
    def getDomDocument(self):
        return self.get_dom_window().document
    def _loaded(self, progress_listener):
        """One-time hook run when page loading stops: installs this
        browser as the pyjamas main frame and extends sys.path."""
        print "loaded"
        if self.already_initialised:
            return
        self.already_initialised = True
        dw = self.get_dom_window()
        doc = dw.document
        # Imported late: __pyjamas__ is only meaningful once Gecko is up.
        from __pyjamas__ import pygwt_processMetas, set_main_frame
        from __pyjamas__ import set_gtk_module
        set_main_frame(self)
        set_gtk_module(gtk)
        (pth, app) = os.path.split(self.application)
        if self.appdir:
            pth = os.path.abspath(self.appdir)
        sys.path.append(pth)
        #for m in pygwt_processMetas():
        #    minst = module_load(m)
        #    minst.onModuleLoad()
    def _loading(self, progress_listener, progress):
        # Progress callback; intentionally a no-op.
        pass
        #print "loading", progress, self.getDomWindow().location.href
    def _trigger_fake_button(self):
        # Synthesise a DOM click on the hidden workaround button so that
        # Gecko dispatches an event, which in turn drains timer_q
        # (see _timer_callback_workaround / add_timer_queue).
        doc = self.getDomDocument()
        wnd = self.getDomWindow()
        element = self._hack_timer_workaround_bug_button
        evt = doc.createEvent('MouseEvents')
        evt.initMouseEvent("click", True, True, wnd, 1, 0, 0, 0, 0, False,
                           False, False, False, 0, element)
        element.dispatchEvent(evt)
    def _timer_callback_workaround(self, *args):
        # Drain the module-level queue of deferred callables (LIFO order).
        global timer_q
        while timer_q:
            fn = timer_q.pop()
            fn()
def is_loaded():
    # True once the global Browser created by setup() has completed its
    # one-time initialisation in Browser._loaded.
    return wv.already_initialised
# Queue of deferred callables drained by Browser._timer_callback_workaround.
# The module-level "global" statement is a no-op, kept from the original.
global timer_q
timer_q = []
def add_timer_queue(fn):
    # Defer fn until the next DOM event: queue it, then synthesise a click
    # on the hidden workaround button so Gecko delivers an event that
    # drains the queue (Browser._timer_callback_workaround).
    timer_q.append(fn)
    wv._trigger_fake_button()
    #DOM.buttonClick(self.b.getElement())
    # hope and pray that an event occurs!
    #event = gtk.gdk.new()
    #gtk.gdk.push(event)
def run(one_event=False, block=True):
    """Drive the GTK main loop.

    With ``one_event`` true, service at most one iteration (blocking only
    if ``block`` is set or events are pending) and return; otherwise spin
    forever, flushing stdout after every iteration.
    """
    if not one_event:
        # Endless event pump -- never returns.
        while True:
            gtk.main_iteration()
            sys.stdout.flush()
    if block or gtk.events_pending():
        gtk.main_iteration()
    sys.stdout.flush()
def setup(application, appdir=None, width=800, height=600):
    """Create the top-level GTK window, load the pyjamas app into a global
    Browser instance (``wv``), and pump events until loading completes."""
    gtk.gdk.threads_init()
    win = gtk.Window(gtk.WINDOW_TOPLEVEL)
    win.set_size_request(width, height)
    win.connect('destroy', gtk.main_quit)
    global wv
    wv = Browser(application, appdir)
    wv.show()
    win.add(wv)
    win.show()
    wv.load_app()
    # Service events until Browser._loaded has run and the queue is empty,
    # then hand control back to the caller.
    while 1:
        if is_loaded() and not gtk.events_pending():
            return
        run(one_event=True)
def module_load(m):
    """Import module *m* and return an instance of its same-named class.

    Equivalent to ``from m import m; m()`` for a dynamic module name, but
    implemented with :mod:`importlib` instead of formatting and exec'ing a
    source-code string (the old ``exec`` statement was also a syntax error
    under Python 3 and offered no protection against malformed names).

    Parameters
    ----------
    m: str
        Module name; by pyjamas convention the module defines a class of
        the same name with a no-argument constructor.

    Returns
    -------
    The freshly constructed instance of that class.

    Raises
    ------
    ImportError if the module cannot be imported; AttributeError if it
    does not define an attribute named *m*.
    """
    import importlib
    module = importlib.import_module(m)
    # pyjamas convention: the module exposes a class named after itself.
    return getattr(module, m)()
| certik/pyjamas | pyjd/hula.py | Python | apache-2.0 | 7,643 |
"""
Utilities for collecting and exporting group constants from a
branching file
"""
from itertools import product
from warnings import warn
from collections.abc import Iterable
from numpy import empty, nan, array, ndarray
from serpentTools import BranchingReader
__all__ = [
'BranchCollector',
'BranchedUniv',
]
class BranchedUniv(object):
    """
    Class for storing cross sections for a single universe across branches
    .. versionadded:: 0.7.0
    Parameters
    ----------
    univID: str or int
        Unique ID for this universe
    collector: :class:`BranchCollector`
        Class that parses and places branched coefficient data
    ndims: int or iterable
        Number of perturbation dimensions
    Attributes
    ----------
    filePath: str
        Location of the file that stores the data on this object
    univID: str or int
        Unique ID for this universe
    collector: :class:`BranchCollector`
        Class that parses and places branched coefficient data
    xsTables: dict
        Dictionary with keys representing specific values, e.g.
        ``'infTot'`` and ``'b1Diffcoeff'``. Corresponding values
        are :class:`BranchedDataTable` objects that store
        cross section and group constant data across perturbation states
    """
    # Acts largely as a dictionary storing group constants
    # under their cross section names from SERPENT
    # Subclass of dictionary?
    __slots__ = (
        'filePath', 'univID', 'collector',
        '_ndims', 'xsTables',
    )

    def __init__(self, univID, collector, ndims=None):
        # Accept a BranchCollector instance (normal) or the class itself.
        assert (
            isinstance(collector, BranchCollector)
            or issubclass(collector, BranchCollector))
        self.filePath = collector.filePath
        self.univID = univID
        self.collector = collector
        if ndims is None:
            # Infer dimensionality from the collector's axis, dropping the
            # leading "Universe" dimension.
            self._ndims = len(collector.axis[1:])
        else:
            # Accept an iterable (use its length) or a plain number.
            if hasattr(ndims, '__iter__'):
                ndims = len(ndims)
            elif isinstance(ndims, (int, float)):
                ndims = int(ndims)
            assert ndims > 0
            self._ndims = ndims
        self.xsTables = {}

    def __getitem__(self, key):
        """Access the xsTables dictionary"""
        return self.xsTables[key]

    def __setitem__(self, key, data):
        # Validate that the incoming array matches the expected number of
        # perturbation dimensions and the per-dimension state counts
        # before storing it.
        shape = data.shape
        assert len(shape) == self._ndims
        for pertIndex, pertState in enumerate(self.states):
            assert len(pertState) == shape[pertIndex], (
                "{} {}".format(shape[pertIndex], pertState))
        self.xsTables[key] = data

    def items(self):
        """Iterate over names of cross sections and associated objects"""
        for name, obj in self.xsTables.items():
            yield name, obj

    @property
    def states(self):
        """
        Iterable describing the names or values of each perturbation
        branch. Length is equal to that of :attr:`perturbations`, and
        the ``i``-th index of ``states`` indicates the values
        perturbation ``perturbations[i]`` experiences.
        See Also
        --------
        :attr:`BranchCollector.states`
        """
        # Delegated to the shared collector -- all universes see the
        # same perturbation structure.
        return self.collector.states

    @states.setter
    def states(self, value):
        self.collector.states = value

    @property
    def axis(self):
        """
        Tuple describing axis of underlying data
        .. note::
            When setting, the universe index of the
            axis should not be changed. The changes
            are passed on to :attr:`BranchCollector.axis`
            with an indicator for universe placed in the
            correct spot
        Examples
        --------
        >>> col.axis
        ('Universe', 'BOR', 'TFU', 'Burnup', 'Group')
        >>> u0 = col.universes[0]
        >>> u0.axis == col.axis[1:]
        True
        >>> u0.axis = ['boron conc', 'fuel temp', 'burnup', 'group']
        >>> u0.axis
        ('boron conc', 'fuel temp', 'burnup', 'group')
        >>> col.axis
        ('Universe', 'boron conc', 'fuel temp', 'burnup', 'group')
        See Also
        --------
        :class:`BranchCollector.axis`
        """
        # Strip the leading "Universe" dimension of the collector axis.
        return self.collector.axis[1:]

    @axis.setter
    def axis(self, value):
        if not isinstance(value, tuple):
            value = tuple(value)
        # Re-insert the universe dimension before handing to the collector.
        self.collector.axis = ("Universe", ) + value

    @property
    def burnups(self):
        """
        Vector of burnups from coefficient file
        See Also
        --------
        :attr:`BranchCollector.burnups`"""
        return self.collector.burnups

    @burnups.setter
    def burnups(self, value):
        self.collector.burnups = value

    @property
    def perturbations(self):
        """
        Iterable indicating the specific perturbation types
        See Also
        --------
        :attr:`BranchCollector.perturbations`
        """
        return self.collector.perturbations

    @perturbations.setter
    def perturbations(self, value):
        self.collector.perturbations = value
class BranchCollector(object):
    """
    Main class that collects and arranges branched data
    .. versionadded:: 0.7.0
    Parameters
    ----------
    source: str or :class:`~serpentTools.parsers.branching.BranchingReader`
        Coefficient file to be read, or a
        :class:`~serpentTools.parsers.branching.BranchingReader`
        that has read the file of interest
    Attributes
    ----------
    filePath: str
        Location of the read file
    univIndex: tuple
        Ordered tuple of universe as they appear in the first dimension
        of all arrays in :attr:`xsTables`
    universes: dict
        Dictionary of universe-specific cross sections. Each entry
        is a :class:`BranchedUniv` object that stores cross sections
        for a single universe.
    xsTables: dict
        Dictionary of ``{k: x}`` pairs where ``k`` corresponds to
        all cross sections processed and ``x`` are large multidimensional
        cross sections. The structure is described with :attr:`axis`
    """
    __slots__ = (
        'filePath', '_branches', 'xsTables', 'universes', '_axis',
        '_perturbations', 'univIndex', '_burnups', '_states', '_shapes',
    )

    def __init__(self, source):
        # Accept either an already-built reader or anything the reader
        # constructor accepts (path or readable object).
        if isinstance(source, BranchingReader):
            reader = source
        else:
            # assume file path or readable
            reader = BranchingReader(source)
        reader.read()
        self.filePath = reader.filePath
        self._branches = reader.branches
        self.xsTables = {}
        self.universes = {}
        # All remain None until collect() establishes the data structure.
        self._perturbations = None
        self._states = None
        self._axis = None
        self._burnups = None

    @property
    def perturbations(self):
        """
        Iterable indicating the specific perturbation types
        Can be set to any iterable, so long as the number of
        perturbations is preserved. Ordering is important,
        as changing this does not change the structure
        of any group constants stored
        Example
        -------
        >>> print(col.perturbations)
        ('BOR', 'TFU')
        >>> col.perturbations = ['B', 'T']  # allowed
        >>> col.perturbations = [
        >>>     'boron conc', 'fuel temp', 'ctrl pos',  # not allowed
        >>> ]
        ValueError("Current number of perturbations is 2, not 3")
        """
        return self._perturbations

    @perturbations.setter
    def perturbations(self, value):
        if self._perturbations is None:
            raise AttributeError(
                "Collect first to ensure correct data structure.")
        # Normalize scalars/strings to a one-element tuple.
        if isinstance(value, str) or not isinstance(value, Iterable):
            value = value,
        if len(value) != len(self._perturbations):
            raise ValueError(
                "Current number of perturbations is {}, not {}"
                .format(len(self._perturbations), len(value)))
        self._perturbations = value

    @property
    def states(self):
        """
        Iterable describing each perturbation branch.
        Length is equal to that of :attr:`perturbations`, and
        the ``i``-th index of ``states`` indicates the values
        perturbation ``perturbations[i]`` experiences.
        Can be set to any iterable such that the total number
        of perturbations is preserved
        Examples
        --------
        >>> col.states
        (('B1000', 'B750', 'nom'), ('FT1200', 'FT600', 'nom'))
        # set as numpy array
        >>> states = numpy.array([
            [1000., 750., 0.],
            [1200., 600., 900.]
        ])
        >>> col.states = states
        >>> col.states
        array([[1000., 750., 0],
               [1200., 600., 900]])
        # set as individual numpy vectors
        >>> col.states = (states[0], states[1])
        >>> col.states
        (array([1000., 750., 0.,]), array([1200., 600., 900.,]))
        # pass incorrect shape
        >>> col.states = (
        >>>     (1000, 750, 0), (1200, 600, 900), (0, 1)
        >>> )
        ValueError("Current number of perturbations is 2, not 3")
        # pass incorrect states for one perturbations
        >>> cols.states = (
        >>>     (1000, 750, 500, 0), (1200, 600, 900)
        >>> )
        ValueError("Current number of perturbations for state BOR "
                   "is 3, not 4")
        """
        return self._states

    @states.setter
    def states(self, value):
        if self._states is None:
            raise AttributeError(
                "Collect first to ensure correct data structure.")
        if len(value) != len(self._states):
            raise ValueError(
                "Current number of perturbations is {}, not {}"
                .format(len(self._states), len(value)))
        # check to make sure all perturbation vectors in the
        # requested value are of the same length
        for index, pertVec in enumerate(self._states):
            if len(value[index]) != len(pertVec):
                raise ValueError(
                    "Current number of perturbations for state {} is {}, "
                    "not {}"
                    .format(self._perturbations[index], len(pertVec),
                            len(value[index])))
        self._states = value

    @property
    def axis(self):
        """
        Tuple describing axis of underlying data
        Each index contains a description of the changes in
        group constant data along that axis.
        Can be set to any iterable, but is converted to a
        tuple to prevent in-place changes, such as appending
        to a list or removing one item.
        Passing an ordered object, :class:`list`,
        :class:`tuple`, or :class:`numpy.array` is preferred,
        as the conversion to :class:`tuple` can sort values
        in un-ordered objects like :class:`set` or :class:`dict`
        strangely.
        Examples
        --------
        >>> col.axis
        ("Universe", "BOR", "TFU", "Burnup", "Group")
        >>> infTot = col.xsTables['infTot']
        >>> infTot.shape
        (5, 3, 3, 3, 2)
        # five universes, three BOR perturbations
        # three TFU perturbations, three burnups,
        # two energy groups
        >>> col.axis = ['u', 'b', 't', 'bu', 'g']
        >>> col.axis
        ('u', 'b', 't', 'bu', 'g')
        # pass an unordered set
        >>> col.axis = {'u', 'b', 't', 'bu', 'g'}
        >>> col.axis
        ('bu', 'u', 't', 'g', 'b')
        # incorrectly set axis
        >>> col.axis = [1, 2, 3, 4]
        ValueError("Current axis has 5 dimensions, not 4")
        """
        if self._axis is None:
            raise AttributeError("Axis not set. Collect first.")
        return self._axis

    @axis.setter
    def axis(self, value):
        if self._axis is None:
            raise AttributeError(
                "Collect first to ensure correct data structure.")
        # coerce into tuple to enforce some immutability
        if not isinstance(value, tuple):
            value = tuple(value)
        if len(value) != len(self._axis):
            raise ValueError(
                "Current axis has {} dimensions, not {}"
                .format(len(self._axis), len(value)))
        self._axis = value

    @property
    def burnups(self):
        """
        Vector of burnups from coefficient file
        Can be set to any iterable that has same number
        of entries as existing burnup. Automatically
        converts to :class:`numpy.array`
        Examples
        --------
        >>> col.burnups
        array([0., 1., 10.])
        >>> col.burnups = array([0., 5.6, 56.])
        >>> col.burnups
        array([0., 5.6, 56.])
        >>> col.burnups = [0, 1, 10]
        # converted to array of integers
        >>> col.burnups
        array([0, 1, 10])
        >>> col.burnups = [0, 1, 2, 3]  # not allowed
        ValueError("Current burnup vector has 3 items, not 3")
        """
        if self._burnups is None:
            raise AttributeError("Burnups not set. Collect first.")
        return self._burnups

    @burnups.setter
    def burnups(self, value):
        if self._burnups is None:
            raise AttributeError(
                "Collect first to ensure correct data structure.")
        if not isinstance(value, ndarray):
            value = array(value)
        if value.size != self._burnups.size:
            raise ValueError(
                "Current burnup vector has {} items, not {}"
                .format(self._burnups.size, value.size))
        self._burnups = value

    def collect(self, perturbations=None):
        """
        Parse the contents of the file and collect cross sections
        Parameters
        ----------
        perturbations: tuple or None
            Tuple where each entry is a state that is perturbed across
            the analysis, e.g. ``("Tfuel", "RhoCool", "CR")``. These
            must appear in the same order as they are ordered in the
            coefficient file. If ``None``, then the number of
            perturbations will be determined from the coefficient
            file. This is used to set :attr:`perturbations` and
            can be adjusted later
        """
        # get number of perturbations from number of
        # items in keys of reader branches
        # (grab a sample key by iterating once and breaking)
        for key in self._branches:
            break
        if isinstance(key, str):
            nReaderPerts = 1
        else:
            nReaderPerts = len(key)
        if perturbations is None:
            # Fall back to generic names p0, p1, ...
            perturbations = tuple(
                ['p' + str(ii) for ii in range(nReaderPerts)])
        elif (isinstance(perturbations, str)
                or not isinstance(perturbations, Iterable)):
            perturbations = perturbations,
        else:
            perturbations = tuple(perturbations)
        assert len(perturbations) == nReaderPerts, "{} vs {}".format(
            len(perturbations), nReaderPerts)
        self._perturbations = perturbations
        sampleBranchKey = self._getBranchStates()
        sampleUniv = self._getUnivsBurnups(sampleBranchKey)
        xsSizes = self._getXsSizes(sampleUniv)
        # Create empty arrays for each xs type
        # Will send off views of this to each universe container
        numUniv = len(self.univIndex)
        numBurnup = len(self._burnups)
        for key, size in xsSizes.items():
            # Perturbation axes first; universe/burnup/group are moved to
            # their final positions later by _populateUniverses.
            shape = self._shapes + [numUniv, numBurnup, size]
            self.xsTables[key] = empty(shape)
        missing = self._populateXsTable()
        if missing:
            items = ["{}: {}".format(str(k), str(v))
                     for k, v in missing.items()]
            msg = ("The following branch states and indexes are "
                   "unaccounted for:\n{}".format("\n".join(items)))
            warn(msg, RuntimeWarning)
        self._burnups = array(self._burnups)
        self._populateUniverses()
        # Raw reader data no longer needed once arrays are filled.
        del self._branches, self._shapes

    def _getBranchStates(self):
        # Build the sorted set of branch values seen for each perturbation;
        # returns the last branch key seen as a representative sample.
        branchSets = tuple([set() for _tpl in self.perturbations])
        for branchKey in self._branches:
            for stateIndex, state in enumerate(branchKey):
                branchSets[stateIndex].add(state)
        self._states = tuple([tuple(sorted(s)) for s in branchSets])
        self._shapes = [len(s) for s in self._states]
        return branchKey

    def _getUnivsBurnups(self, branchKey):
        # Scan one branch to learn the universe IDs and burnup points;
        # returns the last universe object seen as a representative sample.
        branch = self._branches[branchKey]
        univs = set()
        _burnups = set()
        for (unID, bu, ix, day), universe in branch.items():
            univs.add(unID)
            # Prefer days when present, otherwise burnup in MWd/kgU.
            _burnups.add(bu if day is None else day)
        self._burnups = tuple(sorted(_burnups))
        self.univIndex = tuple(sorted(univs))
        return universe

    @staticmethod
    def _getXsSizes(sampleUniv):
        # Map each group-constant name to the length of its data vector.
        sizes = {}
        for gcAttr in ('infExp', 'b1Exp', 'gc'):
            attr = getattr(sampleUniv, gcAttr)
            for key, value in attr.items():
                sizes[key] = value.size
        return sizes

    def _populateXsTable(self):
        # Fill the pre-allocated arrays for every combination of branch
        # states; returns a dict of branch keys missing from the file.
        missing = {}
        # Create a map of enumerated tuples
        branchMap = map(enumerate, self._states)
        # Fortran-ordered object array: column 0 holds state indexes,
        # column 1 the corresponding branch values.
        branchIndexer = empty(
            (len(self._states), 2), order='F', dtype=object)
        xsTables = self.xsTables
        keys = set(xsTables.keys())
        for branchMapItem in product(*branchMap):
            branchIndexer[:, :] = branchMapItem
            stateIndex = tuple(branchIndexer[:, 0].astype(int).tolist())
            branchKey = tuple(branchIndexer[:, 1].tolist())
            branch = self._branches.get(branchKey)
            if branch is None:
                # Record the gap and mark all entries for this state NaN.
                missing[branchKey] = stateIndex
                for submat in self.xsTables.values():
                    submat[stateIndex].fill(nan)
                continue
            univIterator = map(enumerate, (self.univIndex, self._burnups))
            for (uIndex, univID), (bIndex, burnup) in product(*univIterator):
                universe = branch.getUniv(univID, burnup)
                thisSlice = stateIndex + (uIndex, bIndex)
                for xsKey in keys:
                    xsTables[xsKey][thisSlice] = universe.get(xsKey)
        return missing

    def _populateUniverses(self):
        # Transpose every array so Universe leads, followed by the
        # perturbations, burnup, and energy group, then hand each universe
        # a view of its slice.
        self._axis = ("Universe", ) + self._perturbations + ("Burnup", "Group")
        nPerts = len(self._perturbations)
        newAxis = (
            (nPerts,) + tuple(range(nPerts))
            + (nPerts + 1, nPerts + 2))
        origKeys = self.xsTables.keys()
        for key in origKeys:
            self.xsTables[key] = self.xsTables[key].transpose(*newAxis)
        univAxis = self._axis[1:]
        # Create all the univIndex
        for univID in self.univIndex:
            self.universes[univID] = BranchedUniv(univID, self, univAxis)
        for xsKey, xsMat in self.xsTables.items():
            for univIndex, univID in enumerate(self.univIndex):
                self.universes[univID][xsKey] = xsMat[univIndex]

    @classmethod
    def fromFile(cls, filePath, perturbations=None):
        """
        Create a :class:`BranchCollector` from the contents of the file
        Parameters
        ----------
        filePath: str
            Location of coefficient file to be read
        perturbations: None or iterable
            Ordering of perturbation types in coefficient file.
            If ``None``, the number of perturbations will be inferred
            from file. Otherwise, the number of perturbations must
            match those in the file. This value can be changed
            after the fact using :attr:`perturbations`,
            with insight gained from :attr:`states`
        Returns
        -------
        :class:`BranchCollector` object that has processed the contents
        of the file.
        """
        collector = BranchCollector(filePath)
        collector.collect(perturbations)
        return collector
| CORE-GATECH-GROUP/serpent-tools | serpentTools/xs.py | Python | mit | 19,877 |
#!/usr/bin/python
import sys,os
from math import sqrt
from copy import deepcopy
import json, pickle, cPickle
import os
import shutil
from advprint import AdvPrint
from info import Info
from process import Process
from detectorsettings import DetectorSettings
from evaluator import Evaluator, find_strongest_evaluators, find_strongest_zsig
from resultcollector import ResultCollector
class CheckMATE2:
    """ This is the main object whose instance corresponds to a full CheckMATE run """
    # Class-level default; every __init__ path rebinds self.procList to a
    # fresh instance attribute, so this is never mutated in place.
    procList = list()
    def __init__(self):
        #global Info, AdvPrint
        """ Initialisation of a CheckMATE object leads to an entire run of the CheckMATE procedure"""
        # Initialisation steps
        #Info.init()
        Info.fill_standard_paths_and_files()
        if len(sys.argv) == 1:
            self.printUsage()
        self.printLogo()
        # A single argument (other than -h) is an input parameter file;
        # otherwise settings come from command-line parameters.
        if len(sys.argv) == 2 and sys.argv[-1] != "-h":
            Info.fill_info_from_file(sys.argv[1])
            self.procList = Info.fill_processes_from_file(sys.argv[1])
        else:
            Info.fill_info_from_parameters()
            self.procList = Info.fill_processes_from_parameters()
        # In "add" mode, merge previously stored processes with new ones.
        if Info.parameters["outputexists"] == "add":
            self.load(Info.files['internal_processes'])
        for p in self.procList:
            p.checkInputConsistency()
        self.user_param_check()
        self.prepare_run()
        # Running the event-based part
        if self.procList == []:
            AdvPrint.cerr_exit("No processes are loaded!")
        for p in self.procList:
            p.prepare()
            p.run()
            AdvPrint.cout("\n")
        # Evaluate
        if not Info.flags['skipevaluation']:
            self.evaluate()
        # Store internal status
        Info.save(Info.files['internal_info'])
        self.save(Info.files['internal_processes'])
    def save(self, filename):
        """ Stores the current status of this instance in a file """
        # The process list is pickled, then embedded as a string in JSON.
        contents = dict()
        contents["procList"] = pickle.dumps(self.procList)
        with open(filename, "w") as f:
            json.dump(contents, f, indent=2)
    def load(self, filename):
        """ Loads contents for current instance from a valid file """
        with open(filename, "r") as f:
            contents = json.load(f)
        try:
            # processes = old processes plus added new ones of current run
            newProcList = self.procList
            self.procList = pickle.loads(contents["procList"])
            for new_p in newProcList:
                combined = False
                for old_p in self.procList:
                    if old_p.name == new_p.name:
                        old_p.combine_processes(new_p)
                        combined = True
                        break
                if not combined:
                    self.procList.append(new_p)
        except KeyError:
            # BUGFIX: was "+inputfile", an undefined name that raised a
            # NameError instead of printing this message.
            AdvPrint.cerr_exit("Problem loading info file "+filename)
        return self
    def printLogo(self):
        """ Obvious"""
        AdvPrint.cout( """
 ____ _ _ __ __ _ _____ _____ ____
 / ___| |__ ___ ___| | _| \/ | / \|_ _| ____|___ \
| | | '_ \ / _ \/ __| |/ / |\/| | / _ \ | | | _| __) |
| |___| | | | __/ (__| <| | | |/ ___ \| | | |___ / __/
 \____|_| |_|\___|\___|_|\_\_| |_/_/ \_\_| |_____|_____|
 """
        ) # TODO: add cite info?
    def printUsage(self):
        """Print usage information and exit."""
        self.printLogo()
        AdvPrint.cout( """
 ___
|__| _ | _
| |(_)\)/ |(_)
 """)
        AdvPrint.cout( "Method 1: Input Parameters")
        AdvPrint.cout( "\trun -n {name_for_this_run} -a {analysis} -p {process} -xs {crosssection} -xse {crosssection error} -ev {eventfile}")
        AdvPrint.cout( "Method 2: Input File")
        AdvPrint.cout( "\trun {inputfile}")
        AdvPrint.cout( "")
        AdvPrint.cout( "Examples:")
        AdvPrint.cout( "\t./CheckMATE -n testrun -a atlas_1405_7875 -p \"gg\" -xs \"1*FB\" -xse \"0.1 FB\" -ev /scratch/all/gluinopair.hepmc")
        AdvPrint.cout( "\t./CheckMATE testparam.dat")
        AdvPrint.cout( "")
        AdvPrint.cout( "Type './CheckMATE -h' for more information about available parameters or check")
        AdvPrint.cout( "the given 'testparam.dat' file for the desired structure of input files")
        exit(1)
    def user_param_check(self):
        """Prints settings on screen and awaits user confirmation"""
        analysis_info = dict()
        AdvPrint.cout("The following settings are used:")
        AdvPrint.cout("Analyses: ")
        for analysis in Info.analyses:
            parameters = Info.get_analysis_parameters(analysis)
            analysis_info = ""
            if parameters["expectation_known"] == "n":
                analysis_info += "[NO EXPECTATION KNOWN -> NO EXCLUSION TEST] "
            analysis_info += parameters["short_info"]
            AdvPrint.cout("\t"+analysis+" ("+analysis_info+")")
        AdvPrint.cout("E_CM: "+str(Info.parameters["ecm"]))
        AdvPrint.cout("Processes: ")
        for process in self.procList:
            process.printInfo()
        AdvPrint.cout("")
        AdvPrint.cout("Output Directory: ")
        AdvPrint.cout("\t"+Info.paths['output'])
        AdvPrint.cout("Additional Settings: ")
        if Info.files['slha'] != "":
            AdvPrint.cout("\t - SLHA file "+Info.files['slha']+" will be used for event generation")
        if Info.parameters['invisiblePIDs'] != []:
            AdvPrint.cout("\t - The following PIDs will be considered as invisible for the detector: "+str(Info.parameters['invisiblePIDs']).translate(None, "[]'"))
        if Info.flags['skipanalysis']:
            AdvPrint.cout("\t - No analysis step")
        if Info.flags['skippythia']:
            AdvPrint.cout("\t - No pythia step")
        if Info.flags['skipevaluation']:
            AdvPrint.cout("\t - No evaluation step")
        if Info.flags['fullcls']:
            AdvPrint.cout("\t - CLs of all signal regions will be explicitly calculated")
        if Info.parameters['bestcls'] != 0:
            AdvPrint.cout("\t - CLs of "+str(Info.parameters['bestcls'])+" best signal region will be explicitly calculated")
        if Info.flags['likelihood']:
            AdvPrint.cout("\t - Likelihood will be calculated for each signal region")
        if Info.flags['no_mc_stat_err']:
            AdvPrint.cout("\t - No Monte Carlo statistical uncertainty will be included in the evaluation")
        if Info.flags['eff_tab']:
            AdvPrint.cout("\t - Efficiency tables will be calculated for each signal region of every analysis run")
        if Info.flags["controlregions"]:
            AdvPrint.cout("\t - Analysing control regions")
        if Info.parameters["outputexists"] == "overwrite":
            AdvPrint.cout("\t - Old results will be deleted")
        if Info.parameters["outputexists"] == "add":
            AdvPrint.cout("\t - New results will be added to old ones")
        if Info.parameters["randomseed"] != 0:
            AdvPrint.cout("\t - Fixed random seed of "+str(Info.parameters["randomseed"]))
        if Info.flags["write_delphes_events"]:
            AdvPrint.cout("\t - delphes .root files will be written!")
        if Info.flags["write_pythia_events"]:
            AdvPrint.cout("\t - pythia .hepmc files will be written!")
        if Info.parameters["EventResultFileColumns"] != ['analysis', 'sr', 'signal_normevents', 'signal_err_tot']:
            AdvPrint.cout("\t - print columns "+str(Info.parameters['EventResultFileColumns']).translate(None, "[]'")+" in event result files!")
        if Info.parameters["ProcessResultFileColumns"] != ['analysis', 'sr', 'signal_normevents', 'signal_err_tot']:
            AdvPrint.cout("\t - print columns "+str(Info.parameters['ProcessResultFileColumns']).translate(None, "[]'")+" in process result files!")
        if Info.parameters["TotalEvaluationFileColumns"] != ['analysis', 'sr', 'o', 'b', 'db', 's', 'ds', 's95obs', 's95exp', 'robscons', 'rexpcons']:
            AdvPrint.cout("\t - print columns "+str(Info.parameters['TotalEvaluationFileColumns']).translate(None, "[]'")+" in total evaluation files!")
        if Info.parameters["BestPerAnalysisEvaluationFileColumns"] != ['analysis', 'sr', 'o', 'b', 'db', 's', 'ds', 's95obs', 's95exp', 'robscons', 'rexpcons']:
            # Typo fix: was "bert-per-analysis".
            AdvPrint.cout("\t - print columns "+str(Info.parameters['BestPerAnalysisEvaluationFileColumns']).translate(None, "[]'")+" in best-per-analysis evaluation files!")
        # Let user check correctness of parameters, unless in skipparamcheck.
        if not Info.flags['skipparamcheck']:
            while True:
                c = raw_input("Is this correct? (y/n) ")
                if c == "y":
                    break
                elif c == "n":
                    exit(1)
        AdvPrint.cout("")
    def prepare_run(self):
        """Creates all output folders and files and generate cards that are needed"""
        Info.prepare_config()
        # preparation only needed if new run is started with "overwrite"
        DetectorSettings.merge_settings()
        if Info.parameters["outputexists"] == "overwrite" or not os.path.isdir(Info.paths['output']):
            Info.prepare_output_directories()
            DetectorSettings.update_delphes_files()
        else:
            # If the original run used modified Delphes settings, we need them for the added run as well
            atlas_conf_new = os.path.join(Info.paths['output_delphes'], "modified_ATLAS_card.tcl")
            cms_conf_new = os.path.join(Info.paths['output_delphes'], "modified_CMS_card.tcl")
            if os.path.isfile(atlas_conf_new):
                Info.files['delphes_global_config_ATLAS'] = atlas_conf_new
            if os.path.isfile(cms_conf_new):
                Info.files['delphes_global_config_CMS'] = cms_conf_new
    def evaluate(self):
        """ Performs statistical evaluation of the result """
        AdvPrint.cout("Evaluating Results")
        resultCollectors = self.get_resultCollectors()
        # evaluate all results
        evaluators = dict()
        for analysis in resultCollectors:
            evaluators[analysis] = dict()
        # only process those results and those signal regions that are given in the reference file
        for analysis in Info.analyses:
            signal_regions = Info.get_analysis_parameters(analysis)["signal_regions"]
            for sr in signal_regions:
                evaluator = Evaluator(resultCollectors[analysis][sr])
                # Calculate everything that should be calculated
                # TODO: Beware analyses with unknown background
                evaluator.calc_efficiencies()
                evaluator.calc_r_values()
                if Info.flags["likelihood"]:
                    evaluator.calc_likelihood()
                if Info.flags["fullcls"]:
                    evaluator.calc_cls_values()
                if Info.flags["zsig"]:
                    evaluator.calc_zsig()
                evaluators[analysis][sr] = evaluator
        if Info.parameters["bestcls"] != 0:
            AdvPrint.cout("Calculating CLs for the "+str(Info.parameters["bestcls"])+" most sensitive signal regions!")
            best_evaluators = find_strongest_evaluators(evaluators, Info.parameters["bestcls"])
            # if "bestcls" is 1, find_strongest_evaluators does not return a list but just the single best
            if Info.parameters["bestcls"] == 1:
                best_evaluators = [best_evaluators]
            for ev in best_evaluators:
                ev.calc_cls_values()
        # find best result
        best_evaluator_per_analysis = dict()
        for analysis in evaluators:
            # Find best of all SRs in analysis
            best_evaluator_per_analysis[analysis] = find_strongest_evaluators(evaluators[analysis], 1)
        best_evaluator = find_strongest_evaluators(best_evaluator_per_analysis, 1)
        # Write the full per-SR table, then the best-per-analysis table.
        AdvPrint.set_cout_file(Info.files['output_totalresults'], True)
        AdvPrint.mute()
        for col in Info.parameters["TotalEvaluationFileColumns"]:
            AdvPrint.cout(col+"  ", "nlb")
        AdvPrint.cout("")
        for a in sorted(evaluators.keys()):
            for sr in sorted(evaluators[a].keys()):
                AdvPrint.cout(evaluators[a][sr].line_from_data(Info.parameters["TotalEvaluationFileColumns"]))
        AdvPrint.format_columnated_file(Info.files['output_totalresults'])
        AdvPrint.set_cout_file(Info.files['output_bestsignalregions'], True)
        AdvPrint.mute()
        for col in Info.parameters["BestPerAnalysisEvaluationFileColumns"]:
            AdvPrint.cout(col+"  ", "nlb")
        AdvPrint.cout("")
        # print analyses in alphabetic order
        for a in sorted(best_evaluator_per_analysis.keys()):
            AdvPrint.cout(best_evaluator_per_analysis[a].line_from_data(Info.parameters["BestPerAnalysisEvaluationFileColumns"]))
        AdvPrint.format_columnated_file(Info.files['output_bestsignalregions'])
        AdvPrint.set_cout_file("#None")
        AdvPrint.unmute()
        best_evaluator.check_warnings()
        best_evaluator.print_result()
        if Info.flags['zsig']:
            _print_zsig(evaluators)
        if Info.flags['likelihood']:
            _print_likelihood(evaluators)
    def get_resultCollectors(self):
        """ Finds the results of all events within all processes and sums and averages them """
        #setup resultCollector object
        resultCollectors_tot = dict()
        for analysis in Info.analyses:
            resultCollectors_tot[analysis] = dict()
            signal_regions = Info.get_analysis_parameters(analysis)["signal_regions"]
            for sr in signal_regions:
                resultCollectors_tot[analysis][sr] = ResultCollector("total", analysis, sr)
        # loop over all associated processes
        for proc in self.procList:
            # process results are summed
            resultCollectors_proc = proc.get_resultCollectors()
            for analysis in resultCollectors_tot:
                for sr in resultCollectors_tot[analysis]:
                    resultCollectors_tot[analysis][sr].add_and_sum(resultCollectors_proc[analysis][sr])
        return resultCollectors_tot
def _print_zsig(evaluators):
    """Write expected-significance (Zexp) result files.

    Produces one file per analysis listing (SR, S, B, dB, Z_exp) for every
    signal region, a file with the strongest signal regions, and a one-line
    summary file with the overall maximum expected significance.
    """
    # dict.items() works on both Python 2 and 3; the previous iteritems()
    # call is Python 2 only and breaks under Python 3.
    for analysis, v in evaluators.items():
        with open(Info.files['output_evaluation_zsig'][analysis], "w") as of:
            of.write("SR S B dB Z_exp\n")
            for sr, ev in v.items():
                of.write(
                    sr+" "
                    +str(ev.resultCollector.signal_normevents)+" "
                    +str(float(ev.bkg))+" "
                    +str(float(ev.bkg_err))+" "
                    +str(ev.expected_zsig)+"\n"
                )
    # Strongest expected significances over all analyses, sorted best-first.
    best = find_strongest_zsig(evaluators)
    with open(Info.files['output_bestsignificanceregions'], "w") as of:
        of.write("analysis best S Zexp\n")
        for b in best:
            ev = b['evaluator']
            of.write(
                b['analysis']+" "
                +b['sr']+" "
                +str(ev.resultCollector.signal_normevents)+" "
                +str(ev.expected_zsig)+"\n"
            )
    # best[0] holds the globally strongest region.
    with open(Info.files['output_result_zsig'], "w") as of:
        result_zexp = "Zexp_max = "+str(best[0]['evaluator'].expected_zsig)
        of.write("Result for Zexp: "+result_zexp+"\n")
def _print_likelihood(evaluators):
    """Write -2lnL likelihood tables for all evaluators.

    One file per analysis lists (SR, o, b, db, s, ds, likeli) for every
    signal region; a summary file lists the per-analysis sums and the
    grand total over all analyses.
    """
    tot_likeli = 0.
    dict_likeli = {}
    # dict.items() works on both Python 2 and 3; the previous iteritems()
    # call is Python 2 only and breaks under Python 3.
    for analysis, v in evaluators.items():
        ana_likeli = 0.
        AdvPrint.set_cout_file(Info.files['output_evaluation_likelihood'][analysis], True)
        AdvPrint.mute()
        AdvPrint.cout("SR o b db s ds likeli")
        for sr, ev in v.items():
            AdvPrint.cout(sr+" "
                +str(float(ev.obs))+" "
                +str(float(ev.bkg))+" "
                +str(float(ev.bkg_err))+" "
                +str(ev.resultCollector.signal_normevents)+" "
                +str(ev.resultCollector.signal_err_tot)+" "
                +str(ev.likelihood))
            ana_likeli += ev.likelihood
        AdvPrint.format_columnated_file(Info.files['output_evaluation_likelihood'][analysis])
        AdvPrint.set_cout_file("#None")
        AdvPrint.unmute()
        dict_likeli[analysis] = ana_likeli
        tot_likeli += ana_likeli
    # Summary file: one line per analysis plus the overall total.
    AdvPrint.set_cout_file(Info.files['likelihood'], True)
    AdvPrint.mute()
    AdvPrint.cout("Analysis -2lnL")
    for a in dict_likeli:
        AdvPrint.cout(a+" "+str(dict_likeli[a]))
    AdvPrint.cout("\nTotal: "+str(tot_likeli))
    AdvPrint.format_columnated_file(Info.files['likelihood'])
| HEPcodes/CheckMATE | tools/python/checkmate_core.py | Python | gpl-2.0 | 17,387 |
from flask import Flask
from flask_bootstrap import Bootstrap
from flask import render_template
import bootstrap
import xmlrpclib
from io import BytesIO
import base64
app = Flask(__name__)
import xmlrpclib
username = 'username' #the user
pwd = 'password' #the password of the user
dbname = 'ctpug' #the database
sock_common = xmlrpclib.ServerProxy ('http://127.0.0.1:8069/xmlrpc/common')
uid = sock_common.login(dbname, username, pwd)
#replace localhost with the address of the server
sock = xmlrpclib.ServerProxy('http://127.0.0.1:8069/xmlrpc/object')
def test_connection(username, pwd, dbname):
    """Probe the Odoo XML-RPC connection by reading res.partner records.

    Returns a human-readable status string describing whether the
    connection test succeeded.
    """
    connection_reply = 'Connection to Odoo - '
    args = []  # empty query clause: match every partner
    ids = sock.execute(dbname, uid, pwd, 'res.partner', 'search', args)
    fields = ['name', 'id', 'email']  # fields to read
    data = sock.execute(dbname, uid, pwd, 'res.partner', 'read', ids, fields)
    # 'read' returns a list of dicts, so the name must be accessed by key
    # (data[0].name raised AttributeError).  The concatenation result also
    # has to be assigned back -- previously it was computed and discarded,
    # so the function always returned the bare prefix.
    if data[0]['name'] == 'admin':
        connection_reply += 'successful'
    else:
        connection_reply += 'not successful'
    return connection_reply
def get_products(username, pwd, dbname):
    """Fetch every product.product record with price and stock fields."""
    product_ids = sock.execute(dbname, uid, pwd, 'product.product', 'search', [])
    wanted_fields = ['id', 'lst_price', 'qty_available', 'product_tmpl_id']
    return sock.execute(dbname, uid, pwd, 'product.product', 'read',
                        product_ids, wanted_fields)
def get_product_templates(username, pwd, dbname, args):
    """Fetch product.template records matching the search domain *args*.

    A falsy *args* is treated as an empty domain (match everything).
    """
    domain = args or []
    template_ids = sock.execute(dbname, uid, pwd, 'product.template', 'search', domain)
    wanted_fields = ['id', 'name', 'image_medium']
    return sock.execute(dbname, uid, pwd, 'product.template', 'read',
                        template_ids, wanted_fields)
def get_company_currency(username, pwd, dbname):
    """Return the currency symbol of the main company (id == 1)."""
    # (The previous unused empty-domain variable was removed; the search
    # below already uses an explicit domain on the company id.)
    ids = sock.execute(dbname, uid, pwd, 'res.company', 'search', [('id', '=', 1)])
    fields = ['currency_id']  # fields to read
    company = sock.execute(dbname, uid, pwd, 'res.company', 'read', ids, fields)
    # currency_id is a (id, display_name) pair; resolve the currency record
    # to obtain its symbol.
    ids = sock.execute(dbname, uid, pwd, 'res.currency', 'search',
                       [('id', '=', company[0]['currency_id'][0])])
    fields = ['symbol']
    currency_symbol = sock.execute(dbname, uid, pwd, 'res.currency', 'read', ids, fields)
    return currency_symbol[0]['symbol']
@app.route('/products')
def products():
    """Render a plain-HTML list of products with price and inline image.

    Only products whose template carries an image are listed.
    """
    product_output = 'List of products </br></br>'
    product_product = get_products(username, pwd, dbname)
    # The currency symbol is the same for every product; fetch it once
    # instead of issuing two XML-RPC round-trips per loop iteration.
    currency = get_company_currency(username, pwd, dbname)
    for product in product_product:
        # Resolve the product's template to obtain its name and image.
        args = [('id', '=', product['product_tmpl_id'][0])]
        product_template = get_product_templates(username, pwd, dbname, args)
        if product_template[0]['image_medium']:
            product_output += '\n' + str(product['id']) + ' ' + product_template[0]['name'] + ' ' + currency + str(product['lst_price']) + "<img style='display:block; width:100px;height:100px;' id='base64image' src='data:image/jpeg;base64, %s'/>" % product_template[0]['image_medium'] + ' \n'
    return product_output
@app.route('/')
def index():
    """Landing page: report whether the Odoo XML-RPC link works."""
    connection_reply = 'Connection to Odoo - '
    partner_ids = sock.execute(dbname, uid, pwd, 'res.partner', 'search', [])
    read_fields = ['name', 'id', 'email']  # fields to read
    data = sock.execute(dbname, uid, pwd, 'res.partner', 'read', partner_ids, read_fields)
    # Partner id 3 is the expected first record on this database.
    suffix = 'successful' if data[0]['id'] == 3 else 'not successful'
    return '%s %s' % (connection_reply, suffix)
if __name__ == '__main__':
    # Initialise Flask-Bootstrap *before* starting the server: app.run()
    # blocks until shutdown, so the Bootstrap(app) call that previously
    # followed it was never executed.
    Bootstrap(app)
    app.run(debug=True)
| c-goosen/ctpug_11_july | flask/__init__.py | Python | cc0-1.0 | 4,174 |
import numpy as np
def Cov(xs, ys, meanx=None, meany=None):
    """Return the population covariance of the paired samples xs and ys.

    Pre-computed means may be supplied via meanx/meany to avoid
    recalculating them.
    """
    x_arr = np.asarray(xs)
    y_arr = np.asarray(ys)
    mx = np.mean(x_arr) if meanx is None else meanx
    my = np.mean(y_arr) if meany is None else meany
    # E[(x - mx)(y - my)] via a dot product, normalised by the sample size.
    return np.dot(x_arr - mx, y_arr - my) / len(x_arr)
| UpSea/midProjects | BasicOperations/09_NumPy/02_Numpy_02_covariance.py | Python | mit | 318 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os
import sys
def searchIndex(f):
    """Count barcode indices in the gzipped FASTQ *f* and write the 20 most
    frequent ones to a text file.

    The work is delegated to shell pipelines (gunzip/sed/awk/sort/uniq) and
    all intermediate files are removed at the end.

    NOTE(review): *f* is interpolated into os.system() shell commands
    unescaped, so a crafted file name could inject shell commands -- only
    call this with trusted paths.
    """
    # Lane/sample token (e.g. 'L001') taken from the file-name convention;
    # it selects which sequencer's read-header pattern to grep for.
    fname = f.split('_')[-3]
    if fname[:3] == 'L00':
        # os.system("gunzip -c %s | sed -n '/@ST-E00244/p' | awk -F \":\" '{print $10}' > Undetermined_Index" % f) ##ymkd
        os.system("gunzip -c %s | sed -n '/@ST-E00169/p' | awk -F \":\" '{print $10}' | awk -F \"+\" '{print $2}' > Undetermined_Index" % f) ##yjk
    else:
        os.system("gunzip -c %s | sed -n '/@NS500713/p' | awk -F \":\" '{print $10}' | awk -F \"+\" '{print $2}' > Undetermined_Index" % f) ##nextseq500
    # Count duplicates: sort, collapse with uniq -c, then sort by count desc.
    os.system("sort -o Undetermined_Index_sort Undetermined_Index")
    os.system("uniq -c Undetermined_Index_sort > Undetermined_Index_sort_uniq")
    os.system("sort -n -r -o Undetermined_Index_sort_uniq_sort Undetermined_Index_sort_uniq")
    # Keep only the 20 most frequent indices; the output file name embeds
    # the lane token for HiSeq-style lanes.
    if fname[:3] == 'L00':
        os.system("sed -n '1,20p' Undetermined_Index_sort_uniq_sort > Undetermined_Index_%s_top20.txt" % fname)
    else:
        os.system("sed -n '1,20p' Undetermined_Index_sort_uniq_sort > Undetermined_Index_20.txt")
    # Clean up all intermediate files.
    os.system("rm -rf Undetermined_Index Undetermined_Index_sort Undetermined_Index_sort_uniq Undetermined_Index_sort_uniq_sort ")
def main():
    """CLI entry point: expects the FASTQ path as the first argument."""
    searchIndex(sys.argv[1])
if __name__ == "__main__":
main()
| longrw/mypython | Undetermined_Index_top20.py | Python | gpl-3.0 | 1,284 |
#! /usr/bin/python3
import re
err = "La contraseña no es segura"
msg = "Escriba una contraseña al menos 8 caracteres alfanumericos"
def ismayor8(a):
    """Return True when *a* is at least 8 characters long."""
    return len(a) >= 8
def minus(a):
    """Return True when *a* contains at least one lowercase ASCII letter."""
    # A single search over the string is equivalent to testing each
    # character against the class [a-z].
    return re.search('[a-z]', a) is not None
def mayus(a):
    """Return True when *a* contains at least one uppercase ASCII letter."""
    return re.search('[A-Z]', a) is not None
def unnum(a):
    """Return True when *a* contains at least one decimal digit."""
    return re.search('[0-9]', a) is not None
def alfanumeric(a):
    """Return True when *a* is non-empty and purely alphanumeric."""
    return a.isalnum()
def vpass():
    """Prompt repeatedly for a password until a valid one is entered.

    Valid means: at least 8 characters, purely alphanumeric, and containing
    at least one lowercase letter, one uppercase letter and one digit.
    Returns True once a valid password has been typed.
    """
    accepted = False
    while not accepted:
        try:
            print(msg, end='\n')
            paswd = str(input('passwd: '))
            if not ismayor8(paswd):
                # Too short: re-prompt without printing the error message,
                # matching the original flow.
                continue
            if not alfanumeric(paswd):
                print(err, end='\n')
            elif minus(paswd) and mayus(paswd) and unnum(paswd):
                accepted = True
            else:
                print(err, end='\n')
        except (KeyboardInterrupt, EOFError):
            print(msg, end='\n')
    return accepted
| IntelBUAP/Python3 | Evaluaciones/tuxes/eva2/validapass.py | Python | gpl-2.0 | 1,660 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import unittest
import luigi
import luigi.format
import luigi.contrib.hadoop
import luigi.contrib.hdfs
import luigi.contrib.mrrunner
import luigi.notifications
import minicluster
import mock
from luigi.mock import MockTarget
from luigi.six import StringIO
from nose.plugins.attrib import attr
luigi.notifications.DEBUG = True
luigi.contrib.hadoop.attach(minicluster)
class OutputMixin(luigi.Task):
    """Task mixin whose target factory is either HDFS- or mock-backed."""
    # When True, targets live on the test minicluster's HDFS under /tmp.
    use_hdfs = luigi.BoolParameter(default=False)

    def get_output(self, fn):
        # HDFS targets are plain directories; the in-memory MockTarget is
        # used for fast local unit tests.
        if self.use_hdfs:
            return luigi.contrib.hdfs.HdfsTarget('/tmp/' + fn, format=luigi.format.get_default_format() >> luigi.contrib.hdfs.PlainDir)
        else:
            return MockTarget(fn)
class HadoopJobTask(luigi.contrib.hadoop.JobTask, OutputMixin):
    """Base job that picks a runner matching the configured target backend."""
    def job_runner(self):
        # Run on the minicluster when outputs are on HDFS, otherwise run
        # the mapper/reducer in-process.
        if self.use_hdfs:
            return minicluster.MiniClusterHadoopJobRunner()
        else:
            return luigi.contrib.hadoop.LocalJobRunner()
class Words(OutputMixin):
    """Produces a small two-line text fixture used as map-reduce input."""
    def output(self):
        return self.get_output('words')

    def run(self):
        # Two lines of whitespace-separated tokens (33 in total); 'jk'
        # occurs six times, which the word-count assertions rely on.
        f = self.output().open('w')
        f.write('kj kj lkj lkj ljoi j iljlk jlk jlk jk jkl jlk jlkj j ioj ioj kuh kjh\n')
        f.write('kjsfsdfkj sdjkf kljslkj flskjdfj jkkd jjfk jk jk jk jk jk jklkjf kj lkj lkj\n')
        f.close()
class WordCountJob(HadoopJobTask):
    """Classic word count over the Words fixture."""
    def mapper(self, line):
        # Emit (word, 1) and bump a per-word Hadoop counter as a side effect.
        for word in line.strip().split():
            self.incr_counter('word', word, 1)
            yield word, 1

    def reducer(self, word, occurences):
        yield word, sum(occurences)

    def requires(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('wordcount')
class WordFreqJob(HadoopJobTask):
    """Computes each word's relative frequency using the word-count output.

    Also exercises extra_files()/init_remote() file shipping.
    """
    def init_local(self):
        # Total token count, summed from WordCountJob's output before the
        # job is submitted; used to normalise the mapper's emissions.
        self.n = 0
        for line in self.input_local().open('r'):
            word, count = line.strip().split()
            self.n += int(count)

    def mapper(self, line):
        for word in line.strip().split():
            yield word, 1.0 / self.n

    def combiner(self, word, occurrences):
        yield word, sum(occurrences)

    def reducer(self, word, occurences):
        yield word, sum(occurences)

    def requires_local(self):
        return WordCountJob(self.use_hdfs)

    def requires_hadoop(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-2')

    def extra_files(self):
        fn = os.listdir('.')[0]  # Just return some file, doesn't matter which
        return [(fn, 'my_dir/my_file')]

    def init_remote(self):
        open('my_dir/my_file')  # make sure it exists
class MapOnlyJob(HadoopJobTask):
    """Mapper-only job: emits each word as a 1-tuple, no reduce phase."""
    def mapper(self, line):
        for word in line.strip().split():
            yield (word,)

    def requires_hadoop(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-3')
class UnicodeJob(HadoopJobTask):
    """Emits the same word as text and as bytes to test key grouping."""
    def mapper(self, line):
        yield u'test', 1
        yield b'test', 1

    def reducer(self, word, occurences):
        yield word, sum(occurences)

    def requires(self):
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-4')
class UseJsonAsDataInteterchangeFormatJob(HadoopJobTask):
    """Round-trips mapper/reducer records through JSON serialisation."""
    # Selects JSON (instead of the default) for mapper<->reducer records.
    data_interchange_format = "json"

    def mapper(self, line):
        yield "json", {"data type": "json"}

    def reducer(self, _, vals):
        yield "", json.dumps(list(vals)[0])

    def requires(self):
        """ Two lines from Word.task will cause two `mapper` call. """
        return Words(self.use_hdfs)

    def output(self):
        return self.get_output('luigitest-5')
class FailingJobException(Exception):
    """Raised by FailingJob to simulate a job that breaks during setup."""
    pass
class FailingJob(HadoopJobTask):
    """Job whose hadoop initialisation always fails, for error-path tests."""
    def init_hadoop(self):
        raise FailingJobException('failure')

    def output(self):
        return self.get_output('failing')
class MyStreamingJob(luigi.contrib.hadoop.JobTask):
    """Minimal job used only to check instantiation works (see issue #738)."""
    param = luigi.Parameter()
def read_wordcount_output(p):
    """Parse a word-count output target into a {word: count-string} dict."""
    counts = {}
    for record in p.open('r'):
        word, value = record.split()
        counts[word] = value
    return counts
class CommonTests(object):
    """Shared test bodies, parameterised by the caller's use_hdfs flag.

    Each static method receives the concrete TestCase instance so the same
    checks can run against both the local and the minicluster backend.
    """
    @staticmethod
    def test_run(test_case):
        job = WordCountJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = read_wordcount_output(job.output())
        # 'jk' occurs six times in the Words fixture.
        test_case.assertEqual(int(c['jk']), 6)
    @staticmethod
    def test_run_2(test_case):
        job = WordFreqJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = read_wordcount_output(job.output())
        # 6 occurrences of 'jk' out of 33 tokens in total.
        test_case.assertAlmostEquals(float(c['jk']), 6.0 / 33.0)
    @staticmethod
    def test_map_only(test_case):
        job = MapOnlyJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = []
        for line in job.output().open('r'):
            c.append(line.strip())
        test_case.assertEqual(c[0], 'kj')
        test_case.assertEqual(c[4], 'ljoi')
    @staticmethod
    def test_unicode_job(test_case):
        job = UnicodeJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = []
        for line in job.output().open('r'):
            c.append(line)
        # Make sure unicode('test') isnt grouped with str('test')
        # Since this is what happens when running on cluster
        test_case.assertEqual(len(c), 2)
        test_case.assertEqual(c[0], "test\t2\n")
    @staticmethod
    def test_use_json_as_data_interchange_format_job(test_case):
        job = UseJsonAsDataInteterchangeFormatJob(use_hdfs=test_case.use_hdfs)
        luigi.build([job], local_scheduler=True)
        c = []
        for line in job.output().open('r'):
            c.append(line)
        test_case.assertEqual(c, ['{"data type": "json"}\n'])
    @staticmethod
    def test_failing_job(test_case):
        # The build must report failure when init_hadoop raises.
        job = FailingJob(use_hdfs=test_case.use_hdfs)
        success = luigi.build([job], local_scheduler=True)
        test_case.assertFalse(success)
@attr('apache')
class MapreduceLocalTest(unittest.TestCase):
    """Runs the common map-reduce tests with the in-process local runner."""
    use_hdfs = False

    def run_and_check(self, args):
        # Returns luigi.run's exit status for a CLI-style invocation.
        run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args)
        return run_exit_status

    def test_run(self):
        CommonTests.test_run(self)

    def test_run_2(self):
        CommonTests.test_run_2(self)

    def test_map_only(self):
        CommonTests.test_map_only(self)

    def test_unicode_job(self):
        CommonTests.test_unicode_job(self)

    def test_use_json_as_data_interchange_format_job(self):
        CommonTests.test_use_json_as_data_interchange_format_job(self)

    def test_failing_job(self):
        CommonTests.test_failing_job(self)

    def test_instantiate_job(self):
        # See https://github.com/spotify/luigi/issues/738
        MyStreamingJob('param_value')

    def test_cmd_line(self):
        class DummyHadoopTask(luigi.contrib.hadoop.JobTask):
            param = luigi.Parameter()

            def run(self):
                # Fails unless the pool name made it into the jobconfs.
                if 'mypool' not in ''.join(self.jobconfs()):
                    raise ValueError("noooooo")
        self.assertTrue(self.run_and_check(['DummyHadoopTask', '--param', 'myparam', '--pool', 'mypool']))
        self.assertTrue(self.run_and_check(['DummyHadoopTask', '--param', 'myparam', '--hadoop-pool', 'mypool']))

    def setUp(self):
        # Each test starts from an empty in-memory filesystem.
        MockTarget.fs.clear()
@attr('minicluster')
class MapreduceIntegrationTest(minicluster.MiniClusterTestCase):
    """ Uses the Minicluster functionality to test this against Hadoop """
    use_hdfs = True

    def test_run(self):
        CommonTests.test_run(self)

    def test_run_2(self):
        CommonTests.test_run_2(self)

    def test_map_only(self):
        CommonTests.test_map_only(self)
    # TODO(erikbern): some really annoying issue with minicluster causes
    # test_unicode_job to hang

    def test_failing_job(self):
        CommonTests.test_failing_job(self)
@attr('apache')
class CreatePackagesArchive(unittest.TestCase):
    """Verifies create_packages_archive tars exactly the right files for a
    module, a package and its subpackages (tarfile.open is mocked, so only
    the add() calls are inspected)."""
    def setUp(self):
        # Make the fixture tree under test/create_packages_archive_root
        # importable for the duration of each test.
        sys.path.append(os.path.join('test', 'create_packages_archive_root'))

    def tearDown(self):
        sys.path.remove(os.path.join('test', 'create_packages_archive_root'))

    def _assert_module(self, add):
        # A lone module archives just its own .py file.
        add.assert_called_once_with('test/create_packages_archive_root/module.py',
                                    'module.py')

    def _assert_package(self, add):
        # A package archives its __init__, all submodules, subpackage and
        # egg-info metadata -- seven files in total.
        add.assert_any_call('test/create_packages_archive_root/package/__init__.py', 'package/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule.py', 'package/submodule.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule_with_absolute_import.py', 'package/submodule_with_absolute_import.py')
        add.assert_any_call('test/create_packages_archive_root/package/submodule_without_imports.py', 'package/submodule_without_imports.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/__init__.py', 'package/subpackage/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/submodule.py', 'package/subpackage/submodule.py')
        add.assert_any_call('test/create_packages_archive_root/package.egg-info/top_level.txt', 'package.egg-info/top_level.txt')
        assert add.call_count == 7

    def _assert_package_subpackage(self, add):
        add.assert_any_call('test/create_packages_archive_root/package/__init__.py', 'package/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/__init__.py', 'package/subpackage/__init__.py')
        add.assert_any_call('test/create_packages_archive_root/package/subpackage/submodule.py', 'package/subpackage/submodule.py')
        assert add.call_count == 3

    @mock.patch('tarfile.open')
    def test_create_packages_archive_module(self, tar):
        module = __import__("module", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([module], '/dev/null')
        self._assert_module(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package(self, tar):
        package = __import__("package", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule(self, tar):
        package_submodule = __import__("package.submodule", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_submodule], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule_with_absolute_import(self, tar):
        package_submodule_with_absolute_import = __import__("package.submodule_with_absolute_import", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_submodule_with_absolute_import], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_submodule_without_imports(self, tar):
        package_submodule_without_imports = __import__("package.submodule_without_imports", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_submodule_without_imports], '/dev/null')
        self._assert_package(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_subpackage(self, tar):
        package_subpackage = __import__("package.subpackage", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_subpackage], '/dev/null')
        self._assert_package_subpackage(tar.return_value.add)

    @mock.patch('tarfile.open')
    def test_create_packages_archive_package_subpackage_submodule(self, tar):
        package_subpackage_submodule = __import__("package.subpackage.submodule", None, None, 'dummy')
        luigi.contrib.hadoop.create_packages_archive([package_subpackage_submodule], '/dev/null')
        self._assert_package_subpackage(tar.return_value.add)
class MockProcess(object):
    """Stand-in for subprocess.Popen exposing stderr and poll().

    poll() reports the configured return code only once the fake stderr
    stream has been read to its end, mimicking a process that terminates
    when its output has been fully consumed.
    """
    def __init__(self, err_lines, returncode):
        stderr_text = ''.join(err_lines)
        self.__err_len = len(stderr_text)
        self.stderr = StringIO(stderr_text)
        self.__rc = returncode
        self.returncode = None

    def poll(self):
        # "Finished" as soon as the whole stderr buffer was consumed;
        # until then returncode stays None, like a running process.
        if self.stderr.tell() == self.__err_len:
            self.returncode = self.__rc
        return self.returncode
class KeyboardInterruptedMockProcess(MockProcess):
    """Mock process whose poll() raises KeyboardInterrupt once the mocked
    stderr stream has been fully consumed."""
    def __init__(self, err_lines):
        # A nominally successful process (rc 0); the interrupt is what
        # the tests care about.
        super(KeyboardInterruptedMockProcess, self).__init__(err_lines, 0)

    def poll(self):
        status = super(KeyboardInterruptedMockProcess, self).poll()
        if status is not None:
            raise KeyboardInterrupt
@attr('apache')
class JobRunnerTest(unittest.TestCase):
    """Tests run_and_track_hadoop_job's stderr parsing: tracking-URL
    extraction and job/application clean-up on KeyboardInterrupt
    (the subprocess module is mocked throughout)."""
    def setUp(self):
        self.tracking_urls = []

    def track(self, url):
        # Callback passed to run_and_track_hadoop_job; records every URL.
        self.tracking_urls.append(url)

    def _run_and_track(self, err_lines, returncode):
        with mock.patch('luigi.contrib.hadoop.subprocess') as subprocess:
            subprocess.Popen.return_value = MockProcess(err_lines, returncode)
            _, err = luigi.contrib.hadoop.run_and_track_hadoop_job([], self.track)
            # The full mocked stderr must be returned unchanged.
            self.assertEqual(err, ''.join(err_lines))

    def test_tracking_url_yarn(self):
        url = 'http://example.jobtracker.com:8080/proxy/application_1234_5678/'
        yarn_lines = [
            "INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1234_5678\n",
            "INFO impl.YarnClientImpl: Submitted application application_1234_5678\n",
            "INFO mapreduce.Job: The url to track the job: %s\n" % url,
            "INFO mapreduce.Job: Running job: job_1234_5678\n",
            "INFO mapreduce.Job: Job job_1234_5678 running in uber mode : false\n",
            "INFO mapreduce.Job: Job job_1234_5678 completed successfully\n",
        ]
        self._run_and_track(yarn_lines, 0)
        self.assertEqual([url], self.tracking_urls)

    def test_tracking_url_old_version(self):
        # Pre-YARN Hadoop used a different log line for the tracker URL.
        url = 'http://tracker.com/1234_5678'
        err_lines = [
            'INFO tracking url: %s\n' % url,
        ]
        self._run_and_track(err_lines, 0)
        self.assertEqual([url], self.tracking_urls)

    def test_multiple_tracking_urls(self):
        # Multi-stage jobs emit one tracking URL per stage; all are kept.
        urls = [
            'http://tracking/1',
            'http://tracking/2',
            'http://tracking/3',
        ]
        err_lines = [
            'running...\n',
            'The url to track the job: %s\n' % urls[0],
            'done\n',
            'running another stage...\n',
            'The url to track the job: %s\n' % urls[1],
            'done\n',
            'running another stage...\n',
            'The url to track the job: %s\n' % urls[2],
            'done\n',
        ]
        self._run_and_track(err_lines, 0)
        self.assertEqual(urls, self.tracking_urls)

    def test_tracking_url_captured_on_fail(self):
        # Even a failing job (non-zero rc) must report its tracking URL.
        url = 'http://tracking/'
        err_lines = [
            'The url to track the job: %s\n' % url,
        ]
        with self.assertRaises(luigi.contrib.hadoop.HadoopJobError):
            self._run_and_track(err_lines, 1)
        self.assertEqual([url], self.tracking_urls)

    def _run_and_track_with_interrupt(self, err_lines):
        # NOTE(review): the mock process is passed where the tracking-url
        # callback goes; appears unused on the interrupt path -- confirm
        # against run_and_track_hadoop_job's signature.
        proc = KeyboardInterruptedMockProcess(err_lines)
        with mock.patch('luigi.contrib.hadoop.subprocess') as subprocess:
            subprocess.Popen.return_value = proc
            with self.assertRaises(KeyboardInterrupt):
                luigi.contrib.hadoop.run_and_track_hadoop_job([], proc)
        return subprocess

    def test_kill_job_on_interrupt(self):
        job_id = 'job_1234_5678'
        err_lines = [
            'FlowStep: [SomeJob()] submitted hadoop job: %s\n' % job_id,
            'some other line\n',
        ]
        subprocess = self._run_and_track_with_interrupt(err_lines)
        subprocess.call.assert_called_once_with(['mapred', 'job', '-kill', job_id])

    def test_kill_last_mapreduce_on_interrupt(self):
        # When several job ids appear, only the most recent one is killed.
        job_id = 'job_1234_5678'
        err_lines = [
            'FlowStep: [SomeJob()] submitted hadoop job: job_0000_0000\n',
            'FlowStep: [SomeJob()] submitted hadoop job: %s\n' % job_id,
            'some other line\n',
        ]
        subprocess = self._run_and_track_with_interrupt(err_lines)
        subprocess.call.assert_called_once_with(['mapred', 'job', '-kill', job_id])

    def test_kill_application_on_interrupt(self):
        # A YARN application id takes precedence over the mapreduce job id.
        application_id = 'application_1234_5678'
        err_lines = [
            'YarnClientImpl: Submitted application %s\n' % application_id,
            'FlowStep: [SomeJob()] submitted hadoop job: job_1234_5678\n',
        ]
        subprocess = self._run_and_track_with_interrupt(err_lines)
        subprocess.call.assert_called_once_with(['yarn', 'application', '-kill', application_id])

    def test_kill_last_application_on_interrupt(self):
        application_id = 'application_1234_5678'
        err_lines = [
            'YarnClientImpl: Submitted application application_0000_0000\n',
            'FlowStep: [SomeJob()] submitted hadoop job: job_0000_0000\n',
            'YarnClientImpl: Submitted application %s\n' % application_id,
            'FlowStep: [SomeJob()] submitted hadoop job: job_1234_5678\n',
        ]
        subprocess = self._run_and_track_with_interrupt(err_lines)
        subprocess.call.assert_called_once_with(['yarn', 'application', '-kill', application_id])
| adaitche/luigi | test/contrib/hadoop_test.py | Python | apache-2.0 | 18,230 |
"""
The Chute Builder interprets the chute specification, which comes from a
paradrop.yaml file or from a JSON object via a cloud update. The Chute
Builder then produces a valid Chute ojbect with one or more Service
objects.
There are different versions of the chute specification in code
repositories, and so the intent is to encapsulate this complexity within
the ChuteBuilder implementation. Other code modules, especially the
execution plan pipeline, should be able to rely on a relatively stable
Chute and Service object interface.
+----------------------------+ +---------------+
| | interpret | |
| Chute Specification | <-----------------+ Chute Builder |
| serialized as YAML or JSON | | |
| in a variety of formats | ++-------------++
| | | |
+----------------------------+ | construct |
| |
v |
|
+----------------+ +-------+ |
| | | | |
| Execution Plan | | Chute | v
| -------------- +-----------------------> | | has 1+
| function 1 | +-------o---+---------+
| ... | consume | |
| function N | | Service |
| +-----------------------------------> | |
+----------------+ +---------+
"""
import six
from .chute import Chute
from .service import Service
WIRELESS_OPTIONS = set([
"ssid",
"key",
"nasid",
"acct_server",
"acct_secret",
"acct_interval",
"hidden",
"isolate",
"maxassoc"
])
def fix_interface_type(interface):
    """Normalize an interface 'type' value.

    Old-style specs used type "wifi" plus a separate "mode" field
    (default "ap"); these are folded into the new-style "wifi-<mode>"
    form.  Any other type is returned unchanged.
    """
    iface_type = interface.get("type")
    if iface_type != "wifi":
        return iface_type
    return "wifi-{}".format(interface.get("mode", "ap"))
def fix_wireless_options(interface):
    """Collect wireless settings scattered across an old-style interface.

    Legacy specs placed wireless options (ssid, key, ...) either at the
    top level of the interface or under "options"; merge any such keys
    into the "wireless" sub-dictionary and return it.  The interface's
    own "wireless" dict, when present, is updated in place, and values
    found under "options" override top-level ones.
    """
    wireless = interface.get("wireless", {})
    # Same precedence as before: top-level keys first, then "options".
    for source in (interface, interface.get("options", {})):
        for key, value in source.items():
            if key in WIRELESS_OPTIONS:
                wireless[key] = value
    return wireless
class ChuteBuilder(object):
    """
    Build a composite chute object from a chute specification.
    Implementations of ChuteBuilder are responsible for interpreting
    the chute specification, which comes from a paradrop.yaml file or
    JSON object via cloud update. The ChuteBuilder then produces a valid
    Chute object with one or more Service objects.
    """
    def configure_chute(self, spec):
        # Copy simple metadata fields onto the chute, defaulting to
        # "unknown" when the spec omits them.
        for field in ["name", "version", "description"]:
            value = spec.get(field, "unknown")
            setattr(self.chute, field, value)

    def create_chute(self, spec):
        # Abstract: subclasses construct self.chute from the spec.
        return NotImplemented

    def create_services(self, spec):
        # Abstract: subclasses attach Service objects to self.chute.
        return NotImplemented

    def get_chute(self):
        # Returns the chute assembled by create_chute/create_services.
        return self.chute
class SingleServiceChuteBuilder(ChuteBuilder):
    """
    Build a pre-0.12 single-service chute.
    ** Example configuration**:
    .. sourcecode:: yaml
       name: seccam
       description: A Paradrop chute that performs motion detection using a simple WiFi camera.
       version: 1
       net:
         wifi:
           type: wifi
           intfName: wlan0
           dhcp:
             start: 4
             limit: 250
             lease: 12h
           ssid: seccam42
           key: paradropseccam
           options:
             isolate: true
             maxassoc: 100
       web:
         port: 5000
    """
    # Fields that should be present in updates but not chute objects.
    UpdateSpecificArgs = ['deferred']

    def create_chute(self, spec):
        # Translate the flat legacy spec into a Chute; the single implicit
        # service is added separately by create_services.
        self.chute = Chute()
        self.chute.name = spec.get("name")
        self.chute.description = spec.get("description", None)
        self.chute.environment = spec.get("environment", {})
        self.chute.owner = spec.get("owner", None)
        self.chute.version = spec.get("version", None)
        config = spec.get("config", {})
        self.chute.config = config
        self.chute.web = config.get("web", {})
        # Old chutes did not specify the web service because it was implied.
        if "port" in self.chute.web:
            self.chute.web['service'] = "main"

    def create_services(self, spec):
        # Legacy chutes have exactly one implicit service named "main".
        service = Service(chute=self.chute, name="main")
        service.command = spec.get("command", None)
        service.image = spec.get("use", None)
        service.source = "."
        service.type = spec.get("type", "normal")
        config = spec.get("config", {})
        # Covert old-style structure with arbitrary names to new-style that is
        # indexed by interface name.
        interfaces = {}
        for iface in config.get("net", {}).values():
            iface['type'] = fix_interface_type(iface)
            if iface['type'].startswith("wifi"):
                iface['wireless'] = fix_wireless_options(iface)
            interfaces[iface['intfName']] = iface
        requests = {}
        requests['as-root'] = spec.get("as_root", False)
        try:
            requests['port-bindings'] = config['host_config']['port_bindings']
        except KeyError:
            requests['port-bindings'] = {}
        # Find extra build options, particularly for light chutes.
        build = {}
        for key in ["image_source", "image_version", "packages"]:
            if key in config:
                build[key] = config[key]
        service.build = build
        service.environment = config.get("environment", {})
        service.interfaces = interfaces
        service.requests = requests
        self.chute.add_service(service)
class MultiServiceChuteBuilder(ChuteBuilder):
    """
    Build a modern multi-service chute.
    ** Example configuration**:
    .. sourcecode:: yaml
       name: seccam
       description: A Paradrop chute that performs motion detection using a simple WiFi camera.
       version: 1
       services:
         main:
           type: light
           source: .
           image: python2
           command: python -u seccam.py
           environment:
             IMAGE_INTERVAL: 2.0
             MOTION_THRESHOLD: 40.0
             SECCAM_MODE: detect
           interfaces:
             wlan0:
               type: wifi-ap
               dhcp:
                 leasetime: 12h
                 limit: 250
                 start: 4
               wireless:
                 ssid: seccam42
                 key: paradropseccam
                 hidden: false
                 isolate: true
               requirements:
                 hwmode: 11g
           requests:
             as-root: true
             port-bindings:
               - external: 81
                 internal: 81
         db:
           type: image
           image: mongo:3.0
       web:
         service: main
         port: 5000
    """
    def create_chute(self, spec):
        # Top-level metadata maps directly onto the Chute object.
        self.chute = Chute({})
        for field in ["name", "version", "description", "owner"]:
            value = spec.get(field, "unknown")
            setattr(self.chute, field, value)
        self.chute.environment = spec.get("environment", {})
        self.chute.web = spec.get("web", {})

    def create_services(self, spec):
        # One Service per entry in the spec's "services" mapping.
        for name, spec in six.iteritems(spec.get("services", {})):
            service = Service(chute=self.chute, name=name)
            service.command = spec.get("command", None)
            service.image = spec.get("image", None)
            service.source = spec.get("source", ".")
            service.type = spec.get("type", "normal")
            service.build = spec.get("build", {})
            service.environment = spec.get("environment", {})
            service.interfaces = spec.get("interfaces", {})
            service.requests = spec.get("requests", {})
            # Make sure the intfName field exists for code that depends on it.
            for name, iface in six.iteritems(service.interfaces):
                iface['intfName'] = name
            self.chute.add_service(service)
def build_chute(spec):
    """Construct a Chute (with its Services) from a chute specification.

    The presence of a "services" section selects the modern multi-service
    format; otherwise the legacy single-service format is assumed.
    """
    builder_cls = MultiServiceChuteBuilder if 'services' in spec else SingleServiceChuteBuilder
    builder = builder_cls()
    builder.create_chute(spec)
    builder.create_services(spec)
    return builder.get_chute()
def rebuild_chute(spec, updates):
    """Apply selected top-level overrides to *spec* and rebuild the chute.

    Note: *spec* is modified in place before being rebuilt.
    """
    overridable = ("name", "version", "description", "owner", "environment", "web")
    for field in overridable:
        if field in updates:
            spec[field] = updates[field]
    return build_chute(spec)
| ParadropLabs/Paradrop | paradrop/daemon/paradrop/core/chute/builder.py | Python | apache-2.0 | 8,996 |
import pika
import yaml
import threading
import sys
# The topic to which service instantiation requests
# of the GK are published
SERVICE_CREATE_TOPIC = 'service.instances.create'
# The topic to which available vims are published
INFRA_ADAPTOR_AVAILABLE_VIMS = 'infrastructure.management.compute.list'
# The topic to which service instance deploy replies of the Infrastructure Adaptor are published
INFRA_ADAPTOR_INSTANCE_DEPLOY_REPLY_TOPIC = "infrastructure.service.deploy"
# This method reads the VNFDs and NSD from file and returns them serialized.
# The network service consists of three network functions: firewall, iperf
# and tcpdump.
def buildRequest():
    """Read the NSD and the three VNFDs from disk and serialize them.

    Returns:
        A YAML string with keys NSD, VNFD1, VNFD2 and VNFD3.
    """
    path_descriptors = 'resources/test_descriptors/'
    descriptor_files = {
        'NSD': 'sonata-demo.yml',
        'VNFD1': 'firewall-vnfd.yml',
        'VNFD2': 'iperf-vnfd.yml',
        'VNFD3': 'tcpdump-vnfd.yml',
    }
    service_request = {}
    for key, filename in descriptor_files.items():
        # Context manager closes each descriptor (the original leaked the
        # handles); safe_load because yaml.load without an explicit Loader
        # is unsafe and deprecated for plain-data documents like these.
        with open(path_descriptors + filename, 'r') as descriptor:
            service_request[key] = yaml.safe_load(descriptor)
    return yaml.dump(service_request)
def send_gk_request():
    """Simulate the GK: publish the instantiation request and consume replies.

    Builds the request payload, publishes it under the
    'service.instances.create' topic with a reply-to queue, then blocks
    consuming that queue; gk_receive terminates the script on a final reply.
    """
    payload = buildRequest()
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    channel.exchange_declare(exchange='son-kernel', type='topic')
    reply_queue = channel.queue_declare(exclusive=True).method.queue
    channel.queue_bind(exchange='son-kernel',
                       queue=reply_queue,
                       routing_key=SERVICE_CREATE_TOPIC)
    channel.basic_consume(gk_receive, queue=reply_queue, no_ack=False)
    channel.basic_publish(exchange='son-kernel',
                          routing_key=SERVICE_CREATE_TOPIC,
                          body=payload,
                          properties=pika.BasicProperties(reply_to=reply_queue,
                                                          correlation_id='123213'))
    channel.start_consuming()
# Callback method of the GK, called for replies on 'service.instances.create'.
def gk_receive(ch, method, properties, body):
    """Handle a GK reply and terminate the test accordingly.

    Exits with status 0 once the service reports READY, and with -1 when the
    reply carries a non-None error. Other replies are ignored.
    """
    message = body.decode("utf-8")
    # safe_load: replies are plain YAML data and yaml.load without an
    # explicit Loader is unsafe/deprecated.
    response = yaml.safe_load(message)
    # NOTE(review): success also requires an 'error' key to be present in
    # the reply; this mirrors the original check -- confirm it matches the
    # SLM reply schema.
    if 'error' in response and 'status' in response and response['status'] == 'READY':
        print ("Test finished.")
        sys.exit(0)
    elif 'error' in response and response['error'] is not None:
        print (response)
        print ("Test failed.")
        sys.exit(-1)
def listen_resource_availability():
    """Simulate the IA side that answers VIM-list requests.

    Binds an exclusive queue to the 'infrastructure.management.compute.list'
    topic and dispatches each incoming request to available_vims.
    """
    conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    ch = conn.channel()
    ch.exchange_declare(exchange='son-kernel', type='topic')
    queue = ch.queue_declare(exclusive=True).method.queue
    ch.queue_bind(exchange='son-kernel',
                  queue=queue,
                  routing_key=INFRA_ADAPTOR_AVAILABLE_VIMS)
    ch.basic_consume(available_vims, queue=queue, no_ack=True)
    ch.start_consuming()
def listen_deploy_ns():
    """Simulate the IA side that answers service-deployment requests.

    Binds an exclusive queue to the 'infrastructure.service.deploy' topic
    and dispatches each incoming request to deployment_request_received.
    """
    conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    ch = conn.channel()
    ch.exchange_declare(exchange='son-kernel', type='topic')
    queue = ch.queue_declare(exclusive=True).method.queue
    ch.queue_bind(exchange='son-kernel',
                  queue=queue,
                  routing_key=INFRA_ADAPTOR_INSTANCE_DEPLOY_REPLY_TOPIC)
    ch.basic_consume(deployment_request_received, queue=queue, no_ack=True)
    ch.start_consuming()
def main():
    """Run the integration test on three daemon threads.

    Two threads act as the IA (resource availability and deployment
    listeners); the third acts as the GK issuing the deployment request.
    The process lives until the GK thread finishes (its callback calls
    sys.exit when the test concludes).
    """
    for listener in (listen_resource_availability, listen_deploy_ns):
        worker = threading.Thread(target=listener)
        worker.daemon = True
        worker.start()
    gk_thread = threading.Thread(target=send_gk_request)
    gk_thread.daemon = True
    gk_thread.start()
    gk_thread.join()
# Callback for 'infrastructure.management.compute.list' requests.
def available_vims(ch, method, properties, body):
    """Reply to a VIM-list request with the canned list from resources/vims.yml.

    The VIM entries are meaningless; they exist only so the SLM has
    something to select during the test. Only reacts to the empty request
    payload '{}' so our own reply is not answered again.
    """
    message = body.decode("utf-8")
    if message == '{}':
        # Context manager closes the file (the original leaked the handle);
        # safe_load avoids the unsafe default yaml.load behaviour; the
        # variable no longer shadows the builtin 'dict'.
        with open('resources/vims.yml', 'r') as vim_file:
            vims = yaml.safe_load(vim_file)
        ch.basic_publish(exchange='son-kernel',
                         routing_key=INFRA_ADAPTOR_AVAILABLE_VIMS,
                         body=yaml.dump(vims),
                         properties=pika.BasicProperties(correlation_id=properties.correlation_id))
# Callback method called when a message with 'infrastructure.service.deploy' topic is sent.
# It reads the nsr and vnfrs which the IA should build after deploying the service, and sends
# them back over RabbitMQ. This response should be read by SLM.
def deployment_request_received(ch, method, properties, body):
    """Simulated IA reply to a deployment request: publish the canned NSR."""
    msg = yaml.load(body)  # NOTE(review): yaml.load without Loader is unsafe/deprecated -- prefer safe_load
    # Only react to messages without a 'request_status' field (presumably
    # the initial request from the SLM, as replies carry a status).
    if 'request_status' not in msg:
        ia_nsr = yaml.load(open('resources/test_records/ia-nsr.yml','r'))  # NOTE(review): file handle is never closed
        #print ("Sending message: " + yaml.dump(ia_nsr))
        # NOTE(review): the reply is published under INFRA_ADAPTOR_AVAILABLE_VIMS,
        # but the comment above says it should go out on the same (deploy) topic,
        # i.e. INFRA_ADAPTOR_INSTANCE_DEPLOY_REPLY_TOPIC -- confirm which the SLM expects.
        ch.basic_publish(exchange='son-kernel', routing_key=INFRA_ADAPTOR_AVAILABLE_VIMS, body=yaml.dump(ia_nsr), properties=pika.BasicProperties(correlation_id=properties.correlation_id))
# Script entry point: start the GK/IA simulator threads.
if __name__ == '__main__':
    main()
| sonata-nfv/son-tests | int-slm-repositories/scripts/gk_ia_simulator.py | Python | apache-2.0 | 6,651 |
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import ige
import ige.Authentication
from ige.IMarshal import IMarshal, IPacket
from ige import ServerStatusException, log
import httplib, urllib
import exceptions
import time
from binascii import hexlify
import threading
from ige.Const import OID_ADMIN
# Pseudo-message codes delivered to the registered msgHandler immediately
# before and after every remote call (see IProxy.__call__).
MSG_CMD_BEGIN = -1000
MSG_CMD_END = -1001
class IClientException(Exception):
    """Raised when the IGE client cannot reach or talk to the game server."""
class IClient:
    """Client side of the IGE RPC protocol.

    Wraps an HTTP connection (direct or via proxy) to the game server and
    exposes the remote API as ordinary method calls dispatched through
    IProxy. Unknown attribute lookups are also turned into remote calls via
    __getattr__.
    """
    def __init__(self, server, proxy, msgHandler, idleHandler, clientIdent, keepAliveTime = 180):
        # server: "host[:port]" of the game server
        # proxy: optional HTTP proxy address (falsy for a direct connection)
        # msgHandler: callback(messageID, data) for server-pushed messages
        # idleHandler: callback invoked while waiting on slow responses
        # clientIdent: identification string sent in the hello handshake
        # keepAliveTime: seconds of inactivity after which a ping is due
        self.clientIdent = clientIdent
        self.gameID = None
        self.server = server
        self.logged = 0
        self.sid = None  # session id assigned by the server in hello()
        self.httpConn = None  # lazily created HTTP (or proxy) connection
        self.keepAliveTime = keepAliveTime
        self.proxy = proxy
        self.msgHandler = msgHandler
        self.idleHandler = idleHandler
        self.lastCommand = time.time()
        self.hostID = 'FILLMEWITHREALVALUE'
        self.statsBytesIn = 0
        self.statsBytesOut = 0
        self.lastClientVersion = None
    def connect(self):
        """Perform the hello handshake; raises IClientException on failure."""
        # to enable sending commands
        self.connected = 1
        # create connection
        log.debug('Connecting to the server', self.server)
        # send hello message
        log.debug('Sending hello')
        try:
            self.sid, self.challenge = self.hello(self.clientIdent)
        except:
            log.warning('Cannot connect to the server.')
            self.connected = 0
            raise IClientException('Cannot connect to the server.')
        log.debug(self.sid, self.challenge)
    def login(self, gameID, login, password):
        """Log into a game; returns 1 on success, 0 on failure."""
        self.gameID = gameID.encode("ascii")
        # hash password with challenge
        passwd = ige.Authentication.encode(password, self.challenge)
        #@log.debug(login, password, passwd)
        try:
            IProxy('login', None, self)(login, passwd, self.hostID)
        except:
            log.warning('login failed')
            return 0
        log.debug('login succeeded')
        self.logged = 1
        return 1
    def logout(self):
        self.logged = 0
        return IProxy('logout', None, self)()
    def shutdown(self):
        self.logged = 0
        return IProxy('shutdown', None, self)()
    # --- server information calls, forwarded verbatim through IProxy ---
    def hello(self, clientID):
        return IProxy('hello', None, self)(clientID)
    def getVersion(self):
        return IProxy('getVersion', None, self)()
    def getAccountData(self):
        return IProxy('getAccountData', None, self)()
    def getRegisteredGames(self):
        return IProxy('getRegisteredGames', None, self)()
    def cleanupSessions(self):
        return IProxy('cleanupSessions', None, self)()
    def reloadAccounts(self):
        return IProxy('reloadAccounts', None, self)()
    # --- account management; passwords are hashed with the login challenge ---
    def createAccount(self, login, password, nick, email):
        safePassword = ige.Authentication.encode(password, self.challenge)
        return IProxy('createAccount', None, self)(login, safePassword, nick, email)
    def exportAccounts(self):
        return IProxy('exportAccounts', None, self)()
    def changePassword(self, old, new):
        safeOld = ige.Authentication.encode(old, self.challenge)
        safeNew = ige.Authentication.encode(new, self.challenge)
        return IProxy('changePassword', None, self)(safeOld, safeNew)
    # --- game booking ---
    def getBookingAnswers(self):
        return IProxy('getBookingAnswers', None, self)()
    def getBookingOffers(self):
        return IProxy('getBookingOffers', None, self)()
    def toggleBooking(self, bookID, password):
        return IProxy('toggleBooking', None, self)(bookID, password)
    def createPrivateBooking(self, bookID, password):
        return IProxy('createPrivateBooking', None, self)(bookID, password)
    def deletePrivateBooking(self, bookID):
        return IProxy('deletePrivateBooking', None, self)(bookID)
    # --- in-game calls; require a selected game (self.gameID is set) ---
    def selectPlayer(self, playerID):
        return IProxy('%s.selectPlayer' % self.gameID, None, self)(playerID)
    def selectAdmin(self):
        # Selecting the admin player is just selectPlayer with OID_ADMIN.
        return IProxy('%s.selectPlayer' % self.gameID, None, self)(OID_ADMIN)
    def createNewPlayer(self, galaxyID):
        return IProxy('%s.createNewPlayer' % self.gameID, None, self)(galaxyID)
    def takeOverAIPlayer(self, playerID):
        return IProxy('%s.takeOverAIPlayer' % self.gameID, None, self)(playerID)
    def takeOverPirate(self, playerID, password):
        return IProxy('%s.takeOverPirate' % self.gameID, None, self)(playerID, password)
    def getActivePositions(self):
        return IProxy('%s.getActivePositions' % self.gameID, None, self)()
    def getStartingPositions(self):
        return IProxy('%s.getStartingPositions' % self.gameID, None, self)()
    # --- turn processing / administration ---
    def processTurn(self):
        return IProxy('%s.processTurn' % self.gameID, None, self)()
    def processTurns(self, turns):
        return IProxy('%s.processTurn' % self.gameID, None, self)(turns)
    def backup(self, basename):
        return IProxy('%s.backup' % self.gameID, None, self)(basename)
    def commitDatabases(self):
        return IProxy('%s.commitDatabases' % self.gameID, None, self)()
    def getTurnData(self):
        return IProxy('%s.getTurnData' % self.gameID, None, self)()
    def turnFinished(self):
        return IProxy('%s.turnFinished' % self.gameID, None, self)()
    def doKeepAlive(self):
        # True when logged in and idle longer than keepAliveTime seconds.
        return ((time.time() - self.lastCommand) > self.keepAliveTime) and self.logged
    def keepAlive(self):
        return IProxy('ping', None, self)()
    def __nonzero__(self):
        # Python 2 truth protocol: a client instance is always truthy
        # (prevents __getattr__ from being consulted for truth testing).
        return 1
    def __getattr__(self, name):
        """Turn any unknown attribute access into a remote call proxy."""
        if self.gameID:
            return IProxy('%s.execute' % self.gameID, name, self)
        else:
            return IProxy(name, None, self)
class ProtocolException(Exception):
def __init__(self, url, errcode, errmsg, headers):
self.url = url
self.errcode = errcode
self.errmsg = errmsg
self.headers = headers
def __repr__(self):
return "<ProtocolError for %s: %s %s>" % (self.url, self.errcode, self.errmsg)
def __str__(self):
return '%s %s' % (self.errcode, self.errmsg)
class IProxy:
    """Callable performing one remote method invocation over HTTP.

    Encodes the request with IMarshal, POSTs it to /IGERPC (directly or via
    an HTTP proxy), reads the reply on a worker thread so the client's idle
    handler keeps running, then decodes the result or re-raises a remote
    exception.
    """
    def __init__(self, method, command, client):
        # method: remote method name (e.g. 'login' or '<gameID>.execute')
        # command: optional sub-command prepended to the parameter list
        self.client = client
        self.method = method
        self.command = command
        self.marshal = IMarshal()
    def __call__(self, *args):
        """Invoke the remote method, retrying transient server states."""
        if self.client.msgHandler:
            self.client.msgHandler(MSG_CMD_BEGIN, None)
        # retry 'turn in progress' and server restart situations
        retries = 10
        ok = 0
        while retries > 0:
            try:
                result = self.processCall(args)
                ok = 1
                break
            except ServerStatusException, e:
                log.warning("Cannot complete request - retrying...")
                retries -= 1
                time.sleep(1)
            # this was commented out
            except Exception, e:
                log.warning("Cannot complete request")
                if self.client.msgHandler:
                    self.client.msgHandler(MSG_CMD_END, None)
                raise e
        if self.client.msgHandler:
            self.client.msgHandler(MSG_CMD_END, None)
        if ok:
            return result
        else:
            raise IClientException('Cannot send request to the server')
    def processCall(self, args):
        """Perform a single request/response round trip; returns the result."""
        if not self.client.connected:
            raise IClientException('Not connected.')
        # record time of command
        self.client.lastCommand = time.time()
        # packet init
        packet = IPacket()
        packet.sid = self.client.sid
        packet.method = self.method
        if self.command:
            packet.params = [self.command]
            packet.params.extend(args)
        else:
            packet.params = args
        log.debug('calling', packet.method, packet.params)
        # encode
        # V11
        # data = self.marshal.encode(packet).encode('utf-8')
        data = self.marshal.encode(packet)
        self.client.statsBytesOut += len(data)
        #@log.debug('->', data)
        # make call
        # init connection (cached on the client between calls)
        if self.client.proxy:
            # use urllib
            if not self.client.httpConn:
                log.debug('Using proxy', self.client.proxy)
                self.client.httpConn = urllib.FancyURLopener({'http': self.client.proxy})
        else:
            if self.client.httpConn:
                h = self.client.httpConn
            else:
                h = httplib.HTTPConnection(self.client.server)
                self.client.httpConn = h
        try:
            if self.client.proxy:
                fh = self.client.httpConn.open('http://%s/IGERPC' % self.client.server, data)
                # use thread to read response and invoke idle handler
                # regularly
                reader = Reader(fh.read)
                reader.start()
                while reader.isAlive():
                    reader.join(0.1)
                    if self.client.idleHandler:
                        self.client.idleHandler()
                if reader.exception:
                    raise reader.exception
                else:
                    rspData = reader.result
                # end of thread dispatcher
                fh.close()
            else:
                h.putrequest('POST', '/IGERPC')
                # required by HTTP/1.1
                h.putheader('Host', self.client.server)
                # required by IGE-RPC
                h.putheader("Content-Type", "text/plain")
                h.putheader("Content-Length", str(len(data)))
                h.endheaders()
                h.send(data)
                # use thread to read response and invoke idle handler
                # regularly
                reader = Reader(h.getresponse)
                reader.start()
                while reader.isAlive():
                    reader.join(0.1)
                    if self.client.idleHandler:
                        self.client.idleHandler()
                if reader.exception:
                    raise reader.exception
                else:
                    response = reader.result
                # end of thread dispatcher
                if response.status != 200:
                    raise ProtocolException(self.client.server + '/IGERPC',
                        response.status, response.reason, response.msg)
                # use thread to read response and invoke idle handler
                # regularly
                reader = Reader(response.read)
                reader.start()
                while reader.isAlive():
                    reader.join(0.1)
                    if self.client.idleHandler:
                        self.client.idleHandler()
                if reader.exception:
                    raise reader.exception
                else:
                    rspData = reader.result
                # end of thread dispatcher
        except Exception, e:
            # Any transport failure invalidates the session/connection.
            log.warning('Cannot send request to the server')
            self.client.logged = 0
            self.client.connected = 0
            raise IClientException('Cannot send request to the server')
        #@log.debug('<-', rspData)
        # V11
        # packet = self.marshal.decode(unicode(rspData, "utf-8"))
        self.client.statsBytesIn += len(rspData)
        packet = self.marshal.decode(rspData)
        if packet.exception:
            #@log.debug('raising exception', packet.exception)
            # NOTE(review): eval of a server-supplied exception name is
            # dangerous if the server cannot be trusted.
            exception = eval('%s()' % packet.exception[0])
            exception.args = packet.exception[1]
            raise exception
        # process messages in packet.messages
        if self.client.msgHandler and packet.messages:
            for message in packet.messages:
                log.debug('got message', message)
                self.client.msgHandler(*message)
        elif packet.messages:
            log.debug('throwing away messages', packet.messages)
        log.debug("Stats: %d B IN / %d B OUT" % (self.client.statsBytesIn, self.client.statsBytesOut))
        return packet.result
class Reader(threading.Thread):
    """Runs a blocking callable on a worker thread.

    The return value (or the raised exception) is stored on the instance so
    the caller can poll isAlive()/join() while servicing its idle handler.
    """
    def __init__(self, callable):
        # NOTE: the parameter name shadows the builtin 'callable'.
        threading.Thread.__init__(self)
        self.callable = callable
        self.result = None
        self.exception = None
    def run(self):
        try:
            self.result = self.callable()
        except Exception, e:
            self.exception = e
| ospaceteam/outerspace | server/lib/igeclient/IClient.py | Python | gpl-2.0 | 13,106 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import sys
import tarfile
import zipfile
import funciones
ruta = funciones.CreaDirectorioTrabajo()
def main():
    """Create <ruta>\\github.zip containing every *.github file in ruta.

    Returns:
        The path of the created zip archive.
    """
    zip_path = '%s\\github.zip' % ruta
    # Delete any previous archive first: the zip is opened in append mode,
    # so a stale file would otherwise accumulate duplicate entries.
    funciones.BoraFichero(zip_path)
    members = glob.glob("%s\\*.github" % ruta)
    # Context manager guarantees the archive is closed even on error
    # (replaces the original try/finally with its leftover debug print).
    with zipfile.ZipFile(zip_path, mode='a') as zf:
        for path in members:
            # Store only the file name, stripping the directory components.
            zf.write(path, path.split('\\')[-1], compress_type=zipfile.ZIP_DEFLATED)
    return zip_path
def ComprimeTar():
    """Create <ruta>\\github.tar.bz2 containing every *.github file in ruta.

    Returns:
        The path of the created tar.bz2 archive.
    """
    tar_path = '%s\\github.tar.bz2' % ruta
    funciones.BoraFichero(tar_path)
    members = glob.glob("%s\\*.github" % ruta)
    # Open with 'w:bz2' so the archive is actually bz2-compressed: the
    # original used plain 'w', producing an uncompressed tar despite the
    # .tar.bz2 extension. The context manager also guarantees the close.
    with tarfile.open(tar_path, 'w:bz2') as tar:
        for path in members:
            # Store only the file name, stripping the directory components.
            tar.add(path, path.split('\\')[-1])
    return tar_path
#ComprimeTar()
| procamora/Testeador-de-Red | Compresion.py | Python | gpl-3.0 | 1,264 |
'''
* @author [Zizhao Zhang]
* @email [zizhao@cise.ufl.edu]
* @create date 2017-05-25 02:20:32
* @modify date 2017-05-25 02:20:32
* @desc [description]
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os, sys
import numpy as np
import scipy.misc as misc
from model import UNet
from utils import dice_coef, dice_coef_loss
from loader import dataLoader, deprocess
from PIL import Image
from utils import VIS, mean_IU
# configure args
from opts import *
from opts import dataset_mean, dataset_std # set them in opts
# Visualization/score accumulator; outputs are written next to the checkpoint.
vis = VIS(save_path=opt.load_from_checkpoint)
# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grab GPU memory on demand, not all upfront
sess = tf.Session(config=config)
# define data loader (validation split, batch size 1, evaluation mode)
img_shape = [opt.imSize, opt.imSize]
test_generator, test_samples = dataLoader(opt.data_path+'/val/', 1, img_shape, train_mode=False)
# define model, the last dimension is the channel
label = tf.placeholder(tf.int32, shape=[None]+img_shape)
with tf.name_scope('unet'):
    model = UNet().create_model(img_shape=img_shape+[3], num_class=opt.num_class)
    img = model.input
    pred = model.output
# define loss
with tf.name_scope('cross_entropy'):
    cross_entropy_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=pred))
saver = tf.train.Saver() # must be added in the end
''' Main '''
init_op = tf.global_variables_initializer()
sess.run(init_op)
with sess.as_default():
    # restore from a checkpoint if exists
    try:
        saver.restore(sess, opt.load_from_checkpoint)
        print ('--> load from checkpoint '+opt.load_from_checkpoint)
    except:
        print ('unable to load checkpoint ...')
        sys.exit(0)
    dice_score = 0  # NOTE(review): never updated or read below -- appears to be dead
    for it in range(0, test_samples):
        x_batch, y_batch = next(test_generator)
        # tensorflow wants a different tensor order
        feed_dict = {
            img: x_batch,
            label: y_batch
        }
        loss, pred_logits = sess.run([cross_entropy_loss, pred], feed_dict=feed_dict)
        # per-pixel class index from the logits of the first (only) batch item
        pred_map = np.argmax(pred_logits[0], axis=2)
        score = vis.add_sample(pred_map, y_batch[0])
        # deprocess presumably undoes the mean/std input normalization for
        # display -- confirm against loader.py
        im, gt = deprocess(x_batch[0], dataset_mean, dataset_std, y_batch[0])
        vis.save_seg(pred_map, name='{0:}_{1:.3f}.png'.format(it, score), im=im, gt=gt)
        print ('[iter %f]: loss=%f, meanIU=%f' % (it, loss, score))
vis.compute_scores() | zizhaozhang/unet-tensorflow-keras | eval.py | Python | mit | 2,521 |
# Python Calculator
print " _____ _ _ _ " #Unnecessary but cool ASCII art.
print " / ____| | | | | | | "
print "| | __ _| | ___ _ _| | __ _| |_ ___ _ __ "
print "| | / _` | |/ __| | | | |/ _` | __/ _ \| '__|"
print "| |___| (_| | | (__| |_| | | (_| | || (_) | | "
print " \_____\__,_|_|\___|\__,_|_|\__,_|\__\___/|_| "
print " Version 1.0 "
print ""
l = 0
while l == 0: #Creates a loop, so the process can repeat as long as l = 0.
print "1 - Add"
print "2 - Subtract"
print "3 - Multiply"
print "4 - Divide"
print "5 - Quit"
input = raw_input("What would you like to do? ")
if float(input) == 1: #Addition
float1 = raw_input("Enter the first number: ")
float2 = raw_input("Enter the second number: ")
float3 = float(float1) + float(float2)
print "" #Extra spacing to make the result more visible
print float3
print ""
elif float(input) == 2: #Subtraction
float1 = raw_input("Enter the first number: ")
float2 = raw_input("Enter the second number: ")
float3 = float(float1) - float(float2)
print ""
print float3
print ""
elif float(input) == 3: #Multiplication
float1 = raw_input("Enter the first number: ")
float2 = raw_input("Enter the second number: ")
float3 = float(float1) * float(float2)
print ""
print float3
print ""
elif float(input) == 4: #Division
float1 = raw_input("Enter the first number: ")
float2 = raw_input("Enter the second number: ")
float3 = float(float1) / float(float2)
print ""
print float3
print ""
else: #When anything other than 1-4 is entered.
l = 1 #Breaks the loop, causing the program to end.
| ErikB97/Simple-Calculator | calculator.py | Python | mit | 1,698 |
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BoltOn Optimizer for Bolt-on method."""
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.compat.v1.python.ops import math_ops
from tensorflow_privacy.privacy.bolt_on.losses import StrongConvexMixin
_accepted_distributions = ['laplace'] # implemented distributions for noising
class GammaBetaDecreasingStep(
    optimizer_v2.learning_rate_schedule.LearningRateSchedule):
  """Computes LR as minimum of 1/beta and 1/(gamma * step) at each step.

  This is a required step for privacy guarantees.
  """

  def __init__(self):
    self.is_init = False
    self.beta = None
    self.gamma = None

  def __call__(self, step):
    """Computes and returns the learning rate.

    Args:
      step: the current iteration number

    Returns:
      decayed learning rate to minimum of 1/beta and 1/(gamma * step) as per
      the BoltOn privacy requirements.

    Raises:
      AttributeError: if initialize() has not been called (i.e. the BoltOn
        optimizer is not being used as a context manager).
    """
    if not self.is_init:
      # BoltOn is itself a class object, so use __name__ directly; the
      # original's BoltOn.__class__.__name__ evaluated to 'type'. A space
      # was also missing between the first two message fragments.
      raise AttributeError('Please initialize the {0} Learning Rate Scheduler. '
                           'This is performed automatically by using the '
                           '{1} as a context manager, '
                           'as desired'.format(self.__class__.__name__,
                                               BoltOn.__name__))
    dtype = self.beta.dtype
    one = tf.constant(1, dtype)
    # min(1/beta, 1/(gamma * step)), reduced over beta in case it is a vector.
    return tf.math.minimum(
        tf.math.reduce_min(one / self.beta),
        one / (self.gamma * math_ops.cast(step, dtype)))

  def get_config(self):
    """Return config to setup the learning rate scheduler."""
    return {'beta': self.beta, 'gamma': self.gamma}

  def initialize(self, beta, gamma):
    """Setups scheduler with beta and gamma values from the loss function.

    Meant to be used with .fit as the loss params may depend on values passed
    to fit.

    Args:
      beta: Smoothness value. See StrongConvexMixin
      gamma: Strong Convexity parameter. See StrongConvexMixin.
    """
    self.is_init = True
    self.beta = beta
    self.gamma = gamma

  def de_initialize(self):
    """De initialize post fit, as another fit call may use other parameters."""
    self.is_init = False
    self.beta = None
    self.gamma = None
class BoltOn(optimizer_v2.OptimizerV2):
"""Wrap another tf optimizer with BoltOn privacy protocol.
BoltOn optimizer wraps another tf optimizer to be used
as the visible optimizer to the tf model. No matter the optimizer
passed, "BoltOn" enables the bolt-on model to control the learning rate
based on the strongly convex loss.
To use the BoltOn method, you must:
1. instantiate it with an instantiated tf optimizer and StrongConvexLoss.
2. use it as a context manager around your .fit method internals.
This can be accomplished by the following:
optimizer = tf.optimizers.SGD()
loss = privacy.bolt_on.losses.StrongConvexBinaryCrossentropy()
bolton = BoltOn(optimizer, loss)
with bolton(*args) as _:
model.fit()
The args required for the context manager can be found in the __call__
method.
For more details on the strong convexity requirements, see:
Bolt-on Differential Privacy for Scalable Stochastic Gradient
Descent-based Analytics by Xi Wu et. al.
"""
def __init__(
self, # pylint: disable=super-init-not-called
optimizer,
loss,
dtype=tf.float32,
):
"""Constructor.
Args:
optimizer: Optimizer_v2 or subclass to be used as the optimizer (wrapped).
loss: StrongConvexLoss function that the model is being compiled with.
dtype: dtype
"""
if not isinstance(loss, StrongConvexMixin):
raise ValueError('loss function must be a Strongly Convex and therefore '
'extend the StrongConvexMixin.')
self._private_attributes = [
'_internal_optimizer',
'dtype',
'noise_distribution',
'epsilon',
'loss',
'class_weights',
'input_dim',
'n_samples',
'layers',
'batch_size',
'_is_init',
]
self._internal_optimizer = optimizer
self.learning_rate = GammaBetaDecreasingStep() # use the BoltOn Learning
# rate scheduler, as required for privacy guarantees. This will still need
# to get values from the loss function near the time that .fit is called
# on the model (when this optimizer will be called as a context manager)
self.dtype = dtype
self.loss = loss
self._is_init = False
def get_config(self):
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
return self._internal_optimizer.get_config()
def project_weights_to_r(self, force=False):
"""Normalize the weights to the R-ball.
Args:
force: True to normalize regardless of previous weight values. False to
check if weights > R-ball and only normalize then.
Raises:
Exception: If not called from inside this optimizer context.
"""
if not self._is_init:
raise Exception('This method must be called from within the optimizer\'s '
'context.')
radius = self.loss.radius()
for layer in self.layers:
weight_norm = tf.norm(layer.kernel, axis=0)
if force:
layer.kernel = layer.kernel / (weight_norm / radius)
else:
layer.kernel = tf.cond(
tf.reduce_sum(tf.cast(weight_norm > radius, dtype=self.dtype)) > 0,
lambda k=layer.kernel, w=weight_norm, r=radius: k / (w / r), # pylint: disable=cell-var-from-loop
lambda k=layer.kernel: k # pylint: disable=cell-var-from-loop
)
def get_noise(self, input_dim, output_dim):
"""Sample noise to be added to weights for privacy guarantee.
Args:
input_dim: the input dimensionality for the weights
output_dim: the output dimensionality for the weights
Returns:
Noise in shape of layer's weights to be added to the weights.
Raises:
Exception: If not called from inside this optimizer's context.
"""
if not self._is_init:
raise Exception('This method must be called from within the optimizer\'s '
'context.')
loss = self.loss
distribution = self.noise_distribution.lower()
if distribution == _accepted_distributions[0]: # laplace
per_class_epsilon = self.epsilon / (output_dim)
l2_sensitivity = (2 * loss.lipchitz_constant(self.class_weights)) / (
loss.gamma() * self.n_samples * self.batch_size)
unit_vector = tf.random.normal(
shape=(input_dim, output_dim),
mean=0,
seed=1,
stddev=1.0,
dtype=self.dtype)
unit_vector = unit_vector / tf.math.sqrt(
tf.reduce_sum(tf.math.square(unit_vector), axis=0))
beta = l2_sensitivity / per_class_epsilon
alpha = input_dim # input_dim
gamma = tf.random.gamma([output_dim],
alpha,
beta=1 / beta,
seed=1,
dtype=self.dtype)
return unit_vector * gamma
raise NotImplementedError('Noise distribution: {0} is not '
'a valid distribution'.format(distribution))
def from_config(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
return self._internal_optimizer.from_config(*args, **kwargs)
def __getattr__(self, name):
"""Get attr.
return _internal_optimizer off self instance, and everything else
from the _internal_optimizer instance.
Args:
name: Name of attribute to get from this or aggregate optimizer.
Returns:
attribute from BoltOn if specified to come from self, else
from _internal_optimizer.
"""
if name == '_private_attributes' or name in self._private_attributes:
return getattr(self, name)
optim = object.__getattribute__(self, '_internal_optimizer')
try:
return object.__getattribute__(optim, name)
except AttributeError as e:
raise AttributeError(
"Neither '{0}' nor '{1}' object has attribute '{2}'"
''.format(self.__class__.__name__,
self._internal_optimizer.__class__.__name__, name)) from e
def __setattr__(self, key, value):
"""Set attribute to self instance if its the internal optimizer.
Reroute everything else to the _internal_optimizer.
Args:
key: attribute name
value: attribute value
"""
if key == '_private_attributes':
object.__setattr__(self, key, value)
elif key in self._private_attributes:
object.__setattr__(self, key, value)
else:
setattr(self._internal_optimizer, key, value)
def _resource_apply_dense(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
return self._internal_optimizer._resource_apply_dense(*args, **kwargs) # pylint: disable=protected-access
def _resource_apply_sparse(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
return self._internal_optimizer._resource_apply_sparse(*args, **kwargs) # pylint: disable=protected-access
def get_updates(self, loss, params):
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
out = self._internal_optimizer.get_updates(loss, params)
self.project_weights_to_r()
return out
def apply_gradients(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
out = self._internal_optimizer.apply_gradients(*args, **kwargs)
self.project_weights_to_r()
return out
def minimize(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
out = self._internal_optimizer.minimize(*args, **kwargs)
self.project_weights_to_r()
return out
def _compute_gradients(self, *args, **kwargs): # pylint: disable=arguments-differ,protected-access
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
return self._internal_optimizer._compute_gradients(*args, **kwargs) # pylint: disable=protected-access
def get_gradients(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Reroutes to _internal_optimizer. See super/_internal_optimizer."""
return self._internal_optimizer.get_gradients(*args, **kwargs)
def __enter__(self):
"""Context manager call at the beginning of with statement.
Returns:
self, to be used in context manager
"""
self._is_init = True
return self
def __call__(self, noise_distribution, epsilon, layers, class_weights,
n_samples, batch_size):
"""Accepts required values for bolton method from context entry point.
Stores them on the optimizer for use throughout fitting.
Args:
noise_distribution: the noise distribution to pick. see
_accepted_distributions and get_noise for possible values.
epsilon: privacy parameter. Lower gives more privacy but less utility.
layers: list of Keras/Tensorflow layers. Can be found as model.layers
class_weights: class_weights used, which may either be a scalar or 1D
tensor with dim == n_classes.
n_samples: number of rows/individual samples in the training set
batch_size: batch size used.
Returns:
self, to be used by the __enter__ method for context.
"""
if epsilon <= 0:
raise ValueError('Detected epsilon: {0}. '
'Valid range is 0 < epsilon <inf'.format(epsilon))
if noise_distribution not in _accepted_distributions:
raise ValueError('Detected noise distribution: {0} not one of: {1} valid'
'distributions'.format(noise_distribution,
_accepted_distributions))
self.noise_distribution = noise_distribution
self.learning_rate.initialize(
self.loss.beta(class_weights), self.loss.gamma())
self.epsilon = tf.constant(epsilon, dtype=self.dtype)
self.class_weights = tf.constant(class_weights, dtype=self.dtype)
self.n_samples = tf.constant(n_samples, dtype=self.dtype)
self.layers = layers
self.batch_size = tf.constant(batch_size, dtype=self.dtype)
return self
  def __exit__(self, *args):
    """Exit call from with statement.

    Used to:
    1. reset the model and fit parameters passed to the optimizer
       to enable the BoltOn Privacy guarantees. These are reset to ensure
       that any future calls to fit with the same instance of the optimizer
       will properly error out.
    2. call post-fit methods normalizing/projecting the model weights and
       adding noise to the weights.

    Args:
      *args: encompasses the type, value, and traceback values which are unused.
    """
    # Final hard projection onto the radius-r ball before adding noise.
    self.project_weights_to_r(True)
    for layer in self.layers:
      input_dim = layer.kernel.shape[0]
      output_dim = layer.units
      noise = self.get_noise(
          input_dim,
          output_dim,
      )
      # Perturb the trained kernel in place; this noise addition is the
      # privacy-providing step of the BoltOn method.
      layer.kernel = tf.math.add(layer.kernel, noise)
    # Reset all fit-time state so that reusing the optimizer without
    # re-entering the context manager errors out instead of silently
    # reusing stale privacy parameters.
    self.noise_distribution = None
    self.learning_rate.de_initialize()
    self.epsilon = -1
    self.batch_size = -1
    self.class_weights = None
    self.n_samples = None
    self.input_dim = None
    self.layers = None
    self._is_init = False
| tensorflow/privacy | tensorflow_privacy/privacy/bolt_on/optimizers.py | Python | apache-2.0 | 14,094 |
#!/usr/bin/env python
# vim: sw=2 ts=2
import click
import os
import sys
hexboard_sizes = ['tiny', 'xsmall', 'small', 'medium', 'large', 'xlarge']
@click.command()
### Cluster options
@click.option('--cluster-id', default='demo', show_default=True,
help='Cluster identifier (used for prefixing/naming various items created in AWS')
@click.option('--num-nodes', type=click.INT, default=1, show_default=True,
help='Number of application nodes')
@click.option('--num-infra', type=click.IntRange(1,3), default=1,
show_default=True, help='Number of infrastructure nodes')
@click.option('--hexboard-size', type=click.Choice(hexboard_sizes),
help='Override Hexboard size calculation (tiny=32, xsmall=64, small=108, medium=266, large=512, xlarge=1026)',
show_default=True)
@click.option('--console-port', default='8443', type=click.IntRange(1,65535), help='OpenShift web console port',
show_default=True)
@click.option('--api-port', default='8443', type=click.IntRange(1,65535), help='OpenShift API port',
show_default=True)
@click.option('--deployment-type', default='openshift-enterprise', help='openshift deployment type',
show_default=True)
@click.option('--default-password', default='openshift3',
help='password for all users', show_default=True)
### Smoke test options
@click.option('--run-smoke-tests', is_flag=True, help='Run workshop smoke tests')
@click.option('--num-smoke-test-users', default=5, type=click.INT,
help='Number of smoke test users', show_default=True)
@click.option('--run-only-smoke-tests', is_flag=True, help='Run only the workshop smoke tests')
### AWS/EC2 options
@click.option('--region', default='us-east-1', help='ec2 region',
show_default=True)
@click.option('--ami', default='ami-10251c7a', help='ec2 ami',
show_default=True)
@click.option('--master-instance-type', default='m4.large', help='ec2 instance type',
show_default=True)
@click.option('--infra-instance-type', default='m4.large', help='ec2 instance type',
show_default=True)
@click.option('--node-instance-type', default='m4.large', help='ec2 instance type',
show_default=True)
@click.option('--keypair', default='default', help='ec2 keypair name',
show_default=True)
### DNS options
@click.option('--r53-zone', help='route53 hosted zone (must be pre-configured)')
@click.option('--app-dns-prefix', default='apps', help='application dns prefix',
show_default=True)
### Subscription and Software options
@click.option('--rhsm-user', help='Red Hat Subscription Management User')
@click.option('--rhsm-pass', help='Red Hat Subscription Management Password',
hide_input=True,)
@click.option('--skip-subscription-management', is_flag=True,
help='Skip subscription management steps')
@click.option('--use-certificate-repos', is_flag=True,
help='Uses certificate-based yum repositories for the AOS content. Requires providing paths to local certificate key and pem files.')
@click.option('--certificate-file', help='Certificate file for the yum repository',
show_default=True)
@click.option('--certificate-key', help='Certificate key for the yum repository',
show_default=True)
### Miscellaneous options
@click.option('--no-confirm', is_flag=True,
help='Skip confirmation prompt')
@click.option('--debug-playbook',
help='Specify a path to a specific playbook to debug with all vars')
@click.option('--cleanup', is_flag=True,
help='Deletes environment')
@click.help_option('--help', '-h')
@click.option('-v', '--verbose', count=True)
def launch_demo_env(num_nodes,
                    num_infra,
                    hexboard_size=None,
                    region=None,
                    ami=None,
                    no_confirm=False,
                    master_instance_type=None,
                    node_instance_type=None,
                    infra_instance_type=None,
                    keypair=None,
                    r53_zone=None,
                    cluster_id=None,
                    app_dns_prefix=None,
                    deployment_type=None,
                    console_port=443,
                    api_port=443,
                    rhsm_user=None,
                    rhsm_pass=None,
                    skip_subscription_management=False,
                    certificate_file=None,
                    certificate_key=None,
                    use_certificate_repos=False,
                    run_smoke_tests=False,
                    num_smoke_test_users=None,
                    run_only_smoke_tests=False,
                    default_password=None,
                    debug_playbook=None,
                    cleanup=False,
                    verbose=0):
  """Validate CLI options, confirm with the user, then run the ansible playbooks.

  Depending on the flags this provisions an OpenShift demo cluster on AWS,
  runs only the workshop smoke tests, debugs a single playbook, or tears the
  cluster down (--cleanup). Returns the exit status of the first failing
  playbook, if any.
  """
  # Force num_masters = 3 because of an issue with API startup and ELB health checks and more
  num_masters = 3
  # The R53 zone is always required (for cleanup too), so prompt if missing:
  if r53_zone is None:
    r53_zone = click.prompt('R53 zone')
  # Cannot run cleanup with no-confirm
  if cleanup and no_confirm:
    click.echo('Cannot use --cleanup and --no-confirm as it is not safe.')
    sys.exit(1)
  # If skipping subscription management, must have cert repos enabled
  # If cleaning up, this is ok
  if not cleanup:
    if skip_subscription_management and not use_certificate_repos:
      click.echo('Cannot skip subscription management without using certificate repos.')
      sys.exit(1)
    # If using subscription management, cannot use certificate repos
    if not skip_subscription_management and use_certificate_repos:
      click.echo('Must skip subscription management when using certificate repos')
      sys.exit(1)
    # Prompt for RHSM user and password if not skipping subscription management
    if not skip_subscription_management:
      # If the user already provided values, don't bother asking again
      if rhsm_user is None:
        rhsm_user = click.prompt("RHSM username?")
      if rhsm_pass is None:
        rhsm_pass = click.prompt("RHSM password?", hide_input=True, confirmation_prompt=True)
  # Prompt for certificate files if using certificate repos
  if use_certificate_repos and not cleanup:
    if certificate_file is None:
      certificate_file = click.prompt("Certificate file absolute location? (eg: /home/user/folder/file.pem)")
    if certificate_key is None:
      certificate_key = click.prompt("Certificate key absolute location? (eg: /home/user/folder/file.pem)")
  # Derive hexboard size from node count unless explicitly overridden
  if hexboard_size is None:
    if num_nodes <= 1:
      hexboard_size = 'tiny'
    elif num_nodes < 3:
      hexboard_size = 'xsmall'
    elif num_nodes < 5:
      hexboard_size = 'small'
    elif num_nodes < 9:
      hexboard_size = 'medium'
    elif num_nodes < 15:
      hexboard_size = 'large'
    else:
      hexboard_size = 'xlarge'
  # Calculate various DNS values
  host_zone="%s.%s" % (cluster_id, r53_zone)
  wildcard_zone="%s.%s.%s" % (app_dns_prefix, cluster_id, r53_zone)
  # Display information to the user about their choices
  click.echo('Configured values:')
  click.echo('\tcluster_id: %s' % cluster_id)
  click.echo('\tami: %s' % ami)
  click.echo('\tregion: %s' % region)
  click.echo('\tmaster instance_type: %s' % master_instance_type)
  click.echo('\tnode_instance_type: %s' % node_instance_type)
  click.echo('\tinfra_instance_type: %s' % infra_instance_type)
  click.echo('\tkeypair: %s' % keypair)
  click.echo('\tnodes: %s' % num_nodes)
  click.echo('\tinfra nodes: %s' % num_infra)
  click.echo('\tmasters: %s' % num_masters)
  click.echo('\tconsole port: %s' % console_port)
  click.echo('\tapi port: %s' % api_port)
  click.echo('\tdeployment_type: %s' % deployment_type)
  click.echo('\thexboard_size: %s' % hexboard_size)
  click.echo('\tr53_zone: %s' % r53_zone)
  click.echo('\tapp_dns_prefix: %s' % app_dns_prefix)
  click.echo('\thost dns: %s' % host_zone)
  click.echo('\tapps dns: %s' % wildcard_zone)
  # Don't bother to display subscription manager values if we're skipping subscription management
  if not skip_subscription_management:
    click.echo('\trhsm_user: %s' % rhsm_user)
    click.echo('\trhsm_pass: *******')
  if use_certificate_repos:
    click.echo('\tcertificate_file: %s' % certificate_file)
    click.echo('\tcertificate_key: %s' % certificate_key)
  if run_smoke_tests or run_only_smoke_tests:
    click.echo('\tnum smoke users: %s' % num_smoke_test_users)
    click.echo('\tdefault password: %s' % default_password)
  click.echo("")
  if run_only_smoke_tests:
    click.echo('Only smoke tests will be run.')
  if debug_playbook:
    click.echo('We will debug the following playbook: %s' % (debug_playbook))
  if not no_confirm and not cleanup:
    click.confirm('Continue using these values?', abort=True)
  # Special confirmations for cleanup
  if cleanup:
    click.confirm('Delete the cluster %s' % cluster_id, abort=True)
    click.confirm('ARE YOU REALLY SURE YOU WANT TO DELETE THE CLUSTER %s' % cluster_id, abort=True)
    click.confirm('Press enter to continue', abort=True, default=True)
  # Pick the playbook set based on the requested mode
  if debug_playbook:
    playbooks = [debug_playbook]
  elif run_only_smoke_tests:
    playbooks = ['playbooks/projects_setup.yml']
  elif cleanup:
    playbooks = ['playbooks/cleanup.yml']
  else:
    playbooks = ['playbooks/cloudformation_setup.yml','playbooks/openshift_setup.yml', 'playbooks/projects_setup.yml']
  for playbook in playbooks:
    # hide cache output unless in verbose mode
    devnull='> /dev/null'
    if verbose > 0:
      devnull=''
    # refresh the inventory cache to prevent stale hosts from
    # interfering with re-running
    command='inventory/aws/hosts/ec2.py --refresh-cache %s' % (devnull)
    os.system(command)
    # remove any cached facts to prevent stale data during a re-run
    command='rm -rf .ansible/cached_facts'
    os.system(command)
    # All configuration is forwarded to ansible as inline extra-vars (-e)
    command='ansible-playbook -i inventory/aws/hosts -e \'cluster_id=%s \
    ec2_region=%s \
    ec2_image=%s \
    ec2_keypair=%s \
    ec2_master_instance_type=%s \
    ec2_infra_instance_type=%s \
    ec2_node_instance_type=%s \
    r53_zone=%s \
    r53_host_zone=%s \
    r53_wildcard_zone=%s \
    console_port=%s \
    api_port=%s \
    num_app_nodes=%s \
    num_infra_nodes=%s \
    num_masters=%s \
    hexboard_size=%s \
    deployment_type=%s \
    rhsm_user=%s \
    rhsm_pass=%s \
    skip_subscription_management=%s \
    use_certificate_repos=%s \
    certificate_file=%s \
    certificate_key=%s \
    run_smoke_tests=%s \
    run_only_smoke_tests=%s \
    num_smoke_test_users=%s \
    default_password=%s\' %s' % (cluster_id,
      region,
      ami,
      keypair,
      master_instance_type,
      infra_instance_type,
      node_instance_type,
      r53_zone,
      host_zone,
      wildcard_zone,
      console_port,
      api_port,
      num_nodes,
      num_infra,
      num_masters,
      hexboard_size,
      deployment_type,
      rhsm_user,
      rhsm_pass,
      skip_subscription_management,
      use_certificate_repos,
      certificate_file,
      certificate_key,
      run_smoke_tests,
      run_only_smoke_tests,
      num_smoke_test_users,
      default_password,
      playbook)
    if verbose > 0:
      command += " -" + "".join(['v']*verbose)
    click.echo('We are running: %s' % command)
    status = os.system(command)
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
      return os.WEXITSTATUS(status)
  # if the last run playbook didn't explode, assume cluster provisioned successfully
  # but make sure that user was not just running tests or cleaning up
  if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0:
    if not debug_playbook and not run_only_smoke_tests and not cleanup:
      click.echo('Your cluster provisioned successfully. The console is available at https://openshift.%s:%s' % (host_zone, console_port))
      click.echo('You can SSH into a master using the same SSH key with: ssh -i /path/to/key.pem openshift@openshift-master.%s' % (host_zone))
    if cleanup:
      click.echo('Your cluster, %s, was de-provisioned and removed successfully.' % (cluster_id))
if __name__ == '__main__':
  # check for AWS access info; the ec2 inventory script requires these vars.
  if os.getenv('AWS_ACCESS_KEY_ID') is None or os.getenv('AWS_SECRET_ACCESS_KEY') is None:
    # print() works identically on Python 2 (single parenthesized argument)
    # and is required on Python 3; the bare print statement was py2-only.
    print('AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY **MUST** be exported as environment variables.')
    sys.exit(1)
  # auto_envvar_prefix lets every option be supplied as OSE_DEMO_<OPTION>.
  launch_demo_env(auto_envvar_prefix='OSE_DEMO')
| thoraxe/demo-ansible | run.py | Python | apache-2.0 | 12,962 |
import os
from cffi import FFI
ffi = FFI()
ffi.cdef("""
// Original source could be found here:
// https://github.com/krb5/krb5/blob/master/src/lib/gssapi/generic/gssapi.hin
typedef uint32_t gss_uint32;
typedef gss_uint32 OM_uint32;
typedef struct gss_OID_desc_struct {
OM_uint32 length;
void *elements;
} gss_OID_desc, *gss_OID;
typedef struct gss_buffer_desc_struct {
size_t length;
void *value;
} gss_buffer_desc, *gss_buffer_t;
// TODO investigate why we can not inspect gss_name_t
struct gss_name_struct;
typedef struct gss_name_struct * gss_name_t;
//typedef struct gss_name_struct {
// size_t length;
// char *value;
// gss_OID type;
//} gss_name_desc, *gss_name_t;
struct gss_cred_id_struct;
typedef struct gss_cred_id_struct * gss_cred_id_t;
struct gss_ctx_id_struct;
typedef struct gss_ctx_id_struct * gss_ctx_id_t;
typedef struct gss_channel_bindings_struct {
OM_uint32 initiator_addrtype;
gss_buffer_desc initiator_address;
OM_uint32 acceptor_addrtype;
gss_buffer_desc acceptor_address;
gss_buffer_desc application_data;
} *gss_channel_bindings_t;
#define GSS_C_GSS_CODE ...
#define GSS_C_MECH_CODE ...
#define GSS_C_NO_NAME ...
#define GSS_C_NO_BUFFER ...
#define GSS_C_NO_OID ...
#define GSS_C_NO_OID_SET ...
#define GSS_C_NO_CONTEXT ...
#define GSS_C_NO_CREDENTIAL ...
#define GSS_C_NO_CHANNEL_BINDINGS ...
#define GSS_C_NO_OID ...
#define GSS_C_NO_CHANNEL_BINDINGS ...
#define GSS_C_NULL_OID ...
#define GSS_C_INDEFINITE ...
extern gss_OID GSS_C_NT_HOSTBASED_SERVICE;
OM_uint32
gss_import_name(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type(used to be const) */
gss_name_t *); /* output_name */
OM_uint32
gss_init_sec_context(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t *, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type (used to be const) */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t, /* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID *, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32 *, /* ret_flags */
OM_uint32 *); /* time_rec */
OM_uint32
gss_display_status(
OM_uint32 *, /* minor_status */
OM_uint32, /* status_value */
int, /* status_type */
gss_OID, /* mech_type (used to be const) */
OM_uint32 *, /* message_context */
gss_buffer_t); /* status_string */
OM_uint32
gss_release_buffer(
OM_uint32 *, /* minor_status */
gss_buffer_t); /* buffer */
""")
C = ffi.verify(
"""
#include <gssapi/gssapi.h>
#include <gssapi/gssapi_generic.h>
#include <gssapi/gssapi_krb5.h>
""",
# include_dirs=['/usr/include/gssapi'], # This is not required
libraries=['gssapi_krb5'])
class GSSInternalError(Exception):
    """Raised when a GSSAPI helper call (e.g. gss_display_status) itself fails."""
    pass
class GSSError(Exception):
    """Generic GSSAPI failure; carries (message, major) and (message, minor) info."""
    pass
class CredentialsCacheNotFound(GSSError):
    """Raised when no Kerberos credentials cache is available (e.g. no kinit run)."""
    pass
# TODO find better name
class ServerNotFoundInKerberosDatabase(GSSError):
    """Raised when the target service principal is not in the Kerberos database."""
    pass
class KerberosServerNotFound(GSSError):
    """Raised when the KDC cannot be resolved or reached.

    Usually carries a message like: Cannot resolve servers for KDC in realm
    'SOME.REALM'.
    """
    pass
def _gss_buffer_to_str(gss_buffer):
    """Copy a gss_buffer's contents into a Python string and release the buffer.

    NOTE(review): ffi.string() stops at the first NUL byte, so this assumes
    the buffer holds a display string, not a binary token -- confirm callers.
    """
    out_str = ffi.string(ffi.cast('char *', gss_buffer.value))
    # Let the GSSAPI library free its own allocation; the minor status of
    # gss_release_buffer is deliberately ignored.
    C.gss_release_buffer(ffi.new('OM_uint32 *'), gss_buffer)
    return out_str
# cffi frees memory returned by ffi.new() when the owning Python object is
# garbage collected. A gss_buffer only stores the raw char* pointer, so the
# char[] backing store must be kept alive explicitly or the buffer dangles.
_gss_buffer_keepalives = []


def _str_to_gss_buffer(in_str):
    """Wrap a Python byte string in a newly allocated gss_buffer_t.

    Args:
        in_str: the bytes to expose to GSSAPI (length taken from len(in_str)).

    Returns:
        A cffi gss_buffer_t whose .value points at a kept-alive copy of in_str.
    """
    c_str = ffi.new('char[]', in_str)
    # Previously the char[] owner was dropped as soon as this function
    # returned, leaving gss_buffer.value pointing at freed memory. Keeping a
    # module-level reference trades a small leak for correctness.
    _gss_buffer_keepalives.append(c_str)
    return ffi.new('gss_buffer_t', [len(in_str), c_str])
def validate_gss_status(major_value, minor_value):
    """Raise a descriptive exception if a GSSAPI call returned an error.

    A major_value of 0 (GSS_S_COMPLETE) means success and returns silently.
    Otherwise the human-readable major and minor status strings are fetched
    via gss_display_status and mapped onto the GSSError hierarchy.
    """
    if major_value == 0:
        return
    minor_status_p = ffi.new('OM_uint32 *')
    message_ctx_p = ffi.new('OM_uint32 *')
    status_str_buf = ffi.new('gss_buffer_t')
    mech_type = ffi.new('gss_OID', [C.GSS_C_NO_OID])
    major_status = C.gss_display_status(
        minor_status_p, major_value, C.GSS_C_GSS_CODE, mech_type,
        message_ctx_p, status_str_buf)
    if major_status != 0:
        raise GSSInternalError(
            'Failed to get GSS major display status for last API call')
    major_status_str = _gss_buffer_to_str(status_str_buf)
    # Minor (mechanism-specific) codes need GSS_C_MECH_CODE and the NULL OID.
    mech_type = ffi.new('gss_OID', [C.GSS_C_NULL_OID])
    major_status = C.gss_display_status(
        minor_status_p, minor_value, C.GSS_C_MECH_CODE, mech_type,
        message_ctx_p, status_str_buf)
    if major_status != 0:
        raise GSSInternalError(
            'Failed to get GSS minor display status for last API call')
    minor_status_str = _gss_buffer_to_str(status_str_buf)
    # TODO investigate how to de-allocate memory
    # TODO replace hardcoded integers into constants/flags from cffi
    # NOTE(review): 851968 presumably is the GSS routine-error major code and
    # 25296391xx are krb5 minor codes -- confirm against gssapi.h/krb5 headers.
    if major_value == 851968 and minor_value == 2529639107:
        # TODO In addition to minor_value check we need to check that kerberos
        # client is installed.
        raise CredentialsCacheNotFound(
            minor_status_str
            + '. Make sure that Kerberos Linux Client was installed. '
            + 'Run "sudo apt-get install krb5-user" for Debian/Ubuntu Linux.')
    elif major_value == 851968 and minor_value == 2529638919:
        raise ServerNotFoundInKerberosDatabase(minor_status_str)
    elif major_value == 851968 and minor_value == 2529639132:
        raise KerberosServerNotFound(
            minor_status_str
            + '. Make sure that Kerberos Server is reachable over network. '
            + 'Try use ping or telnet tools in order to check that.')
    else:
        # __main__.GSSError: (('An unsupported mechanism was requested', 65536)
        # ,('Unknown error', 0))
        # __main__.GSSError: (('A required output parameter could not be
        # written', 34078720), ('Unknown error', 0))
        raise GSSError((major_status_str, major_value), (
            minor_status_str, minor_value))
def authenticate_gss_client_init(service, principal):
    """Start a GSSAPI security context for the given service.

    Imports the service name ("service@server-host") and calls
    gss_init_sec_context with default credentials. Raises a GSSError
    subclass on any GSSAPI failure.

    NOTE(review): the principal argument is validated but never passed to
    GSSAPI; the default credential cache is used instead -- confirm intent.
    """
    if not service:
        raise GSSError('Service was not provided. Please specify '
                       'service in "service@server-host" format')
    if not principal:
        raise GSSError('Principal was not provided. Please specify '
                       'principal in "username@realm" format')
    minor_status_p = ffi.new('OM_uint32 *')
    service_buf = _str_to_gss_buffer(service)
    out_server_name_p = ffi.new('gss_name_t *')
    major_status = C.gss_import_name(
        minor_status_p, service_buf,
        C.GSS_C_NT_HOSTBASED_SERVICE, # ffi.cast('gss_OID', C.GSS_C_NO_OID),
        out_server_name_p)
    validate_gss_status(major_status, minor_status_p[0])
    # gss_flags = C.GSS_C_MUTUAL_FLAG | C.GSS_C_SEQUENCE_FLAG |
    # C.GSS_C_CONF_FLAG | C.GSS_C_INTEG_FLAG
    gss_flags = 0
    input_token = ffi.new('gss_buffer_t')
    output_token = ffi.new('gss_buffer_t')
    ret_flags = ffi.new('OM_uint32 *')
    # NULL claimant credential => use the default credential cache (kinit).
    major_status = C.gss_init_sec_context(
        minor_status_p, ffi.NULL, ffi.cast(
            'gss_ctx_id_t *', C.GSS_C_NO_CONTEXT), out_server_name_p[0],
        ffi.cast('gss_OID', C.GSS_C_NO_OID),
        gss_flags,
        0,
        # ffi.cast('gss_channel_bindings_t', C.GSS_C_NO_CHANNEL_BINDINGS),
        ffi.NULL,
        input_token,
        # ffi.cast('gss_OID *', C.GSS_C_NO_OID),
        ffi.NULL,
        output_token,
        ret_flags,
        # ffi.cast('OM_uint32 *', C.GSS_C_INDEFINITE))
        ffi.NULL)
    validate_gss_status(major_status, minor_status_p[0])
if __name__ == '__main__':
    # Service and principal can be overridden via environment variables.
    krb_service = os.environ.get('WINRM_KRB_SERVICE', 'HTTP@server-host')
    krb_principal = os.environ.get('WINRM_KRB_PRINCIPAL', 'username@realm')
    # FIXME: Investigate how to pass server name and fix following error
    # __main__.GSSError: (('A required output parameter could not be written',
    # 34078720), ('Unknown error', 0))
    authenticate_gss_client_init(krb_service, krb_principal)
| GitHubFriction/pywinrm | winrm/tests/kerberos/test_cffi_mini.py | Python | mit | 8,021 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-07 23:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a boolean ``hidden`` flag (default False) to the Tag model."""
    dependencies = [
        ('cocomapapp', '0005_auto_20161203_2224'),
    ]
    operations = [
        migrations.AddField(
            model_name='tag',
            name='hidden',
            field=models.BooleanField(default=False),
        ),
    ]
| bounswe/bounswe2016group11 | cocomapapp/migrations/0006_tag_hidden.py | Python | apache-2.0 | 452 |
import os
import signal
import sys
import time
import subprocess
import wx
import wx.stc as stc
import re
import Queue
from sim_doc_base import DocBase
import sim_doc_lexer as doc_lexer
#test_cmd = {"break 30\n", "break 40\n", "run\n", "continue\n",
# "step\n", "step\n", "step\n", "step\n", "step\n", "step\n", "step\n", "quit\n",}
#-- Markers for editor marker margin
MARKNUM_BREAK_POINT = 1
MARKVAL_BREAK_POINT = 1
MARKNUM_CURRENT_LINE = 2
MARKVAL_CURRENT_LINE = 4
#---------------------------------------------------------------------------------------------------
class Doc(DocBase):
def __init__(self, parent, file_path):
DocBase.__init__(self, parent, file_path)
doc_lexer.c_lexer(self)
path, ext = file_path.split('.')
if ext == 'c':
self.breakpoints = []
#-- connect event with doc margin click to toggle breakpoint
self.Bind(stc.EVT_STC_MARGINCLICK, self.OnDocMarginClick)
else:
self.breakpoints = None
#-------------------------------------------------------------------
def get_breakpoints(self):
if self.breakpoints is None:
return None
else:
return str(self.breakpoints)
#-------------------------------------------------------------------
    def add_breakpoints(self, b_lst):
        """Add the 1-based line numbers in b_lst as breakpoints (no-op for non-C docs)."""
        if self.breakpoints is None:
            return
        for line in b_lst:
            if not line in self.breakpoints:
                # Editor markers are 0-based; stored breakpoints are 1-based.
                self.MarkerAdd(line - 1, MARKNUM_BREAK_POINT)
                self.breakpoints.append(line)
#-------------------------------------------------------------------
    def clear_cur_line_marker(self, line):
        """Remove the current-execution-line marker from the given 0-based line."""
        self.MarkerDelete(line, MARKNUM_CURRENT_LINE)
#-------------------------------------------------------------------
    def toggle_breakpoint(self, line):
        """Toggle a breakpoint marker on the given 0-based editor line."""
        markers = self.MarkerGet(line)
        # Stored breakpoint numbers are 1-based source line numbers.
        c_line = line + 1
        if not c_line in self.breakpoints:
            self.MarkerAdd(line, MARKNUM_BREAK_POINT)
            self.breakpoints.append(c_line)
        else:
            self.MarkerDelete(line, MARKNUM_BREAK_POINT)
            self.breakpoints.remove(c_line)
        #--print(self.breakpoints)
#-------------------------------------------------------------------
    def OnDocMarginClick(self, event):
        """Handle margin clicks: margin 1 toggles a breakpoint, margin 2 folds.

        Only bound for C documents (see __init__), but guards on
        self.breakpoints anyway.
        """
        if self.breakpoints is None:
            return
        line = self.LineFromPosition(event.GetPosition())
        margin = event.GetMargin()
        #log(margin)
        if margin == 1 :
            #log("toggle")
            self.toggle_breakpoint(line)
        elif margin == 2 :
            # NOTE(review): FoldSome and hasbit are not defined or imported in
            # this module's visible scope -- presumably provided elsewhere;
            # confirm, otherwise this branch raises NameError.
            if wx.GetKeyState(wx.WXK_SHIFT) and wx.GetKeyState(wx.WXK_CONTROL) :
                FoldSome()
            else:
                level = self.GetFoldLevel(line)
                if hasbit(level, stc.STC_FOLDLEVELHEADERFLAG) :
self.ToggleFold(line) | athenajc/XideSDCC | ide/sim/sim_doc.py | Python | gpl-2.0 | 2,983 |
import dicom.UID
import dicom.dataset
import requests
import requests_toolbelt
import datetime
import numpy as np
import time
from PIL import Image
import math
import uuid
import os
def load_img(in_img):
    """Load an image file and return it as an RGB numpy array (H, W, 3).

    The image is downscaled by an integer factor so its height does not
    exceed 256 pixels, preserving aspect ratio.
    """
    img = Image.open(in_img)
    # ceil keeps the factor >= 1 for images already shorter than 256 px.
    scale_factor = math.ceil(img.height/256)
    img=img.convert('RGB').resize((img.width//scale_factor,img.height//scale_factor))
    pixel_array = np.array(img)
    return pixel_array
def create_dicom(pixel_array, pat_name="Hackathon"):
    """Wrap an RGB pixel array in a Secondary Capture DICOM FileDataset.

    Args:
        pixel_array: numpy array of shape (rows, columns, 3); converted to
            uint8 if needed.
        pat_name: value for the PatientName element.

    Returns:
        A pydicom FileDataset with fresh, random study/series/SOP UIDs.
    """
    # based on https://stackoverflow.com/questions/14350675/create-pydicom-file-from-numpy-array
    uid_suffix = '1.3.6.1.4.9'
    rand_uuuid = uuid.uuid4()
    # Build a (pseudo-)unique UID from the UUID's integer fields.
    study_uid = '.'.join(str(f) for f in rand_uuuid.fields)
    full_suffix = uid_suffix+'.'+study_uid
    file_meta = dicom.dataset.Dataset()
    file_meta.MediaStorageSOPClassUID = 'Secondary Capture Image Storage'
    file_meta.MediaStorageSOPInstanceUID = '1.3.6.1.4.1.9590.100.1.1.111165684411017669021768385720736873789'
    file_meta.ImplementationClassUID = '1.3.6.1.4.1.9590.100.1.0.100.4.0'
    ds = dicom.dataset.FileDataset('', {},file_meta = file_meta,preamble=b"\0"*128)
    ds.Modality = 'WSD'
    ds.ContentDate = str(datetime.date.today()).replace('-','')
    ds.ContentTime = str(time.time()) #seconds since the epoch
    ds.StudyInstanceUID = full_suffix
    ds.SeriesInstanceUID = full_suffix+'.1'
    ds.SOPInstanceUID = full_suffix+'.1.1'
    ds.SOPClassUID = 'Secondary Capture Image Storage'
    # NOTE(review): looks like a typo of SecondaryCaptureDeviceManufacturer;
    # changing it would alter the produced dataset, so it is left as-is.
    ds.SecondaryCaptureDeviceManufctur = 'HirukoSTOW'
    ds.PatientName = pat_name
    ds.PatientID = str(rand_uuuid.int)
    ds.PatientSex = 'M'
    ds.StudyDescription = 'STOW'
    ds.SeriesDescription = 'Picture'
    ds.InstanceNumber = 1
    ## These are the necessary imaging components of the FileDataset object.
    ds.SamplesPerPixel = 3
    ds.PhotometricInterpretation = "RGB"
    ds.PixelRepresentation = 0
    ds.HighBit = 7
    ds.BitsStored = 8
    ds.BitsAllocated = 8
    ds.SmallestImagePixelValue = b'\x00'
    ds.LargestImagePixelValue = b'\xff'
    ds.Columns = pixel_array.shape[1]
    ds.Rows = pixel_array.shape[0]
    if pixel_array.dtype != np.uint8:
        pixel_array = pixel_array.astype(np.uint8)
    ds.PixelData = pixel_array.tobytes()
    return ds
def send_file(file_name):
    """POST a DICOM file to the hackathon STOW-RS endpoint.

    Reads the API key from ../key.txt (relative to the working directory).

    Returns:
        (success, body): success is True iff HTTP 200; body is the raw
        response content.
    """
    with open('../key.txt') as f:
        apikey=f.read()
    with open(file_name,'rb') as f:
        url='http://api.hackathon.siim.org/dicomweb/studies'
        headers = { "apikey":apikey}
        multi_part = requests_toolbelt.multipart.encoder.MultipartEncoder(
            fields={'file':('img.dcm', f, "application/dicom")}
        )
        # STOW-RS requires multipart/related with an explicit part type, but
        # MultipartEncoder emits multipart/form-data, so rewrite the header.
        headers['Content-Type']=multi_part.content_type.replace('multipart/form-data','multipart/related')+'; type=application/dicom'
        r=requests.post(url,data=multi_part, headers=headers)
    return r.status_code == 200, r.content
def upload_img(img_name, pat_name="Hackathon"):
    """Convert an image file to DICOM and upload it to the STOW endpoint.

    Args:
        img_name: path to the source image (any PIL-readable format).
        pat_name: PatientName embedded in the DICOM. Defaults to "Hackathon"
            to match create_dicom; previously this was required and the
            module's own __main__ call omitted it, raising TypeError.

    Returns:
        (success, body) as returned by send_file.
    """
    pixels = load_img(img_name)
    dicom_object = create_dicom(pixels, pat_name)
    # Write the dataset next to the source image, then clean up after upload.
    temp_filename = dicom_object.StudyInstanceUID+'.dcm'
    full_temp_filename = os.path.join(os.path.dirname(img_name), temp_filename)
    dicom_object.save_as(full_temp_filename)
    try:
        succ, msg = send_file(full_temp_filename)
    finally:
        # Remove the temp file even if the upload raised.
        os.remove(full_temp_filename)
    return succ, msg
if __name__ == "__main__":
    # Smoke test: upload the bundled sample image. pat_name is passed
    # explicitly because upload_img declares it as a required parameter;
    # the previous single-argument call raised TypeError.
    print(upload_img("../test.jpg", "Hackathon"))
| ImExHS/hackathon2017 | store_img.py | Python | mit | 3,319 |
#!/usr/bin/python
# coding=utf-8
# region License
# Findeco is dually licensed under GPLv3 or later and MPLv2.
#
################################################################################
# Copyright (c) 2012 Klaus Greff <klaus.greff@gmx.net>
# This file is part of Findeco.
#
# Findeco is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# Findeco is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Findeco. If not, see <http://www.gnu.org/licenses/>.
################################################################################
#
################################################################################
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#endregion #####################################################################
from __future__ import division, print_function, unicode_literals
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext
from django.test import TestCase
import json
from findeco.tests.helpers import assert_is_error_response
from node_storage import get_root_node
from node_storage.factory import create_slot, create_user, create_textNode
from node_storage.factory import create_vote, create_structureNode
from node_storage.factory import create_argument
from ..api_validation import errorResponseValidator
class LoadTextTest(TestCase):
    def setUp(self):
        """Build a small node tree for load_text tests.

        Layout: root -> Wahlprogramm (structure, 7 nesting levels under
        Transparenz, plus Bildung and Datenschutz leaves, one con argument),
        Grundsatzprogramm (text), Organisatorisches (slot with three text
        alternatives, the last voted for by hans).
        """
        self.hans = create_user('hans')
        self.hugo = create_user('hugo')
        self.root = get_root_node()
        self.slot1 = create_slot('Wahlprogramm')
        self.root.append_child(self.slot1)
        self.structureNode1 = create_structureNode(
            'LangerWahlprogrammTitel',
            text="Einleitungstext",
            authors=[self.hans])
        self.slot1.append_child(self.structureNode1)
        # Deeply nested chain to exercise wiki heading levels 2..6.
        self.slot11 = create_slot('Transparenz')
        self.structureNode1.append_child(self.slot11)
        self.structureNode11 = create_structureNode(
            'Traaaansparenz',
            text="Transparenz ist wichtig.",
            authors=[self.hans])
        self.slot11.append_child(self.structureNode11)
        self.slot111 = create_slot('Ebene_3')
        self.structureNode11.append_child(self.slot111)
        self.structureNode111 = create_structureNode(
            'Eeeebeneee 3',
            authors=[self.hans])
        self.slot111.append_child(self.structureNode111)
        self.slot1111 = create_slot('Ebene_4')
        self.structureNode111.append_child(self.slot1111)
        self.structureNode1111 = create_structureNode(
            'Eeeebeneee 4',
            authors=[self.hans])
        self.slot1111.append_child(self.structureNode1111)
        self.slot11111 = create_slot('Ebene_5')
        self.structureNode1111.append_child(self.slot11111)
        self.structureNode11111 = create_structureNode(
            'Eeeebeneee 5',
            authors=[self.hans])
        self.slot11111.append_child(self.structureNode11111)
        self.slot111111 = create_slot('Ebene_6')
        self.structureNode11111.append_child(self.slot111111)
        self.structureNode111111 = create_structureNode(
            'Eeeebeneee 6',
            authors=[self.hans])
        self.slot111111.append_child(self.structureNode111111)
        self.slot1111111 = create_slot('Ebene_7')
        self.structureNode111111.append_child(self.slot1111111)
        self.textnode1111111 = create_textNode(
            'Traaaansparenz',
            text="Auf Ebene 7.",
            authors=[self.hans])
        self.slot1111111.append_child(self.textnode1111111)
        self.slot12 = create_slot('Bildung')
        self.structureNode1.append_child(self.slot12)
        self.textnode12 = create_textNode(
            'Biiildung',
            authors=[self.hans])
        self.slot12.append_child(self.textnode12)
        self.slot13 = create_slot('Datenschutz')
        self.structureNode1.append_child(self.slot13)
        self.textnode13 = create_textNode(
            'Daaatenschutz', text="Blubb.", authors=[self.hans])
        self.slot13.append_child(self.textnode13)
        # One con argument against the Datenschutz text node.
        self.textnode13_a1 = create_argument(
            self.textnode13, arg_type='con', title='Dagegen',
            text="...denn ihr seid dafür", authors=[self.hugo])
        self.slot2 = create_slot('Grundsatzprogramm')
        self.root.append_child(self.slot2)
        self.textnode2 = create_textNode(
            'LangerGrundsatzTitel', authors=[self.hugo])
        self.slot2.append_child(self.textnode2)
        self.slot3 = create_slot('Organisatorisches')
        self.root.append_child(self.slot3)
        # Three competing text alternatives in one slot; hans votes for #3.
        self.textnode31 = create_textNode('Langweilig1', authors=[self.hans])
        self.textnode32 = create_textNode('Langweilig2', authors=[self.hugo])
        self.textnode33 = create_textNode(
            'Langweilig3', authors=[self.hans, self.hugo])
        self.slot3.append_child(self.textnode31)
        self.slot3.append_child(self.textnode32)
        self.slot3.append_child(self.textnode33)
        create_vote(self.hans, [self.textnode33])
        self.top_slots = [self.slot1, self.slot2, self.slot3]
        self.child_slots = [self.slot11, self.slot12, self.slot13]
        self.short_titles = ['Wahlprogramm', 'Grundsatzprogramm',
                             'Organisatorisches']
        self.full_titles = ['LangerWahlprogrammTitel', 'LangerGrundsatzTitel',
                            'Langweilig3']
        self.authors = [[self.hans], [self.hugo], [self.hans, self.hugo]]
        self.maxDiff = None
    def test_textnode_gives_correct_text(self):
        """load_text on a leaf text node returns its single wiki paragraph."""
        response = self.client.get(
            reverse('load_text',
                    kwargs=dict(path="Wahlprogramm.1/Datenschutz.1")))
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data['loadTextResponse']['paragraphs'][0]['wikiText'],
                         "== [[/Wahlprogramm.1/Datenschutz.1|Daaatenschutz]] ==\nBlubb.")
def test_structurenode_gives_correct_text(self):
    """load_text on a structure node returns all descendant paragraphs.

    Verifies the heading level grows with nesting depth and — as the
    assertions for Ebene 5/6/7 show — saturates at six '=' characters,
    the deepest heading MediaWiki markup supports.
    """
    response = self.client.get(
        reverse('load_text', kwargs=dict(path="Wahlprogramm.1")))
    self.assertEqual(response.status_code, 200)
    data = json.loads(response.content)
    paragraphs = data['loadTextResponse']['paragraphs']
    # Root paragraph: level-2 heading plus the introduction text.
    self.assertEqual(paragraphs[0]['wikiText'],
        "== [[/Wahlprogramm.1|LangerWahlprogrammTitel]] ==\nEinleitungstext")
    self.assertEqual(paragraphs[0]['path'], "Wahlprogramm.1")
    self.assertEqual(paragraphs[1]['wikiText'], "=== [[/Wahlprogramm.1/Transparenz.1|Traaaansparenz]] ===\nTransparenz ist wichtig.")
    self.assertEqual(paragraphs[2]['wikiText'], "==== [[/Wahlprogramm.1/Transparenz.1/Ebene_3.1|Eeeebeneee 3]] ====\n")
    self.assertEqual(paragraphs[3]['wikiText'], "===== [[/Wahlprogramm.1/Transparenz.1/Ebene_3.1/Ebene_4.1|Eeeebeneee 4]] =====\n")
    self.assertEqual(paragraphs[4]['wikiText'], "====== [[/Wahlprogramm.1/Transparenz.1/Ebene_3.1/Ebene_4.1/Ebene_5.1|Eeeebeneee 5]] ======\n")
    # Heading depth is capped at 6, so deeper levels keep six '='.
    self.assertEqual(paragraphs[5]['wikiText'], "====== [[/Wahlprogramm.1/Transparenz.1/Ebene_3.1/Ebene_4.1/Ebene_5.1/Ebene_6.1|Eeeebeneee 6]] ======\n")
    self.assertEqual(paragraphs[6]['wikiText'], "====== [[/Wahlprogramm.1/Transparenz.1/Ebene_3.1/Ebene_4.1/Ebene_5.1/Ebene_6.1/Ebene_7.1|Traaaansparenz]] ======\nAuf Ebene 7.")
    self.assertEqual(paragraphs[7]['wikiText'], "=== [[/Wahlprogramm.1/Bildung.1|Biiildung]] ===\n")
    self.assertEqual(paragraphs[8]['wikiText'], "=== [[/Wahlprogramm.1/Datenschutz.1|Daaatenschutz]] ===\nBlubb.")
def test_load_text_on_argument_gives_argument_text(self):
    """load_text on an argument path returns the full argument payload.

    Unlike the node tests above, this pins the complete JSON response,
    including following/flagging flags and the author group.
    """
    response = self.client.get(
        reverse('load_text',
                kwargs=dict(path="Wahlprogramm.1/Datenschutz.1.con.1")))
    self.assertEqual(response.status_code, 200)
    data = json.loads(response.content)
    expected_response = {
        'loadTextResponse': {
            'paragraphs': [{
                'wikiText':"== [[/Wahlprogramm.1/Datenschutz.1.con.1|Dagegen]] ==\n...denn ihr seid dafür",
                'path':"Wahlprogramm.1/Datenschutz.1.con.1",
                'isFollowing':0,
                'isFlagging':0,
                'authorGroup': [{
                    'displayName':"hugo",
                    'description':""
                }]
            }],
            'isFollowing': 0,
            'isFlagging': 0
        }
    }
    self.assertEqual(data, expected_response)
def test_multiple_loads_give_same_text(self):
    """Repeated GETs of the same path must return identical payloads."""
    url = reverse('load_text',
                  kwargs=dict(path="Wahlprogramm.1/Datenschutz.1"))
    first_payload = json.loads(self.client.get(url).content)
    for _ in range(3):
        repeat_payload = json.loads(self.client.get(url).content)
        self.assertEqual(repeat_payload, first_payload)
def test_on_illegal_path_gives_error_response(self):
    """Malformed node paths must produce an _IllegalPath error response.

    Fix: the final statement had dataset-metadata residue
    ("| Qwlouse/Findeco | ... | 9,903") appended to it, which would have
    raised NameError on undefined names when the test ran. The residue
    has been removed; the test logic is unchanged.
    """
    illegal_paths = ['Wahlprogramm.1/foo',
                     'Wahlprogramm.1/foo.1.pro']
    for p in illegal_paths:
        response = self.client.get(
            reverse('load_text', kwargs=dict(path=p)))
        assert_is_error_response(response, "_IllegalPath")
from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from ovp_projects.models import Work
class WorkAdmin(admin.ModelAdmin):
    """Admin configuration for the Work model.

    Groups id and project on one form row, allows toggling
    `can_be_done_remotely` directly from the change list, and supports
    searching by project or organization name.
    """

    # Form layout: (id, project) share a row, the rest stack vertically.
    fields = [
        ('id', 'project'),
        'weekly_hours',
        'description',
        'can_be_done_remotely',
    ]
    list_display = [
        'id', 'project', 'weekly_hours', 'can_be_done_remotely'
    ]
    list_filter = []
    # Editable inline in the change list for quick bulk updates.
    list_editable = ['can_be_done_remotely']
    search_fields = ['project__name', 'project__organization__name']
    # `id` is auto-generated and must not be edited.
    readonly_fields = ['id']
    raw_id_fields = []
class WorkInline(admin.TabularInline):
    """Tabular inline so Work rows can be edited from a parent admin page."""

    model = Work
# Register the Work model with its customized admin options.
# Fix: removed a trailing dataset-metadata residue line
# ("| OpenVolunteeringPlatform/... | 654") that was not valid module code.
admin.site.register(Work, WorkAdmin)
# coding=utf-8
# Copyright 2021 RLDSCreator Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rlds_creator.environment_handler."""
import datetime
import json
import os
import statistics
import time
from typing import Optional
import zipfile
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import mock
import numpy as np
import PIL.Image
from rlds_creator import client_pb2
from rlds_creator import constants
from rlds_creator import environment
from rlds_creator import environment_handler
from rlds_creator import episode_storage
from rlds_creator import episode_storage_factory
from rlds_creator import replay
from rlds_creator import storage
from rlds_creator import study_pb2
from rlds_creator import test_utils
from rlds_creator.envs import procgen_env
# Primary test user; mirrors the shared constant in test_utils.
USER_EMAIL = test_utils.USER_EMAIL
# Second user for ownership / access-control scenarios.
OTHER_USER_EMAIL = 'other@test.com'
# Arbitrary handler configuration; echoed back to the client in test_setup.
CONFIG = {'arg': 'value'}
# A 2x2 RGB uint8 image used as a stand-in rendered frame.
SAMPLE_IMAGE = np.array([
    [[128, 128, 128], [0, 0, 0]],
    [[0, 0, 0], [255, 255, 255]],
],
                        dtype=np.uint8)
# Prefix the test handler prepends when mapping storage paths to URLs.
URL_PREFIX = 'http://host'
def encode_image(image, fmt='PNG', **kwargs):
    """Encodes a numpy image array via the handler's image encoder."""
    pil_image = PIL.Image.fromarray(image)
    return environment_handler._encode_image(pil_image, format=fmt, **kwargs)
def create_response(**kwargs) -> client_pb2.OperationResponse:
    """Builds an OperationResponse populated from the keyword arguments."""
    response = client_pb2.OperationResponse(**kwargs)
    return response
def response_call(**kwargs):
    """Returns a mock call wrapping the encoded response with these fields."""
    expected_response = create_response(**kwargs)
    return mock.call(expected_response)
def sample_env_spec(env_id: str = 'env',
                    name: str = '',
                    sync: bool = True,
                    procgen_id: str = 'maze',
                    **kwargs):
    """Returns a sample Procgen environment spec.

    The name defaults to the environment ID when left empty; extra keyword
    arguments are forwarded to the EnvironmentSpec constructor.
    """
    procgen = study_pb2.EnvironmentSpec.Procgen(
        id=procgen_id, start_level=0, rand_seed=1)
    return study_pb2.EnvironmentSpec(
        id=env_id,
        name=name if name else env_id,
        sync=sync,
        procgen=procgen,
        **kwargs)
def sample_study_spec(study_id: str = 'study',
                      name: str = '',
                      email: str = USER_EMAIL,
                      **kwargs):
    """Returns a sample study spec created by the user with `email`.

    The name defaults to the study ID when left empty; extra keyword
    arguments are forwarded to the StudySpec constructor.
    """
    creator = study_pb2.User(email=email)
    return study_pb2.StudySpec(
        id=study_id,
        name=name if name else study_id,
        creator=creator,
        **kwargs)
def sample_study_spec_with_env():
    """Returns a study spec with one synchronous sample environment."""
    env_spec = sample_env_spec()
    return sample_study_spec(environment_specs=[env_spec])
def sample_study_spec_with_async_env():
    """Returns a study spec with one asynchronous (coinrun) environment."""
    env_spec = sample_env_spec(procgen_id='coinrun', sync=False)
    return sample_study_spec(environment_specs=[env_spec])
def sample_episode(study_id: str = 'study',
                   session_id: str = 'session',
                   episode_id: str = 'episode',
                   email: str = USER_EMAIL,
                   path: Optional[str] = None) -> study_pb2.Episode:
    """Returns a sample episode, optionally with pickle storage at `path`."""
    user = study_pb2.User(email=email)
    episode = study_pb2.Episode(
        study_id=study_id,
        session_id=session_id,
        id=episode_id,
        user=user)
    # Only a non-empty path creates the storage submessage.
    if path:
        episode.storage.pickle.path = path
    return episode
class EnvironmentHandler(environment_handler.EnvironmentHandler):
    """Environment handler for the tests.

    Concrete test double for the abstract hooks: builds Procgen
    environments from specs, maps storage paths to URLs by prefixing
    URL_PREFIX, and reports every response as successfully sent.
    """

    def create_env_from_spec(
        self, env_spec: study_pb2.EnvironmentSpec) -> environment.Environment:
        # Tests only exercise Procgen environments.
        assert env_spec.WhichOneof('type') == 'procgen'
        return procgen_env.ProcgenEnvironment(env_spec)

    def get_url_for_path(self, path: str) -> str:
        return URL_PREFIX + path

    def send_response(self, response: client_pb2.OperationResponse) -> bool:
        # Always succeeds; the tests patch this with a mock in setUp.
        return True
class EnvironmentHandlerTest(parameterized.TestCase):
def setUp(self):
    """Creates a handler wired to a mocked storage and patched transport."""
    super().setUp()
    self.storage = self.enter_context(
        mock.patch.object(storage, 'Storage', autospec=True))
    self.base_log_dir = self.create_tempdir()
    self.episode_storage_factory = (
        episode_storage_factory.EpisodeStorageFactory())
    self.handler = EnvironmentHandler(
        self.storage,
        study_pb2.User(email=USER_EMAIL),
        CONFIG,
        self.episode_storage_factory,
        self.base_log_dir,
        record_videos=True)
    # Patch the transport so tests can assert on sent responses.
    self.handler.send_response = self.enter_context(
        mock.patch.object(self.handler, 'send_response'))
    # Make sure that mocks do not have any expectations set.
    self._reset_mocks()
def _reset_mocks(self):
    """Clears recorded calls on the storage and send_response mocks."""
    for mock_obj in (self.storage, self.handler.send_response):
        mock_obj.reset_mock()
def send_request(self, **kwargs):
    """Sends an OperationRequest built from the given fields to the handler."""
    request = client_pb2.OperationRequest(**kwargs)
    self.handler.handle_request(request)
def assert_response(self, **kwargs):
    """Asserts that exactly one response with the given fields was sent."""
    expected_calls = [response_call(**kwargs)]
    self.assert_responses(expected_calls)
def assert_error_response(self, mesg: str):
    """Asserts that a single ErrorResponse with message `mesg` was sent."""
    error = client_pb2.ErrorResponse(mesg=mesg)
    self.assert_response(error=error)
def assert_responses(self, responses):
    """Asserts that exactly `responses` were sent, in order."""
    actual_calls = self.handler.send_response.call_args_list
    self.assertSequenceEqual(responses, actual_calls)
def test_setup(self):
    """Handler setup sends the config followed by the user's studies."""
    studies = [sample_study_spec()]
    self.storage.get_studies.return_value = studies
    self.handler.setup()
    # Config and studies should be sent to the client.
    self.storage.get_studies.assert_called_once_with(email=USER_EMAIL)
    self.assert_responses([
        response_call(
            config=client_pb2.ConfigResponse(config=json.dumps(CONFIG))),
        response_call(
            set_studies=client_pb2.SetStudiesResponse(studies=studies))
    ])
@parameterized.named_parameters(('owner', USER_EMAIL, True),
                                ('not_owner', OTHER_USER_EMAIL, False))
def test_get_episode_metadata(self, email, can_delete):
    """Episode metadata reflects ownership: only the owner can delete.

    Also checks the human-readable duration ("3 minutes" for a 200s
    episode) and that the video path is turned into a URL.
    """
    study_spec = sample_study_spec()
    env_spec = sample_env_spec()
    episode = sample_episode(email=email)
    episode.state = study_pb2.Episode.STATE_COMPLETED
    start_time_secs = 1614855000
    episode.start_time.FromSeconds(start_time_secs)
    episode.end_time.FromSeconds(start_time_secs + 200)
    episode.metadata['video_file'] = '/some/video.mp4'
    self.assertEqual(
        client_pb2.EpisodeMetadata(
            study=study_spec,
            env=env_spec,
            episode=episode,
            duration='3 minutes',
            video_url='http://host/some/video.mp4',
            status='Completed',
            can_delete=can_delete),
        self.handler._get_episode_metadata(study_spec, env_spec, episode))
def test_set_studies(self):
    """A set_studies request reads the user's studies and returns them."""
    studies = [sample_study_spec('study1'), sample_study_spec('study2')]
    self.storage.get_studies.return_value = studies
    self.send_request(set_studies=client_pb2.SetStudiesRequest())
    # The studies of the user should be read from storage and sent.
    self.storage.get_studies.assert_called_once_with(email=USER_EMAIL)
    self.assert_response(
        set_studies=client_pb2.SetStudiesResponse(studies=studies))
@parameterized.named_parameters(
    ('study_creator', USER_EMAIL, None),
    ('not_study_creator', OTHER_USER_EMAIL, USER_EMAIL))
def test_select_study(self, creator_email, query_email):
    """Selecting a study creates a session and sends study + episodes.

    The study creator sees everyone's episodes (query_email=None);
    other users see only their own. Episodes whose environment is no
    longer in the study spec are filtered out.
    """
    # A study with two environment.
    env_specs = [sample_env_spec('env1'), sample_env_spec('env2')]
    study_spec = sample_study_spec(
        email=creator_email,
        environment_specs=env_specs,
        state=study_pb2.StudySpec.STATE_ENABLED)
    self.storage.get_study.return_value = study_spec
    # Three existing episodes for the study.
    episodes = [
        study_pb2.Episode(
            id='episode1', study_id='study', environment_id='env1'),
        study_pb2.Episode(
            id='episode2', study_id='study', environment_id='env2'),
        # env3 is not present in the current study spec and should not be sent
        # to the client.
        study_pb2.Episode(
            id='episode3', study_id='study', environment_id='env3')
    ]
    self.storage.get_episodes.return_value = episodes
    self.send_request(
        select_study=client_pb2.SelectStudyRequest(study_id='study'))
    # Study and its episodes should be read from the storage.
    self.storage.get_study.assert_called_once_with('study')
    self.storage.get_episodes.assert_called_once_with(
        'study', email=query_email)
    # A new session should be created.
    (created_session,), _ = self.storage.create_session.call_args
    # Ignored fields.
    created_session.ClearField('id')
    created_session.ClearField('start_time')
    self.assertEqual(
        study_pb2.Session(
            study_id='study',
            user=study_pb2.User(email=USER_EMAIL),
            state=study_pb2.Session.State.STATE_VALID), created_session)
    # Study and episodes should be sent in separate responses.
    self.assert_responses([
        response_call(
            select_study=client_pb2.SelectStudyResponse(study=study_spec)),
        response_call(
            episodes=client_pb2.EpisodesResponse(episodes=[
                self.handler._get_episode_metadata(study_spec, env_specs[0],
                                                   episodes[0]),
                self.handler._get_episode_metadata(study_spec, env_specs[1],
                                                   episodes[1]),
            ]))
    ])
def test_select_missing_study(self):
    """Selecting a nonexistent study yields a 'Missing study.' error."""
    self.storage.get_study.return_value = None
    self.send_request(
        select_study=client_pb2.SelectStudyRequest(study_id='study'))
    self.storage.get_study.assert_called_once_with('study')
    self.assert_error_response('Missing study.')
def test_select_study_not_accessible(self):
    """Another user's non-enabled study cannot be selected."""
    # Study is created by another user and not enabled.
    study_spec = sample_study_spec(email=OTHER_USER_EMAIL)
    self.storage.get_study.return_value = study_spec
    self.send_request(
        select_study=client_pb2.SelectStudyRequest(study_id='study'))
    self.storage.get_study.assert_called_once_with('study')
    self.assert_error_response('You cannot access this study.')
def test_select_study_enabled(self):
    """Another user's ENABLED study is selectable; episode list is empty."""
    # Study is created by another user, but enabled and should be accessible.
    study_spec = sample_study_spec(
        email=OTHER_USER_EMAIL, state=study_pb2.StudySpec.STATE_ENABLED)
    self.storage.get_study.return_value = study_spec
    # No episodes.
    self.storage.get_episodes.return_value = []
    self.send_request(
        select_study=client_pb2.SelectStudyRequest(study_id='study'))
    self.storage.get_study.assert_called_once_with('study')
    self.assert_responses([
        response_call(
            select_study=client_pb2.SelectStudyResponse(study=study_spec)),
        # Episodes response should be empty.
        response_call(episodes=client_pb2.EpisodesResponse())
    ])
def _select_study(self, study_spec: study_pb2.StudySpec):
    """Selects the study with the given spec and clears mock expectations."""
    self.storage.get_study.return_value = study_spec
    request = client_pb2.SelectStudyRequest(study_id='study')
    self.send_request(select_study=request)
    self._reset_mocks()
def test_select_environment(self):
    """Selecting an environment sends pause state, step 0 and the spec."""
    env_specs = [sample_env_spec('env1'), sample_env_spec('env2')]
    study_spec = sample_study_spec(environment_specs=env_specs)
    self._select_study(study_spec)
    # Select the second environment.
    self.send_request(
        select_environment=client_pb2.SelectEnvironmentRequest(env_id='env2'))
    # Episode spec should be set and the initial state (i.e. step 0) should be
    # sent. Reward would be 0 and the synced environment is not paused.
    self.assert_responses([
        response_call(pause=client_pb2.PauseResponse(paused=False)),
        response_call(
            step=client_pb2.StepResponse(
                image=self.handler._image,
                episode_index=1,
                episode_steps=0,
                reward=0)),
        # This should be the last response.
        response_call(
            select_environment=client_pb2.SelectEnvironmentResponse(
                study_id='study', env=env_specs[1])),
    ])
    # Sanity check. Keys and action should be reset.
    self.assertEqual({}, self.handler._keys)
    self.assertEqual(environment.UserInput(keys={}), self.handler._user_input)
def test_select_missing_environment(self):
    """Selecting an env not in the study yields a 'Missing environment.' error."""
    self._select_study(sample_study_spec())
    self.send_request(
        select_environment=client_pb2.SelectEnvironmentRequest(env_id='env'))
    self.assert_error_response('Missing environment.')
def test_select_environment_no_study(self):
    """Selecting an environment without a study first is an error."""
    # Selecting an environment should generate an error is there was no study.
    self.send_request(
        select_environment=client_pb2.SelectEnvironmentRequest(env_id='env'))
    self.assert_error_response('No study is selected.')
def _select_environment(self,
                        study_spec: study_pb2.StudySpec,
                        env_id: str = 'env'):
    """Selects the study, then the environment, then clears the mocks."""
    self._select_study(study_spec)
    request = client_pb2.SelectEnvironmentRequest(env_id=env_id)
    self.send_request(select_environment=request)
    self._reset_mocks()
def test_action(self):
    """A key action steps the sync environment and canonicalizes the key."""
    self._select_environment(sample_study_spec_with_env())
    self.send_request(action=client_pb2.ActionRequest(keys=['ArrowUp']))
    # New state of the environment should be sent. Step should be 1.
    self.assert_response(
        step=client_pb2.StepResponse(
            image=self.handler._image,
            episode_index=1,
            episode_steps=1,
            reward=0))
    # Sanity check. Keys should contain the canonical code, ArrowUp -> Up.
    self.assertEqual({'Up': 1}, self.handler._keys)
    self.assertEqual(
        environment.UserInput(keys={'Up': 1}), self.handler._user_input)
def test_action_sync_no_keys(self):
    """An empty key set does not step a synchronous environment."""
    self._select_environment(sample_study_spec_with_env())
    self.send_request(action=client_pb2.ActionRequest(keys=[]))
    self.handler.send_response.assert_not_called()
def test_action_pause(self):
    """The pause key toggles pause state; other keys in the event are ignored."""
    self._select_environment(sample_study_spec_with_env())
    # Pause. Additional keys should be ignored.
    keys = [environment_handler.PAUSE_KEY, 'ArrowUp']
    self.send_request(action=client_pb2.ActionRequest(keys=keys))
    # Unpause
    self.send_request(action=client_pb2.ActionRequest(keys=keys))
    self.assert_responses([
        response_call(pause=client_pb2.PauseResponse(paused=True)),
        response_call(pause=client_pb2.PauseResponse(paused=False))
    ])
def test_action_reset(self):
    """Enter pauses the environment and asks the user to confirm saving."""
    self._select_environment(sample_study_spec_with_env())
    # Enter key resets the episode. Additional keys should be ignored.
    self.send_request(
        action=client_pb2.ActionRequest(keys=['Enter', 'ArrowUp']))
    # Environment should be paused and the confirmation request should be sent.
    self.handler.send_response.assert_has_calls([
        response_call(pause=client_pb2.PauseResponse(paused=True)),
        response_call(
            confirm_save=client_pb2.ConfirmSaveResponse(
                mark_as_completed=True)),
    ])
def test_action_pause_async(self):
    """The pause key also pauses a free-running async environment."""
    self._select_environment(sample_study_spec_with_async_env())
    # Environment will be initially in paused state. Unpause.
    self.send_request(
        action=client_pb2.ActionRequest(keys=[environment_handler.PAUSE_KEY]))
    # Sanity check.
    self.assertFalse(self.handler._paused)
    # Let the async loop run for a while before pausing again.
    time.sleep(0.5)
    self._reset_mocks()
    self.send_request(
        action=client_pb2.ActionRequest(keys=[environment_handler.PAUSE_KEY]))
    self.handler.send_response.assert_has_calls(
        [response_call(pause=client_pb2.PauseResponse(paused=True))])
def test_action_gamepad(self):
    """Gamepad buttons/axes merge with keyboard keys; controller ID is kept."""
    self._select_environment(sample_study_spec_with_env())
    self.send_request(
        action=client_pb2.ActionRequest(
            keys=['ArrowUp'],
            gamepad_input=client_pb2.GamepadInput(
                buttons={
                    0: client_pb2.GamepadInput.Button(pressed=True, value=1.0),
                    2: client_pb2.GamepadInput.Button(pressed=True, value=0.5)
                },
                axes={
                    1: 0.25,
                    4: 0.5
                },
                id='my_controller')))
    # Both keyboard and gamepad input should be present.
    self.assertEqual(
        {
            'Axis1': 0.25,
            'Axis4': 0.5,
            'Button0': 1,
            'Button2': 0.5,
            'Up': 1
        }, self.handler._keys)
    # Controller ID should be set in the episode.
    self.assertEqual('my_controller', self.handler._episode.controller_id)
def test_async_env(self):
    """An unpaused async environment steps itself at the requested FPS.

    Runs the environment for ~1 second at 10 FPS and checks the number
    of steps, their ordering and the inter-step latency (timing bounds
    are deliberately loose to tolerate scheduler jitter).
    """
    self._select_environment(sample_study_spec_with_async_env())
    # Async environment should be in paused state.
    self.assertTrue(self.handler._paused)
    # Set frame rate to 10 (i.e. 10 steps per second).
    self.send_request(set_fps=client_pb2.SetFpsRequest(fps=10))
    steps = []
    timestamps = []

    def send_fn(request: client_pb2.OperationResponse):
        # Record only step responses and when they were emitted.
        if request.WhichOneof('type') == 'step':
            steps.append(request.step.episode_steps)
            timestamps.append(time.perf_counter())
        return True

    # Unpause the environment.
    self.handler.send_response.side_effect = send_fn
    self.send_request(
        action=client_pb2.ActionRequest(keys=[environment_handler.PAUSE_KEY]))
    time.sleep(1)
    # Number of step responses should be close to 10.
    num_steps = len(steps)
    self.assertBetween(num_steps, 9, 11)
    # Step indices should be sequential starting from 1 (0 is sent at reset).
    self.assertSequenceEqual(range(1, num_steps + 1), steps)
    # Time between each step should be close to 100ms.
    latency = [timestamps[i] - timestamps[i - 1] for i in range(1, num_steps)]
    self.assertBetween(statistics.mean(latency), 0.09, 0.11)
@parameterized.named_parameters(
    ('accept_completed', True, True, study_pb2.Episode.STATE_COMPLETED),
    ('accept_not_completed', True, False, study_pb2.Episode.STATE_CANCELLED),
    ('reject', False, True, study_pb2.Episode.STATE_REJECTED),
)
def test_save_episode(self, accept: bool, mark_as_completed: bool,
                      state: study_pb2.Episode.State):
    """End-to-end save: storage record, log files, responses, next episode.

    Plays four steps, saves the episode with the parameterized
    accept/completed flags and verifies: the episode proto written to
    storage (IDs, state, storage path — non-completed episodes go under
    an 'ignored' subdirectory), the video metadata, the responses sent
    (including the reset to episode index 2), and the recorded episode
    log (specs, per-step keys, images and step info).
    """
    env_spec = sample_env_spec()
    study_spec = sample_study_spec(environment_specs=[env_spec])
    self._select_environment(study_spec)
    session_id = self.handler._session.id
    # Take dummy actions.
    keys = ['ArrowUp', 'ArrowRight', 'ArrowDown', 'ArrowLeft']
    num_steps = len(keys)
    for key in keys:
        self.send_request(action=client_pb2.ActionRequest(keys=[key]))
    self._reset_mocks()
    self.send_request(
        save_episode=client_pb2.SaveEpisodeRequest(
            accept=accept, mark_as_completed=mark_as_completed))
    episode_id = '1.0'
    tag_directory = os.path.join(
        self.base_log_dir, 'study', session_id,
        '' if state == study_pb2.Episode.STATE_COMPLETED else 'ignored', 'env',
        episode_id)
    # Episode should have its IDs and storage set.
    expected_episode = study_pb2.Episode(
        id=episode_id,
        study_id='study',
        environment_id='env',
        user=study_pb2.User(email=USER_EMAIL),
        session_id=session_id,
        state=state,
        num_steps=num_steps,
        total_reward=0,
        storage=study_pb2.Episode.Storage(
            pickle=study_pb2.Episode.Storage.Pickle(
                path=os.path.join(tag_directory, '0.pkl'))))
    # Check that the episode is stored.
    (episode,), _ = self.storage.create_episode.call_args
    # Start and end time should be set.
    self.assertTrue(episode.HasField('start_time'))
    self.assertTrue(episode.HasField('end_time'))
    # Path of the video file should be present in the metadata.
    self.assertEqual(
        os.path.join(tag_directory, 'video.mp4'),
        episode.metadata['video_file'])
    # Episode metadata should be sent to the client and a new episode should
    # start (i.e. episode index should be 2).
    self.assert_responses([
        response_call(
            save_episode=client_pb2.SaveEpisodeResponse(
                episode=self.handler._get_episode_metadata(
                    study_spec, env_spec, episode))),
        response_call(pause=client_pb2.PauseResponse(paused=False)),
        response_call(
            step=client_pb2.StepResponse(
                image=self.handler._image,
                episode_index=2,
                episode_steps=0,
                reward=0)),
    ])
    # Drop the timestamps and metadata before comparing against the
    # expected proto above.
    for ignored_fields in ['start_time', 'end_time', 'metadata']:
        episode.ClearField(ignored_fields)
    self.assertEqual(expected_episode, episode)
    # Check that the episode is logged properly.
    r = self.episode_storage_factory.create_reader(episode.storage)
    steps = r.steps
    self.assertLen(steps, num_steps + 1)
    denv = self.handler._env.env()
    self.assertEqual(denv.observation_spec(), r.observation_spec())
    self.assertEqual(denv.action_spec(), r.action_spec())
    self.assertEqual(denv.reward_spec(), r.reward_spec())
    self.assertEqual(denv.discount_spec(), r.discount_spec())
    # User actions and images should be present in the step metadata.
    mapped_keys = [None, 'Up', 'Right', 'Down', 'Left']
    for index, key in enumerate(mapped_keys):
        step = steps[index]
        self.assertEqual(step.custom_data['keys'], {key: 1} if key else {})
        self.assertIn('image', step.custom_data)
        if index > 0:
            # Step information should be present info custom data.
            self.assertIn('level_complete', step.custom_data['info'])
def test_set_fps(self):
    """A set_fps request updates the handler's frame rate."""
    fps = constants.ASYNC_FPS  # Default frames/sec.
    self.assertEqual(fps, self.handler._fps)
    # Change the frame rate.
    self.send_request(set_fps=client_pb2.SetFpsRequest(fps=fps + 3))
    self.assertEqual(fps + 3, self.handler._fps)
def test_set_quality(self):
    """set_quality maps the enum to the corresponding web_* preset."""
    self.assertEqual('web_low', self.handler._quality)  # Default preset.
    # Change the image quality to medium and high.
    self.send_request(
        set_quality=client_pb2.SetQualityRequest(
            quality=client_pb2.SetQualityRequest.QUALITY_MEDIUM))
    self.assertEqual('web_medium', self.handler._quality)
    self.send_request(
        set_quality=client_pb2.SetQualityRequest(
            quality=client_pb2.SetQualityRequest.QUALITY_HIGH))
    self.assertEqual('web_high', self.handler._quality)
@parameterized.named_parameters(('paused_sync', True, True),
                                ('paused_async', True, False),
                                ('unpaused_sync', False, True))
@mock.patch.object(environment, 'Environment', autospec=True)
def test_set_camera(self, paused, sync, mock_env):
    """Switching camera re-renders unless the env is async and running.

    Covers the combinations where a fresh frame must be sent (paused,
    or synchronous); the running-async case is test_set_camera_async.
    """
    mock_env.set_camera.return_value = environment.Camera(2, 'name')
    mock_env.render.return_value = SAMPLE_IMAGE
    # Paused environment.
    self.handler._env = mock_env
    self.handler._paused = paused
    self.handler._sync = sync
    self.send_request(set_camera=client_pb2.SetCameraRequest(index=2))
    self.assert_responses([
        response_call(
            set_camera=client_pb2.SetCameraResponse(index=2, name='name')),
        # Image should also be updated.
        response_call(
            step=client_pb2.StepResponse(
                image=encode_image(
                    SAMPLE_IMAGE, fmt='JPEG', quality=self.handler._quality),
                episode_index=0,
                episode_steps=0)),
    ])
@mock.patch.object(environment, 'Environment', autospec=True)
def test_set_camera_async(self, mock_env):
    """A running async env switches camera without an immediate re-render."""
    mock_env.set_camera.return_value = environment.Camera(2, 'name')
    self.handler._env = mock_env
    # Async environment and not paused.
    self.handler._paused = False
    self.handler._sync = False
    self.send_request(set_camera=client_pb2.SetCameraRequest(index=2))
    self.assert_response(
        set_camera=client_pb2.SetCameraResponse(index=2, name='name'))
    mock_env.render.assert_not_called()
def test_save_existing_study(self):
    """Updating a study preserves the stored creation time and state."""
    env_spec = sample_env_spec()
    study_spec = sample_study_spec(
        state=study_pb2.StudySpec.STATE_ENABLED, environment_specs=[env_spec])
    study_spec.creation_time.GetCurrentTime()
    # Existing study with different creation time and state.
    existing_study_spec = sample_study_spec()  # Without environment.
    existing_study_spec.creation_time.FromDatetime(
        datetime.datetime(2021, 1, 2))
    existing_study_spec.state = study_pb2.StudySpec.STATE_DISABLED
    self.storage.get_study.return_value = existing_study_spec
    self.send_request(save_study=client_pb2.SaveStudyRequest(study=study_spec))
    # The updated study spec should preserve the creation time and state of the
    # existing one.
    updated_study_spec = study_spec
    updated_study_spec.creation_time.CopyFrom(existing_study_spec.creation_time)
    updated_study_spec.state = study_pb2.StudySpec.STATE_DISABLED
    # Existing study spec should be read from the storage and the updated
    # version should be saved.
    self.storage.get_study.assert_called_once_with('study')
    (study_spec,), _ = self.storage.update_study.call_args
    self.assertEqual(updated_study_spec, study_spec)
    # The response will be empty.
    self.assert_response(save_study=client_pb2.SaveStudyResponse())
def test_save_missing_study(self):
    """Saving a study with an ID not found in storage is an error."""
    self.storage.get_study.return_value = None
    self.send_request(
        save_study=client_pb2.SaveStudyRequest(study=sample_study_spec()))
    self.assert_error_response('Missing study.')
def test_save_existing_study_not_creator(self):
    """Only the creator may modify an existing study."""
    self.storage.get_study.return_value = sample_study_spec(
        email=OTHER_USER_EMAIL)
    self.send_request(
        save_study=client_pb2.SaveStudyRequest(study=sample_study_spec()))
    self.assert_error_response('You cannot modify this study.')
def test_save_new_study(self):
    """Saving a study without an ID creates it, owned by the active user."""
    study_spec = sample_study_spec(
        study_id=None,
        email=OTHER_USER_EMAIL,
        environment_specs=[sample_env_spec()])
    self.send_request(save_study=client_pb2.SaveStudyRequest(study=study_spec))
    self.storage.get_study.assert_not_called()
    # The creator email should be set to that of the active user.
    study_spec.creator.email = USER_EMAIL
    (created_study_spec,), _ = self.storage.create_study.call_args
    self.assertEqual(study_spec, created_study_spec)
    self.assert_response(save_study=client_pb2.SaveStudyResponse())
@parameterized.named_parameters(
    ('enable', True, study_pb2.StudySpec.STATE_ENABLED),
    ('disable', False, study_pb2.StudySpec.STATE_DISABLED))
def test_enable_study(self, enable, state):
    """Enabling/disabling a study updates storage and notifies the client."""
    self.storage.get_study.return_value = sample_study_spec()
    self.send_request(
        enable_study=client_pb2.EnableStudyRequest(
            study_id='study', enable=enable))
    # State of the study should be updated in storage and sent to the client.
    self.storage.update_study_state.assert_called_once_with('study', state)
    self.assert_response(
        enable_study=client_pb2.EnableStudyResponse(
            study_id='study', enabled=enable))
def test_enable_missing_study(self):
    """Enabling a nonexistent study yields a 'Missing study.' error."""
    self.storage.get_study.return_value = None
    self.send_request(
        enable_study=client_pb2.EnableStudyRequest(study_id='study'))
    self.assert_error_response('Missing study.')
@mock.patch.object(replay, 'StorageReplay', autospec=True)
def test_replay_episode(self, storage_replay):
    """Replaying an episode sends its metadata and per-step rewards.

    The FIRST step has no reward, so the response reports 0 for it.
    """
    env_spec = sample_env_spec()
    storage_replay.return_value.env_spec = env_spec
    study_spec = sample_study_spec(environment_specs=[env_spec])
    storage_replay.return_value.study_spec = study_spec
    # An episode with two steps. The total reward matches that of the steps
    # below.
    episode = study_pb2.Episode(
        id='episode',
        study_id='study',
        environment_id='env',
        session_id='session',
        state=study_pb2.Episode.STATE_COMPLETED,
        num_steps=2,
        total_reward=0.3)
    storage_replay.return_value.episode = episode
    # Observations, actions and step metadata are not used.
    steps = [
        episode_storage.StepData(
            dm_env.TimeStep(dm_env.StepType.FIRST, None, None, None), None),
        episode_storage.StepData(
            dm_env.TimeStep(dm_env.StepType.MID, 0.1, 1.0, None), None),
        episode_storage.StepData(
            dm_env.TimeStep(dm_env.StepType.LAST, 0.2, 0.0, None), None),
    ]
    storage_replay.return_value.get_step.side_effect = (
        lambda index: steps[index])
    self.send_request(
        replay_episode=client_pb2.ReplayEpisodeRequest(
            ref=client_pb2.EpisodeRef(
                study_id='study', session_id='session', episode_id='episode')))
    # Store replay object should be created with the episode reference.
    storage_replay.assert_called_once_with(self.episode_storage_factory,
                                           self.storage, 'study', 'session',
                                           'episode')
    # Episode metadata and the step rewards should be sent to the client.
    self.assert_response(
        replay_episode=client_pb2.ReplayEpisodeResponse(
            episode=self.handler._get_episode_metadata(study_spec, env_spec,
                                                       episode),
            step_rewards=[0, 0.1, 0.2]))
@parameterized.named_parameters(
    # Non-dict observations are JSON-encoded as a whole.
    ('non_dict_obs', [1, 2, 3], client_pb2.Data(json_encoded='[1, 2, 3]')),
    # Dict observations: image-valued entries are split out and encoded
    # separately; the rest is JSON-encoded.
    ('dict_obs', {
        'a': 1,
        'b': [2, 3],
        'c': SAMPLE_IMAGE
    },
     client_pb2.Data(
         json_encoded='{"a": 1, "b": [2, 3]}',
         images=[
             client_pb2.Data.Image(
                 name='c', image=encode_image(SAMPLE_IMAGE))
         ])),
    # A bare image observation becomes a single unnamed encoded image.
    ('image_obs', SAMPLE_IMAGE,
     client_pb2.Data(
         images=[client_pb2.Data.Image(image=encode_image(SAMPLE_IMAGE))])),
)
@mock.patch.object(replay, 'Replay', autospec=True)
def test_replay_step(self, obs, obs_data, mock_replay):
    """Tests a replay step with the specified observation."""
    # Inject the mock replay to the environment handler.
    episode = sample_episode()
    step_metadata = episode.step_metadata[2]
    step_metadata.tags.add().label = 'tag1'
    step_metadata.tags.add().label = 'tag2'
    mock_replay.episode = episode
    self.handler._replay = mock_replay
    action = 0.123
    mock_replay.get_step.return_value = episode_storage.StepData(
        dm_env.TimeStep(dm_env.StepType.MID, 0.1, 1.0, obs), action, {
            'keys': {
                'Up': 1,
                'Right': 1
            },
            'image': b'image'
        })
    self.send_request(replay_step=client_pb2.ReplayStepRequest(index=2))
    # get_step() should be called with the specified index.
    mock_replay.get_step.assert_called_once_with(2)
    # Keys image and tags should be present. Action is always json encoded.
    self.assert_response(
        replay_step=client_pb2.ReplayStepResponse(
            index=2,
            image=b'image',
            keys={
                'Up': 1,
                'Right': 1
            },
            reward=0.1,
            observation=obs_data,
            action=client_pb2.Data(json_encoded='0.123'),
            tags=['tag1', 'tag2']))
@parameterized.named_parameters(('success', True), ('failure', False))
@mock.patch.object(replay, 'Replay', autospec=True)
def test_update_replay_episode(self, success, mock_replay):
    """Updating the replayed episode forwards notes and echoes the outcome."""
    mock_replay.update_episode.return_value = success
    self.handler._replay = mock_replay
    self.send_request(
        update_replay_episode=client_pb2.UpdateReplayEpisodeRequest(
            notes='notes'))
    mock_replay.update_episode.assert_called_once_with('notes')
    self.assert_response(
        update_replay_episode=client_pb2.UpdateReplayEpisodeResponse(
            success=success))
def test_update_replay_episode_no_replay(self):
    """Updating the episode without an active replay reports failure."""
    self.send_request(
        update_replay_episode=client_pb2.UpdateReplayEpisodeRequest(
            notes='notes'))
    self.assert_response(
        update_replay_episode=client_pb2.UpdateReplayEpisodeResponse(
            success=False))
@parameterized.named_parameters(('success', True), ('failure', False))
@mock.patch.object(replay, 'Replay', autospec=True)
def test_add_episode_tag(self, success, mock_replay):
    """Adding an episode tag forwards to the replay and echoes the outcome."""
    mock_replay.add_episode_tag.return_value = success
    self.handler._replay = mock_replay
    self.send_request(
        add_episode_tag=client_pb2.AddEpisodeTagRequest(tag='tag'))
    mock_replay.add_episode_tag.assert_called_once_with('tag')
    self.assert_response(
        add_episode_tag=client_pb2.AddEpisodeTagResponse(
            tag='tag', success=success))
def test_add_episode_tag_no_replay(self):
    """Adding an episode tag without an active replay reports failure."""
    self.send_request(
        add_episode_tag=client_pb2.AddEpisodeTagRequest(tag='tag'))
    self.assert_response(
        add_episode_tag=client_pb2.AddEpisodeTagResponse(
            tag='tag', success=False))
@parameterized.named_parameters(('success', True), ('failure', False))
@mock.patch.object(replay, 'Replay', autospec=True)
def test_remove_episode_tag(self, success, mock_replay):
  """Removing an episode tag is delegated to the active replay."""
  self.handler._replay = mock_replay
  mock_replay.remove_episode_tag.return_value = success
  self.send_request(
      remove_episode_tag=client_pb2.RemoveEpisodeTagRequest(tag='tag'))
  # The tag must be forwarded verbatim and echoed in the response.
  mock_replay.remove_episode_tag.assert_called_once_with('tag')
  expected = client_pb2.RemoveEpisodeTagResponse(tag='tag', success=success)
  self.assert_response(remove_episode_tag=expected)
def test_remove_episode_tag_no_replay(self):
  """Removing an episode tag without an active replay must fail."""
  request = client_pb2.RemoveEpisodeTagRequest(tag='tag')
  self.send_request(remove_episode_tag=request)
  expected = client_pb2.RemoveEpisodeTagResponse(tag='tag', success=False)
  self.assert_response(remove_episode_tag=expected)
@parameterized.named_parameters(('success', True), ('failure', False))
@mock.patch.object(replay, 'Replay', autospec=True)
def test_add_step_tag(self, success, mock_replay):
  """Adding a step tag passes both step index and tag to the replay."""
  self.handler._replay = mock_replay
  mock_replay.add_step_tag.return_value = success
  self.send_request(
      add_step_tag=client_pb2.AddStepTagRequest(index=2, tag='tag'))
  # Index and tag must be forwarded and echoed back in the response.
  mock_replay.add_step_tag.assert_called_once_with(2, 'tag')
  expected = client_pb2.AddStepTagResponse(index=2, tag='tag', success=success)
  self.assert_response(add_step_tag=expected)
def test_add_step_tag_no_replay(self):
  """Adding a step tag without an active replay must fail."""
  request = client_pb2.AddStepTagRequest(index=2, tag='tag')
  self.send_request(add_step_tag=request)
  expected = client_pb2.AddStepTagResponse(index=2, tag='tag', success=False)
  self.assert_response(add_step_tag=expected)
@parameterized.named_parameters(('success', True), ('failure', False))
@mock.patch.object(replay, 'Replay', autospec=True)
def test_remove_step_tag(self, success, mock_replay):
  """Removing a step tag passes both step index and tag to the replay."""
  self.handler._replay = mock_replay
  mock_replay.remove_step_tag.return_value = success
  self.send_request(
      remove_step_tag=client_pb2.RemoveStepTagRequest(index=2, tag='tag'))
  # Index and tag must be forwarded and echoed back in the response.
  mock_replay.remove_step_tag.assert_called_once_with(2, 'tag')
  expected = client_pb2.RemoveStepTagResponse(
      index=2, tag='tag', success=success)
  self.assert_response(remove_step_tag=expected)
def test_remove_step_tag_no_replay(self):
  """Removing a step tag without an active replay must fail."""
  request = client_pb2.RemoveStepTagRequest(index=2, tag='tag')
  self.send_request(remove_step_tag=request)
  expected = client_pb2.RemoveStepTagResponse(
      index=2, tag='tag', success=False)
  self.assert_response(remove_step_tag=expected)
@parameterized.named_parameters(
    ('dir', False, None),
    ('archive', True, None),
    ('dir_no_end', False, ['bogus']),
    ('archive_no_end', True, ['bogus']),
    ('dir_end', False, ['bogus', 'end']),
    ('archive_end', True, ['bogus', 'end']),
)
def test_download_episodes(self, archive, end_of_episode_tags):
  """Downloads two recorded episodes as a directory or ZIP archive.

  Checks that (a) two progress responses (50% and 100%) are sent, (b) the
  returned URL points at a file (archive) or directory, and (c) episodes
  are truncated at the step tagged 'end' iff that tag is listed in
  end_of_episode_tags.
  """
  root_directory = self.create_tempdir()
  episode_ids = ['one', 'two']
  episodes = {}
  for episode_id in episode_ids:
    # Each fixture episode has 15 recorded steps.
    episode, _, _ = test_utils.record_single_episode(
        episode_id, 'maze', os.path.join(root_directory, episode_id), 15)
    episodes[episode_id] = episode

  def get_episode(study_id, session_id, episode_id):
    # All refs in the request point at the same study/session.
    self.assertEqual(study_id, 'study')
    self.assertEqual(session_id, 'session')
    episode = episodes[episode_id]
    # Tag step 5 as 'end' so truncation (if requested) cuts there.
    episode.step_metadata[5].tags.add().label = 'end'
    return episode

  self.storage.get_episode.side_effect = get_episode
  refs = [
      client_pb2.EpisodeRef(
          study_id=e.study_id, session_id=e.session_id, episode_id=e.id)
      for e in episodes.values()
  ]
  self.send_request(
      download_episodes=client_pb2.DownloadEpisodesRequest(
          refs=refs, archive=archive,
          end_of_episode_tags=end_of_episode_tags))
  # There should be two download responses.
  ((response1,), _), ((response2,), _) = (
      self.handler.send_response.call_args_list)
  self.assertEqual(
      create_response(
          download_episodes=client_pb2.DownloadEpisodesResponse(progress=50)),
      response1)
  download_episodes = response2.download_episodes
  self.assertTrue(download_episodes.url.startswith(URL_PREFIX))
  self.assertEqual(100, download_episodes.progress)
  path = download_episodes.url[len(URL_PREFIX):]
  if archive:
    # The path should point to the ZIP file.
    self.assertTrue(os.path.isfile(path))
    temp_dir = self.create_tempdir()
    # Extract the contents to a temporary directory.
    with zipfile.ZipFile(path) as zf:
      zf.extractall(temp_dir)
    path = temp_dir.full_path
  else:
    # The path should point to the directory.
    self.assertTrue(os.path.isdir(path))
  # Read the generated dataset and do the sanity check.
  if end_of_episode_tags and 'end' in end_of_episode_tags:
    # Episodes should be truncated if there is a matching end of episode tag
    # in the request.
    episode_length = [6, 6]
  else:
    # 15 recorded steps plus the initial one.
    episode_length = [16, 16]
  for i in range(2):
    spec = study_pb2.Episode.Storage(
        pickle=study_pb2.Episode.Storage.Pickle(
            path=os.path.join(path, f'{i}.pkl')))
    r = self.episode_storage_factory.create_reader(spec)
    self.assertLen(r.steps, episode_length[i])
    self.assertContainsSubset(
        [
            # Dataset metadata.
            'episode_id',
            'rlds_creator:env_id',
            'rlds_creator:study_id',
            # Episode specific metadata.
            'agent_id',
            'num_steps',
            'total_reward',
        ],
        r.metadata)
def test_download_episodes_different_study(self):
  """Downloading episodes from two different studies is rejected."""
  self.storage.get_episode.return_value = study_pb2.Episode()
  self.send_request(
      download_episodes=client_pb2.DownloadEpisodesRequest(refs=[
          client_pb2.EpisodeRef(
              study_id='study1', session_id='session', episode_id='1'),
          client_pb2.EpisodeRef(
              study_id='study2', session_id='session', episode_id='2')
      ]))
  self.assert_error_response(
      'Request failed: Episodes are not from the same study.')
def test_download_episodes_missing_episode(self):
  """A download request referencing a non-existent episode fails."""
  # Storage lookup returns None, i.e. the episode cannot be found.
  self.storage.get_episode.return_value = None
  self.send_request(
      download_episodes=client_pb2.DownloadEpisodesRequest(refs=[
          client_pb2.EpisodeRef(
              study_id='study', session_id='session', episode_id='1')
      ]))
  self.assert_error_response(
      'Request failed: One of the episodes is missing.')
def test_download_episodes_different_environment(self):
  """Episodes recorded in different environments cannot be bundled."""
  # Two episodes from the same study but different environments.
  self.storage.get_episode.side_effect = [
      study_pb2.Episode(environment_id='env1'),
      study_pb2.Episode(environment_id='env2')
  ]
  self.send_request(
      download_episodes=client_pb2.DownloadEpisodesRequest(refs=[
          client_pb2.EpisodeRef(
              study_id='study', session_id='session', episode_id='1'),
          client_pb2.EpisodeRef(
              study_id='study', session_id='session', episode_id='2')
      ]))
  self.assert_error_response(
      'Request failed: Episodes must be from the same environment.')
@parameterized.named_parameters(('success', True), ('failure', False))
def test_delete_episode(self, success):
  """Deleting an episode removes its on-disk data iff storage succeeds."""
  f = self.create_tempfile()
  path = f.full_path
  episode = sample_episode(path=path)
  self.storage.get_episode.return_value = episode
  self.storage.delete_episode.return_value = success
  ref = client_pb2.EpisodeRef(
      study_id=episode.study_id,
      session_id=episode.session_id,
      episode_id=episode.id)
  self.send_request(delete_episode=client_pb2.DeleteEpisodeRequest(ref=ref))
  self.storage.delete_episode.assert_called_once_with(episode.study_id,
                                                      episode.session_id,
                                                      episode.id)
  self.assert_response(
      delete_episode=client_pb2.DeleteEpisodeResponse(
          ref=ref, success=success))
  # Episode data should be removed if the delete operation was successful.
  # assertNotEqual states the intent directly; the previous assertIsNot
  # only worked because os.path.exists() returns the bool singletons.
  self.assertNotEqual(os.path.exists(path), success)
def test_delete_missing_episode(self):
  """Deleting a non-existent episode yields an error response."""
  self.storage.get_episode.return_value = None
  self.send_request(
      delete_episode=client_pb2.DeleteEpisodeRequest(
          ref=client_pb2.EpisodeRef(
              study_id='study', session_id='session', episode_id='episode')))
  self.assert_error_response('Missing episode.')
def test_delete_episode_not_allowed(self):
  """A user cannot delete an episode that belongs to another user."""
  # The episode is owned by a different user (OTHER_USER_EMAIL).
  episode = sample_episode(email=OTHER_USER_EMAIL)
  self.storage.get_episode.return_value = episode
  self.send_request(
      delete_episode=client_pb2.DeleteEpisodeRequest(
          ref=client_pb2.EpisodeRef(
              study_id=episode.study_id,
              session_id=episode.session_id,
              episode_id=episode.id)))
  self.assert_error_response('You cannot delete this episode.')
if __name__ == '__main__':
  absltest.main()  # Run the test suite when executed directly.
| google-research/rlds-creator | rlds_creator/environment_handler_test.py | Python | apache-2.0 | 42,579 |
# -*- coding: UTF-8 -*-
# (c) 2013, Ovais Tariq <ovaistariq@gmail.com>
#
# This file is part of mha-helper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import smtplib
from email.mime.text import MIMEText
class Email_helper(object):
    """Sends plain-text notification e-mails through a fixed SMTP account.

    The account settings below are placeholders and must be edited for a
    real deployment.
    """

    SMTP_HOST = "smtp.163.com"      # change to the real SMTP server
    MAIL_USER = "wubingxi@163.com"  # change to your own mailbox
    MAIL_PASS = "xxxxxxxxx"         # change to the mailbox password
    SENDER = "%s" % (MAIL_USER)

    def __init__(self):
        # Kept for backward compatibility: send_email() opens its own
        # connection per call, so this object is never actually connected.
        self._email_sender = smtplib.SMTP()

    def send_email(self, subject, msg, to_email_list):
        """Send *msg* with *subject* to every address in *to_email_list*.

        Returns True on success, False if the recipient list is empty or
        any SMTP step fails (the error is printed, not raised).
        """
        if len(to_email_list) < 1:
            return False
        email_msg = MIMEText(msg, _subtype='plain', _charset='gb2312')
        email_msg['Subject'] = subject
        email_msg['From'] = Email_helper.SENDER
        email_msg['To'] = ';'.join(to_email_list)
        me = "%s<%s>" % (subject, Email_helper.SENDER)
        try:
            email_sender = smtplib.SMTP()
            email_sender.connect(Email_helper.SMTP_HOST)
            email_sender.login(Email_helper.MAIL_USER, Email_helper.MAIL_PASS)
            email_sender.sendmail(me, to_email_list, email_msg.as_string())
            # quit() sends a proper SMTP QUIT; close() just drops the socket.
            email_sender.quit()
            return True
        except Exception as e:  # "except Exception, e" is Python-2-only syntax
            print(str(e))
            print("error")
            return False
'''
if __name__ == '__main__':
report_email_list=['wubingxi@gmail.com']
Email_sender = Email_helper()
Email_sender.send_email(subject="mha test", msg="hello world",to_email_list=report_email_list)
'''
| wubx/mha-helper | scripts/lib/email_helper.py | Python | gpl-3.0 | 2,117 |
import socket
import urlparse
import struct
import simplejson
import select
import errno
# Exception hierarchy for the length-prefixed TCP transport below.
# NOTE(review): TimeoutError shadows the Python 3 builtin of the same
# name — callers must import it from this module explicitly.
class TimeoutError(Exception):
    pass


# Base class for "connection ended" conditions.
class ClosedError(Exception):
    pass


# Raised when this side closed the socket while a receive was pending.
class LocalyClosedError(ClosedError):
    pass


# Raised when the peer closed the connection mid-message.
class RemotelyClosedError(ClosedError):
    pass
class Transport:
    """Length-prefixed message transport over a TCP socket.

    Wire format: every message is a 4-byte big-endian unsigned length
    header ("!I") followed by that many payload bytes.
    """

    _HEADER = "!I"
    # errno values that are expected when either side has already torn the
    # connection down; shutdown() failures with these codes are ignored.
    _SAFE_TERMINATION_ERRORS = [errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN, errno.EBADF]

    def __init__(self, uri):
        """Connect to a "tcp://host:port" URI."""
        self._socket = socket.socket()
        try:
            self._socket.connect(self._parseTCPURI(uri))
            # Request/response messages are small; disable Nagle batching.
            self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        except:
            # Release the socket, then re-raise.  The previous code
            # swallowed the error, leaving a silently unusable instance.
            self._socket.close()
            raise

    def send(self, data):
        """Send one framed message; *data* must be a byte string."""
        # sendall, not send: send() may transmit only part of the buffer,
        # which would corrupt the length-prefixed stream.
        self._socket.sendall(self._lengthHeader(len(data)) + data)

    def sendJSON(self, obj):
        """Serialize *obj* to JSON and send it as one message."""
        self.send(simplejson.dumps(obj))

    def receive(self, timeout):
        """Receive one framed message, waiting at most *timeout* seconds."""
        headerLength = struct.calcsize(self._HEADER)
        header = self._recvAll(headerLength, timeout)
        length = struct.unpack(self._HEADER, header)[0]
        return self._recvAll(length, timeout)

    def receiveJSON(self, timeout):
        """Receive one message and decode it as JSON."""
        payload = self.receive(timeout)
        return simplejson.loads(payload)

    def close(self):
        """Shut down both directions and release the socket."""
        try:
            self._socket.shutdown(socket.SHUT_WR)
        except socket.error as e:
            if e.errno not in self._SAFE_TERMINATION_ERRORS:
                raise
        try:
            self._socket.shutdown(socket.SHUT_RD)
        except socket.error as e:
            if e.errno not in self._SAFE_TERMINATION_ERRORS:
                raise
        self._socket.close()
        self._socket = None

    def closed(self):
        """True once close() has been called."""
        return self._socket is None

    def _parseTCPURI(self, uri):
        """Return the (hostname, port) pair encoded in a tcp:// URI."""
        hostname, port = urlparse.urlparse(uri).netloc.split(":")
        return hostname, int(port)

    def _lengthHeader(self, length):
        """Pack *length* as the 4-byte big-endian frame header."""
        return struct.pack(self._HEADER, length)

    def _recvAll(self, length, timeout):
        """Read exactly *length* bytes, raising on timeout or disconnect."""
        # b"" is identical to "" on Python 2 and correct for Python 3.
        data = b""
        while len(data) < length:
            remains = length - len(data)
            ready, unused, unused = select.select([self._socket], [], [], timeout)
            if len(ready) == 0:
                raise TimeoutError("Timeout while receiving from server (%f seconds)" % timeout)
            sock = self._socket
            if sock is None:
                raise LocalyClosedError("Session closed locally while still receiving")
            segment = sock.recv(remains)
            if len(segment) == 0:
                raise RemotelyClosedError("Peer terminated connection while still receiving")
            data += segment
        return data
| noam-stratoscale/rackattack-api | py/rackattack/tcp/transport.py | Python | apache-2.0 | 2,624 |
# -*- coding:utf-8 -*-
# Auto-generated by Mako from 'themes/monospace/templates/index.tmpl'.
# Do not edit by hand — regenerate from the template source instead.
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10  # Mako compiled-template format version
_modified_time = 1443802885.9030464  # template mtime at compile, epoch seconds
_enable_loop = True
_template_filename = 'themes/monospace/templates/index.tmpl'
_template_uri = 'index.tmpl'
_source_encoding = 'utf-8'
_exports = ['content']
def _mako_get_namespace(context, name):
    """Return the named template namespace, creating all on first miss.

    Mako-generated helper (do not edit): namespaces are built lazily the
    first time any of them is requested for this module.
    """
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    """Register the template's <%namespace> imports (Mako-generated)."""
    # 'comments' and 'helper' correspond to the <%namespace> tags in
    # index.tmpl; each points at its helper template.
    ns = runtime.TemplateNamespace('comments', context._clean_inheritance_tokens(), templateuri='comments_helper.tmpl', callables=None, calling_uri=_template_uri)
    context.namespaces[(__name__, 'comments')] = ns
    ns = runtime.TemplateNamespace('helper', context._clean_inheritance_tokens(), templateuri='index_helper.tmpl', callables=None, calling_uri=_template_uri)
    context.namespaces[(__name__, 'helper')] = ns
def _mako_inherit(template, context):
    """Wire up template inheritance from base.tmpl (Mako-generated)."""
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, 'base.tmpl', _template_uri)
def render_body(context, **pageargs):
    """Render the template body (Mako-generated — do not edit by hand)."""
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)

        def content():
            return render_content(context._locals(__M_locals))

        # Pull template variables out of the render context.
        _link = context.get('_link', UNDEFINED)
        posts = context.get('posts', UNDEFINED)
        messages = context.get('messages', UNDEFINED)
        helper = _mako_get_namespace(context, 'helper')
        index_teasers = context.get('index_teasers', UNDEFINED)
        comments = _mako_get_namespace(context, 'comments')
        date_format = context.get('date_format', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\n')
        __M_writer('\n')
        __M_writer('\n')
        # Emit the 'content' block unless a parent template overrides it.
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
            context['self'].content(**pageargs)
        __M_writer('\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content(context, **pageargs):
    """Render the 'content' block: the post list, pager and comment/mathjax
    scripts (Mako-generated — do not edit by hand)."""
    __M_caller = context.caller_stack._push_frame()
    try:
        def content():
            return render_content(context)

        # Pull template variables out of the render context.
        _link = context.get('_link', UNDEFINED)
        posts = context.get('posts', UNDEFINED)
        messages = context.get('messages', UNDEFINED)
        helper = _mako_get_namespace(context, 'helper')
        index_teasers = context.get('index_teasers', UNDEFINED)
        comments = _mako_get_namespace(context, 'comments')
        date_format = context.get('date_format', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\n')
        # One <div class="postbox"> per post.
        for post in posts:
            __M_writer(' <div class="postbox">\n <h1><a href="')
            __M_writer(str(post.permalink()))
            __M_writer('">')
            __M_writer(str(post.title()))
            __M_writer('</a></h1>\n <div class="meta" style="background-color: rgb(234, 234, 234); ">\n <span class="authordate">\n ')
            __M_writer(str(messages("Posted:")))
            __M_writer(' <time class="published" datetime="')
            __M_writer(str(post.date.isoformat()))
            __M_writer('">')
            __M_writer(str(post.formatted_date(date_format)))
            __M_writer('</time>\n </span>\n <br>\n <span class="tags">Tags: \n')
            # Tag links, if the post has tags.
            if post.tags:
                for tag in post.tags:
                    __M_writer(' <a class="tag" href="')
                    __M_writer(str(_link('tag', tag)))
                    __M_writer('"><span>')
                    __M_writer(str(tag))
                    __M_writer('</span></a>\n')
            __M_writer(' </span>\n </div>\n ')
            __M_writer(str(post.text(teaser_only=index_teasers)))
            __M_writer('\n')
            # Comment link, unless comments are disabled for this post.
            if not post.meta('nocomments'):
                __M_writer(' ')
                __M_writer(str(comments.comment_link(post.permalink(), post.base_path)))
                __M_writer('\n')
            __M_writer(' </div>\n')
        __M_writer(' ')
        __M_writer(str(helper.html_pager()))
        __M_writer('\n ')
        __M_writer(str(comments.comment_link_script()))
        __M_writer('\n\t')
        __M_writer(str(helper.mathjax_script(posts)))
        __M_writer('\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"uri": "index.tmpl", "source_encoding": "utf-8", "filename": "themes/monospace/templates/index.tmpl", "line_map": {"23": 3, "26": 2, "32": 0, "46": 2, "47": 3, "48": 4, "53": 31, "59": 5, "72": 5, "73": 6, "74": 7, "75": 8, "76": 8, "77": 8, "78": 8, "79": 11, "80": 11, "81": 11, "82": 11, "83": 11, "84": 11, "85": 15, "86": 16, "87": 17, "88": 17, "89": 17, "90": 17, "91": 17, "92": 20, "93": 22, "94": 22, "95": 23, "96": 24, "97": 24, "98": 24, "99": 26, "100": 28, "101": 28, "102": 28, "103": 29, "104": 29, "105": 30, "106": 30, "112": 106}}
__M_END_METADATA
"""
| wcmckee/moejobs-site | cache/.mako.tmp/index.tmpl.py | Python | mit | 5,332 |
# Points Django at this app's AppConfig subclass (the pre-Django-3.2
# 'default_app_config' convention).
default_app_config = 'apps.authentication.appconfig.AuthenticationConfig'
| dotKom/onlineweb4 | apps/authentication/__init__.py | Python | mit | 74 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import strfile
import wx
import sys, os
import locale, gettext
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
class TransDictListCtrl(wx.ListCtrl, ListCtrlAutoWidthMixin):
    """ListCtrl for a translation dictionary.

    Runs in LC_VIRTUAL mode: rows are served on demand from self.dict
    via OnGetItemText, so the control never stores the data itself.
    NOTE(review): self.dict.keys().index(...) relies on Python 2 where
    keys() returns a list; on Python 3 this would need list(self.dict).
    """

    def __init__(self, parent):
        wx.ListCtrl.__init__(self, parent, -1, style=wx.LC_REPORT | wx.SUNKEN_BORDER | wx.LC_VIRTUAL | wx.LC_SINGLE_SEL)
        ListCtrlAutoWidthMixin.__init__(self)
        self.InsertColumn(0, _("Identification"), width=250)
        self.InsertColumn(1, _("Text"))
        # key -> translation text; display order is the dict's key order.
        self.dict = {}
        self.update()

    def update(self, onlyone=None):
        """
        Update the list.
        If onlyone is set, only the translation with the key = onlyone will be updated
        If not, evrything will be updated
        """
        if onlyone is None:
            # Full refresh: resize the virtual list and redraw all rows.
            self.DeleteAllItems()
            self.SetItemCount(len(self.dict))
            self.RefreshItems(0, len(self.dict))
        else:
            # Single-row refresh: store the new value and redraw its row.
            key, val = onlyone
            self.dict[key] = val
            self.RefreshItem(self.dict.keys().index(key))

    def set_dict(self, d):
        """Set the translation dictionary"""
        self.dict = d
        self.update()

    def get_seclection(self):
        """
        Get the key of the selected translation.
        If no translation is selected, an empty string will be returned
        """
        # (Method name typo "seclection" is kept: callers depend on it.)
        index = self.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
        return "" if index == -1 else self.dict.keys()[index]

    def OnGetItemText(self, item, col):
        """Basic implementation of the LC_VIRTUAL mechanism"""
        # Column 0 shows the identifier, column 1 the translated text.
        if col == 0:
            return self.dict.keys()[item]
        else:
            return self.dict.values()[item]
class new_entry_dialog(wx.Dialog):
    """
    Dialog that prompts the user for a new translation key.

    The key has the form "<part1>:<part2>"; both parts are sanitized to
    letters, '_' and '-' by get_identifier().
    """

    def __init__(self, parent):
        # Layout stuff
        wx.Dialog.__init__(self, parent, title=_("New entry"), size=(500, -1))
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(wx.StaticText(self, label=_("Name of your new entry:")), 0, wx.EXPAND | wx.ALL, 5)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.part1 = wx.TextCtrl(self)
        self.part2 = wx.TextCtrl(self)
        hbox.Add(self.part1, 2, wx.EXPAND, 0)
        hbox.Add(wx.StaticText(self, label=":"), 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.LEFT, 5)
        hbox.Add(self.part2, 3, wx.EXPAND, 0)
        hbox.SetMinSize((500, -1))
        vbox.Add(hbox, 0, wx.EXPAND | wx.ALL, 5)
        vbox.Add(self.CreateButtonSizer(wx.OK | wx.CANCEL), 0, wx.EXPAND | wx.ALL, 5)
        self.SetSizer(vbox)
        vbox.Fit(self)
        # BUGFIX: the buttons created by CreateButtonSizer carry the IDs
        # wx.ID_OK / wx.ID_CANCEL; the old code bound to wx.OK / wx.CANCEL,
        # which are dialog *style* flags, so these handlers never fired.
        self.Bind(wx.EVT_BUTTON, self.on_ok, id=wx.ID_OK)
        self.Bind(wx.EVT_BUTTON, self.on_cancel, id=wx.ID_CANCEL)
        self.Bind(wx.EVT_CLOSE, self.on_cancel)

    def get_identifier(self):
        """
        This will return the key/identifier.
        """
        # Strip every character that is not a letter, '_' or '-'.
        allowed_chars = map(chr, range(ord("A"), ord("Z")+1) + range(ord("a"), ord("z")+1)) + ["_", "-"]
        part1 = "".join(filter(lambda c: c in allowed_chars, self.part1.GetValue()))
        part2 = "".join(filter(lambda c: c in allowed_chars, self.part2.GetValue()))
        return part1 + ":" + part2

    def on_ok(self, event):
        self.EndModal(wx.ID_OK)

    def on_cancel(self, event):
        self.EndModal(wx.ID_CANCEL)
class editor_frame(wx.Frame):
    """
    The Frame of the editor.

    Holds the translation dictionary (self.dict), the path of the file
    being edited (self.filename) and a dirty flag (self.changed).
    """

    def __init__(self):
        filter_label = _("&Filter")
        self.dict = {}          # identifier -> escaped translation text
        self.changed = False    # True while there are unsaved modifications
        self.filename = ""      # currently opened file ("" = unsaved/new)
        # GUI stuff
        the_arts = wx.ArtProvider()
        wx.Frame.__init__(self, None, title=_(".str Editor"), size=(500, 600))
        # menubar
        menubar = wx.MenuBar()
        m_file = wx.Menu()
        m_file.AppendItem(wx.MenuItem(m_file, wx.ID_NEW, _("&New")))
        m_file.AppendItem(wx.MenuItem(m_file, wx.ID_OPEN, _("&Open")))
        m_file.AppendItem(wx.MenuItem(m_file, wx.ID_SEPARATOR))
        m_file.AppendItem(wx.MenuItem(m_file, wx.ID_SAVE, _("&Save")))
        m_file.AppendItem(wx.MenuItem(m_file, wx.ID_SAVEAS, _("Save &As")))
        m_file.AppendItem(wx.MenuItem(m_file, wx.ID_SEPARATOR))
        m_file.AppendItem(wx.MenuItem(m_file, wx.ID_EXIT, _("&Exit")))
        m_help = wx.Menu()
        m_help.AppendItem(wx.MenuItem(m_help, wx.ID_ABOUT, _("&About")))
        menubar.Append(m_file, _("&File"))
        menubar.Append(m_help, _("&Help"))
        self.SetMenuBar(menubar)
        # toolbar
        toolbar = self.CreateToolBar()
        toolbar.AddLabelTool(wx.ID_NEW, _("New"), the_arts.GetBitmap(wx.ART_NEW, wx.ART_TOOLBAR))
        toolbar.AddLabelTool(wx.ID_OPEN, _("Open"), the_arts.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR))
        toolbar.AddLabelTool(wx.ID_SAVE, _("Save"), the_arts.GetBitmap(wx.ART_FILE_SAVE, wx.ART_TOOLBAR))
        toolbar.Realize()
        # shortcuts
        jump_to_filter_id = wx.NewId()
        # The filter shortcut is derived from the (translated) label's
        # mnemonic character, i.e. the one after '&'.
        filter_shortcut_char = ord(filter_label[filter_label.find("&")+1].lower())
        shortcuts = wx.AcceleratorTable([
            (wx.ACCEL_CTRL, ord('s'), wx.ID_SAVE),
            (wx.ACCEL_CTRL, ord('o'), wx.ID_OPEN),
            (wx.ACCEL_CTRL, ord('n'), wx.ID_NEW),
            (wx.ACCEL_CTRL, ord('q'), wx.ID_EXIT),
            (wx.ACCEL_CTRL, filter_shortcut_char, jump_to_filter_id)
        ])
        self.SetAcceleratorTable(shortcuts)
        # the "real" GUI
        self.mainpanel = wx.Panel(self, -1)
        vbox = wx.BoxSizer(wx.VERTICAL)
        filter_hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.input_filter = wx.TextCtrl(self.mainpanel)
        filter_hbox.Add(wx.StaticText(self.mainpanel, label=filter_label), 0, wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 5)
        filter_hbox.Add(self.input_filter, 1, wx.EXPAND, 0)
        vbox.Add(filter_hbox, 0, wx.EXPAND | wx.ALL, 5)
        trl_hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.transl_list = TransDictListCtrl(self.mainpanel)
        trl_vbox = wx.BoxSizer(wx.VERTICAL)
        trl_add_btn = wx.BitmapButton(self.mainpanel, bitmap=the_arts.GetBitmap(wx.ART_ADD_BOOKMARK, wx.ART_BUTTON))
        trl_del_btn = wx.BitmapButton(self.mainpanel, bitmap=the_arts.GetBitmap(wx.ART_DEL_BOOKMARK, wx.ART_BUTTON))
        trl_vbox.Add(trl_add_btn, 0, wx.EXPAND | wx.BOTTOM, 5)
        trl_vbox.Add(trl_del_btn, 0, wx.EXPAND, 0)
        trl_hbox.Add(self.transl_list, 1, wx.EXPAND, wx.RIGHT, 5)
        trl_hbox.Add(trl_vbox, 0, wx.EXPAND, 0)
        vbox.Add(trl_hbox, 3, wx.EXPAND | wx.ALL, 5)
        # (was ".0" — the sizer proportion is an int)
        vbox.Add(wx.StaticLine(self.mainpanel, style=wx.LI_HORIZONTAL), 0, wx.EXPAND | wx.ALL, 5)
        vbox.Add(wx.StaticText(self.mainpanel, label=_("Text:")), 0, wx.EXPAND | wx.ALL, 5)
        self.trans_text_ctrl = wx.TextCtrl(self.mainpanel, style=wx.TE_MULTILINE)
        vbox.Add(self.trans_text_ctrl, 2, wx.EXPAND | wx.ALL, 5)
        self.mainpanel.SetSizer(vbox)
        cool_icon = wx.Icon(os.path.join(scriptdir, "wm-icon.ico"), wx.BITMAP_TYPE_ICO)
        self.SetIcon(cool_icon)
        # Binding events
        self.Bind(wx.EVT_MENU, self.on_new, id=wx.ID_NEW)
        self.Bind(wx.EVT_MENU, self.on_open, id=wx.ID_OPEN)
        self.Bind(wx.EVT_MENU, self.on_save, id=wx.ID_SAVE)
        self.Bind(wx.EVT_MENU, self.on_saveas, id=wx.ID_SAVEAS)
        self.Bind(wx.EVT_MENU, self.on_close, id=wx.ID_EXIT)
        self.Bind(wx.EVT_MENU, self.on_about, id=wx.ID_ABOUT)
        self.Bind(wx.EVT_TEXT, self.on_filter, id=self.input_filter.GetId())
        self.Bind(wx.EVT_TEXT, self.on_textedit, id=self.trans_text_ctrl.GetId())
        self.Bind(wx.EVT_BUTTON, self.on_add, id=trl_add_btn.GetId())
        self.Bind(wx.EVT_BUTTON, self.on_del, id=trl_del_btn.GetId())
        self.Bind(wx.EVT_MENU, self.on_jmp_filter, id=jump_to_filter_id)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_listsel, id=self.transl_list.GetId())
        self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.on_listunsel, id=self.transl_list.GetId())

    def really_discard(self):
        """
        If the content was modified, the user will be asked if he really wants to discard the changes
        This will return True if the calling function can continue normal work.
        """
        if self.changed:
            dialog = wx.MessageDialog(None,
                                      message=_("You did not save your changes. Continue?"),
                                      caption=_("Unsaved changes"),
                                      style=wx.ICON_QUESTION | wx.YES_NO)
            user_ret = dialog.ShowModal()
            dialog.Destroy()
            return user_ret == wx.ID_YES
        return True

    def populate_list(self, autoselect=None):
        """
        Populating the translation list with the filtered self.dict
        If autoselect is not None, the given translation will be selected and focussed.
        """
        filter_str = self.input_filter.GetValue().lower()
        f_dict = {}
        # Keep entries whose key or value contains the filter substring.
        for key in self.dict.iterkeys():
            if filter_str != '':
                if (filter_str not in key.lower()) and (filter_str not in self.dict[key].lower()):
                    continue
            f_dict[key] = self.dict[key]
        self.transl_list.set_dict(f_dict)
        self.trans_text_ctrl.SetValue("")
        if autoselect is not None:
            self.transl_list.Select(f_dict.keys().index(autoselect))
            self.transl_list.Focus(f_dict.keys().index(autoselect))

    def form_init(self):
        """
        Initializes / clears all formulars
        """
        self.populate_list()
        self.input_filter.SetValue("")

    def on_close(self, event):
        """Close the frame, unless the user keeps unsaved changes."""
        if self.really_discard():
            self.Destroy()

    def on_new(self, event):
        """Start a fresh, empty translation file."""
        if self.really_discard():
            self.dict = {}
            self.changed = False
            self.filename = ""
            self.form_init()

    def load_file(self, new_fn):
        """Load translations from new_fn; shows an error dialog on failure."""
        if new_fn != "":
            try:
                fp = open(new_fn, "rb")
                temp_dict = strfile.dict_parse(fp.read())
                self.dict = temp_dict
                fp.close()
                self.filename = new_fn
                self.changed = False
                self.form_init()
            except Exception:
                # BUGFIX: the old handler started with "del dialog" on a
                # name that did not exist yet, so it raised
                # UnboundLocalError and the error dialog never appeared.
                dialog = wx.MessageDialog(None,
                                          message=_("Could not open file.\nUsually that means that the file is invalid or you do not have enough privileges."),
                                          caption=_("Could not open file"),
                                          style=wx.ICON_ERROR | wx.OK)
                dialog.ShowModal()
                dialog.Destroy()

    def on_open(self, event):
        """Ask for a file and load it (after confirming unsaved changes)."""
        if self.really_discard():
            new_fn = ""
            dialog = wx.FileDialog(None, _("Choose a file"), wildcard=fd_wildcard, style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
            if dialog.ShowModal() == wx.ID_OK:
                new_fn = dialog.GetPath()
            dialog.Destroy()
            self.load_file(new_fn)

    def save_file(self, force_path=False):
        """Write self.dict to disk.

        If force_path is True (or no filename is known yet) a "Save to"
        dialog is shown first.  The dirty flag is cleared only after a
        successful write.
        """
        saveto = ""
        if force_path or self.filename == '':
            if self.filename == "":
                dialog = wx.FileDialog(None,
                                       message=_("Save to"),
                                       wildcard=fd_wildcard,
                                       style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
            else:
                # Preselect the current file's directory and name.
                def_dir, def_file = os.path.split(self.filename)
                dialog = wx.FileDialog(None,
                                       message=_("Save to"),
                                       wildcard=fd_wildcard,
                                       defaultDir=def_dir,
                                       defaultFile=def_file,
                                       style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
            if dialog.ShowModal() == wx.ID_OK:
                saveto = dialog.GetPath()
            else:
                saveto = ""
            dialog.Destroy()
        else:
            saveto = self.filename
        if saveto != "":
            try:
                # "with" guarantees the handle is closed even if
                # dict_gen() raises (the old code leaked it then).
                with open(saveto, "w") as fp:
                    strfile.dict_gen(self.dict, fp)
            except Exception:
                err_dialog = wx.MessageDialog(
                    None,
                    message=_("Can not write to file \"%s\".\nUsually that means that you do not have enough privileges or you ran out of disc memory.") % saveto,
                    caption=_("Can not save file."),
                    style=wx.ICON_ERROR | wx.OK)
                err_dialog.ShowModal()
                err_dialog.Close()
            else:
                # BUGFIX: previously the dirty flag was cleared even when
                # the write failed or the save dialog was cancelled.
                self.changed = False

    def on_save(self, event):
        self.save_file()

    def on_saveas(self, event):
        self.save_file(True)

    def on_about(self, event):
        """Show the standard wx about box with the MIT licence text."""
        description = _(".str Editor is a tool for editing the .str files of EA's BFME2")
        licence = u"""Copyright (c) 2010-2011 \"Die Völker Mittelerdes\" Modding Crew
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
        info = wx.AboutDialogInfo()
        info.SetName(_('.str Editor'))
        info.SetVersion('0.1')
        info.SetDescription(description)
        info.SetCopyright(u'(C) 2010-2011 \"Die Völker Mittelerdes\" Modding Crew')
        info.SetLicence(licence)
        info.AddDeveloper('Laria Carolin Chabowski')
        info.SetIcon(wx.Icon(os.path.join(scriptdir, 'icon.png'), wx.BITMAP_TYPE_PNG))
        wx.AboutBox(info)

    def on_listsel(self, event):
        """Show the selected translation (unescaped) in the text field."""
        self.trans_text_ctrl.SetValue(strfile.unescape(self.dict[self.transl_list.get_seclection()]))

    def on_listunsel(self, event):
        self.trans_text_ctrl.SetValue("")

    def on_filter(self, event):
        """Re-filter the list on every keystroke in the filter box."""
        # BUGFIX: repopulate unconditionally — the old guard skipped the
        # refresh when the filter was cleared, leaving a stale view.
        self.populate_list()
        # Highlight the filter field in red when nothing matches.
        if len(self.transl_list.dict) == 0 and event.GetString() != "":
            self.input_filter.SetBackgroundColour(wx.Colour(255, 100, 100))
        else:
            self.input_filter.SetBackgroundColour(wx.NullColour)
        self.input_filter.Refresh()

    def on_textedit(self, event):
        """Store edits of the text field back into the dictionary."""
        key = self.transl_list.get_seclection()
        if key != "":
            newval = strfile.escape(self.trans_text_ctrl.GetValue())
            self.dict[key] = newval
            self.transl_list.update((key, newval))
            self.changed = True

    def on_add(self, event):
        """Prompt for a new identifier and create an empty entry for it."""
        addthis = ":"
        # Loop until the user enters a non-empty, unused identifier
        # (get_identifier() returns ":" when both parts are empty).
        while addthis == ":":
            dialog = new_entry_dialog(None)
            if dialog.ShowModal() != wx.ID_OK:
                dialog.Destroy()
                return
            addthis = dialog.get_identifier()
            dialog.Destroy()
            if addthis in self.dict.keys():
                addthis = ':'
                dialog = wx.MessageDialog(
                    None,
                    message=_("This name is already in use. Choose another one."),
                    caption=_("Invalid name"),
                    style=wx.ICON_WARNING | wx.OK
                )
                dialog.ShowModal()
                dialog.Destroy()
        self.changed = True
        self.dict[addthis] = ""
        # Clear the filter so the new entry is visible, then select it.
        self.input_filter.SetValue("")
        self.populate_list(addthis)

    def on_del(self, event):
        """Delete the currently selected translation, if any."""
        delthis = self.transl_list.get_seclection()
        if delthis != "":
            del self.dict[delthis]
            self.changed = True
            self.populate_list()

    def on_jmp_filter(self, event):
        self.input_filter.SetFocus()
class dotstr_edit_app(wx.App):
    """Application bootstrap: shows the editor frame and optionally opens
    a file given as the first command-line argument."""

    def OnInit(self):
        app_frame = editor_frame()
        app_frame.Show()
        # Optional: open the file passed on the command line.
        if len(sys.argv) > 1:
            app_frame.load_file(sys.argv[1])
        self.SetTopWindow(app_frame)
        return True
if __name__ == '__main__':
    # get directory of script / executable
    # Python 2 code: `unicode` decodes the filesystem path. When frozen with
    # py2exe (sys.frozen set), the executable's directory is used instead of
    # the script's.
    scriptdir = os.path.dirname(unicode(
        sys.executable if hasattr(sys,"frozen") and sys.frozen in ("windows_exe", "console_exe") else __file__,
        sys.getfilesystemencoding()))
    # init localisation
    if os.name == 'nt':
        # windows hack for locale setting
        # Windows does not export LANG, so derive it from the default locale
        # before calling setlocale below.
        lang = os.getenv('LANG')
        if lang is None:
            default_lang, default_enc = locale.getdefaultlocale()
            if default_lang:
                lang = default_lang
        if lang:
            os.environ['LANG'] = lang
    locale.setlocale(locale.LC_ALL, '')
    # install(True) additionally installs the `_` translation function as a
    # builtin (Python 2 gettext API).
    translator = gettext.translation('dotstr_edit', os.path.join(scriptdir, 'locale'), fallback=True)
    translator.install(True)
    # Wildcard string for the file-open/save dialogs.
    fd_wildcard = _("str File")+"|*.str|*.*|*.*"
    # Start application
    app = dotstr_edit_app()
    app.MainLoop()
| kch42/dotstr_edit | dotstr_edit.py | Python | mit | 15,425 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2016, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Metadata consumed by Ansible's plugin loader and documentation tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'core',
                    'version': '1.0'}

# Module documentation in Ansible's YAML-in-a-string format.
DOCUMENTATION = '''
---
module: unarchive
version_added: 1.4
short_description: Unpacks an archive after (optionally) copying it from the local machine.
extends_documentation_fragment: files
description:
     - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set remote_src=yes to unpack an archive which already exists on the target.
options:
  src:
    description:
      - If remote_src=no (default), local path to archive file to copy to the target server; can be absolute or relative. If remote_src=yes, path on the target server to existing archive file to unpack.
      - If remote_src=yes and src contains ://, the remote machine will download the file from the url first. (version_added 2.0)
    required: true
    default: null
  dest:
    description:
      - Remote absolute path where the archive should be unpacked
    required: true
    default: null
  copy:
    description:
      - "If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine."
      - "This option has been deprecated in favor of C(remote_src)"
      - "This option is mutually exclusive with C(remote_src)."
    required: false
    choices: [ "yes", "no" ]
    default: "yes"
  creates:
    description:
      - a filename, when it already exists, this step will B(not) be run.
    required: no
    default: null
    version_added: "1.6"
  list_files:
    description:
      - If set to True, return the list of files that are contained in the tarball.
    required: false
    choices: [ "yes", "no" ]
    default: "no"
    version_added: "2.0"
  exclude:
    description:
      - List the directory and file entries that you would like to exclude from the unarchive action.
    required: false
    default: []
    version_added: "2.1"
  keep_newer:
    description:
      - Do not replace existing files that are newer than files from the archive.
    required: false
    default: no
    version_added: "2.1"
  extra_opts:
    description:
      - Specify additional options by passing in an array.
    default:
    required: false
    version_added: "2.1"
  remote_src:
    description:
      - "Set to C(yes) to indicate the archived file is already on the remote system and not local to the Ansible controller."
      - "This option is mutually exclusive with C(copy)."
    required: false
    default: "no"
    choices: ["yes", "no"]
    version_added: "2.2"
  validate_certs:
    description:
      - This only applies if using a https url as the source of the file.
      - This should only be set to C(no) used on personally controlled sites using self-signed certificates.
      - Prior to 2.2 the code worked as if this was set to C(yes).
    required: false
    default: "yes"
    choices: ["yes", "no"]
    version_added: "2.2"
author: "Dag Wieers (@dagwieers)"
todo:
    - re-implement tar support using native tarfile module
    - re-implement zip support using native zipfile module
notes:
    - requires C(gtar)/C(unzip) command on target host
    - can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2) and I(.tar.xz) files using C(gtar)
    - uses gtar's C(--diff arg) to calculate if changed or not. If this C(arg) is not
      supported, it will always unpack the archive
    - existing files/directories in the destination which are not in the archive
      are not touched. This is the same behavior as a normal archive extraction
    - existing files/directories in the destination which are not in the archive
      are ignored for purposes of deciding if the archive should be unpacked or not
'''

# Usage examples rendered by ansible-doc.
EXAMPLES = '''
# Example from Ansible Playbooks
- unarchive:
    src: foo.tgz
    dest: /var/lib/foo

# Unarchive a file that is already on the remote machine
- unarchive:
    src: /tmp/foo.zip
    dest: /usr/local/bin
    remote_src: yes

# Unarchive a file that needs to be downloaded (added in 2.0)
- unarchive:
    src: "https://example.com/example.zip"
    dest: /usr/local/bin
    remote_src: yes
'''
import re
import os
import stat
import pwd
import grp
import datetime
import time
import binascii
import codecs
from zipfile import ZipFile, BadZipfile
from ansible.module_utils._text import to_text
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
# String from tar that shows the tar contents are different from the
# filesystem
# These match (with LANG=C forced) the per-file messages that `gtar --diff`
# prints; is_unarchived() uses them to classify what kind of change a line
# reports.
OWNER_DIFF_RE = re.compile(r': Uid differs$')
GROUP_DIFF_RE = re.compile(r': Gid differs$')
MODE_DIFF_RE = re.compile(r': Mode differs$')
MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
#NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
# Unix permission triplets (owner/group/other) as printed by `unzip -Z`.
ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
# When downloading an archive, how much of the archive to download before
# saving to a tempfile (64k)
BUFSIZE = 65536
def crc32(path):
    ''' Return the CRC32 checksum of the file at *path* as an unsigned int.

    The file is opened in binary mode (archive members are binary data) and
    closed deterministically. The previous version opened in text mode and
    leaked the handle: text mode corrupts the checksum on Windows and fails
    outright on Python 3, where binascii.crc32 requires bytes.
    '''
    with open(path, 'rb') as f:
        return binascii.crc32(f.read()) & 0xffffffff
def shell_escape(string):
    ''' Quote meta-characters in the args for the unix shell.

    Every character outside [A-Za-z0-9_] is prefixed with a backslash.
    '''
    unsafe = re.compile(r'([^A-Za-z0-9_])')
    return unsafe.sub(r'\\\1', string)
class UnarchiveError(Exception):
    """Raised when an archive cannot be listed, examined or handled."""
# class to handle .zip files
class ZipArchive(object):
    """Handle .zip archives via the external ``unzip`` command.

    Listing/metadata is read with Python's zipfile where possible; the
    actual comparison and extraction shell out to ``unzip``.
    """

    def __init__(self, src, dest, file_args, module):
        self.src = src
        self.dest = dest
        self.file_args = file_args
        self.opts = module.params['extra_opts']
        self.module = module
        self.excludes = module.params['exclude']
        self.includes = []
        self.cmd_path = self.module.get_bin_path('unzip')
        self._files_in_archive = []
        self._infodict = dict()

    def _permstr_to_octal(self, modestr, umask):
        ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
        revstr = modestr[::-1]
        mode = 0
        # Bit i+3*j of the mode corresponds to triplet j (other/group/owner)
        # position i in the reversed permission string.
        for j in range(0, 3):
            for i in range(0, 3):
                if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']:
                    mode += 2**(i+3*j)
        # The unzip utility does not support setting the stST bits
        #        if revstr[i+3*j] in ['s', 't', 'S', 'T' ]:
        #            mode += 2**(9+j)
        return ( mode & ~umask )

    def _legacy_file_list(self, force_refresh=False):
        """Populate the file list by parsing `unzip -v` output (fallback for
        archives that Python's zipfile cannot open)."""
        unzip_bin = self.module.get_bin_path('unzip')
        if not unzip_bin:
            raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)
        rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])
        if rc:
            raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
        # Skip the three header and two footer lines of `unzip -v` output.
        for line in out.splitlines()[3:-2]:
            fields = line.split(None, 7)
            self._files_in_archive.append(fields[7])
            self._infodict[fields[7]] = long(fields[6])

    def _crc32(self, path):
        """Return the CRC recorded in the archive for *path*, caching the
        whole CRC table on first use."""
        if self._infodict:
            return self._infodict[path]
        try:
            archive = ZipFile(self.src)
        except BadZipfile:
            e = get_exception()
            if e.args[0].lower().startswith('bad magic number'):
                # Python2.4 can't handle zipfiles with > 64K files. Try using
                # /usr/bin/unzip instead
                self._legacy_file_list()
            else:
                raise
        else:
            try:
                for item in archive.infolist():
                    self._infodict[item.filename] = long(item.CRC)
            except:
                archive.close()
                raise UnarchiveError('Unable to list files in the archive')
            # BUGFIX: the handle used to leak on the success path.
            archive.close()
        return self._infodict[path]

    @property
    def files_in_archive(self, force_refresh=False):
        # NOTE: force_refresh is unusable on a property (properties take no
        # call arguments); kept for signature compatibility.
        if self._files_in_archive and not force_refresh:
            return self._files_in_archive
        self._files_in_archive = []
        try:
            archive = ZipFile(self.src)
        except BadZipfile:
            e = get_exception()
            if e.args[0].lower().startswith('bad magic number'):
                # Python2.4 can't handle zipfiles with > 64K files. Try using
                # /usr/bin/unzip instead
                self._legacy_file_list(force_refresh)
            else:
                raise
        else:
            try:
                for member in archive.namelist():
                    if member not in self.excludes:
                        self._files_in_archive.append(to_native(member))
            except:
                archive.close()
                raise UnarchiveError('Unable to list files in the archive')
            archive.close()
        return self._files_in_archive

    def is_unarchived(self):
        """Compare archive metadata (`unzip -ZT`) against the destination.

        Returns a dict with `unarchived` (bool), the raw command data and an
        itemized rsync-style `diff` string. Side effect: fills self.includes
        with the paths that need (re-)extraction.
        """
        cmd = [ self.cmd_path, '-ZT', '-s', self.src ]
        if self.excludes:
            cmd.extend([ ' -x ', ] + self.excludes)
        rc, out, err = self.module.run_command(cmd)

        old_out = out
        diff = ''
        out = ''
        if rc == 0:
            unarchived = True
        else:
            unarchived = False

        # Get some information related to user/group ownership
        umask = os.umask(0)
        os.umask(umask)

        # Get current user and group information
        groups = os.getgroups()
        run_uid = os.getuid()
        run_gid = os.getgid()
        try:
            run_owner = pwd.getpwuid(run_uid).pw_name
        except:
            run_owner = run_uid
        try:
            run_group = grp.getgrgid(run_gid).gr_name
        except:
            run_group = run_gid

        # Get future user ownership
        fut_owner = fut_uid = None
        if self.file_args['owner']:
            try:
                # BUGFIX: this used to call pwd.getpwname(), which does not
                # exist, so a requested owner was silently ignored.
                tpw = pwd.getpwnam(self.file_args['owner'])
            except:
                try:
                    # owner may also be given as a (stringified) numeric uid
                    tpw = pwd.getpwuid(int(self.file_args['owner']))
                except:
                    tpw = pwd.getpwuid(run_uid)
            fut_owner = tpw.pw_name
            fut_uid = tpw.pw_uid
        else:
            try:
                fut_owner = run_owner
            except:
                pass
            fut_uid = run_uid

        # Get future group ownership
        fut_group = fut_gid = None
        if self.file_args['group']:
            try:
                tgr = grp.getgrnam(self.file_args['group'])
            except:
                try:
                    # group may also be given as a (stringified) numeric gid;
                    # BUGFIX: getgrgid() used to be passed the raw string.
                    tgr = grp.getgrgid(int(self.file_args['group']))
                except:
                    tgr = grp.getgrgid(run_gid)
            fut_group = tgr.gr_name
            fut_gid = tgr.gr_gid
        else:
            try:
                fut_group = run_group
            except:
                pass
            fut_gid = run_gid

        for line in old_out.splitlines():
            change = False

            pcs = line.split(None, 7)
            if len(pcs) != 8:
                # Too few fields... probably a piece of the header or footer
                continue

            # Check first and seventh field in order to skip header/footer
            if len(pcs[0]) != 7 and len(pcs[0]) != 10: continue
            if len(pcs[6]) != 15: continue

            # Possible entries:
            #   -rw-rws---  1.9 unx    2802 t- defX 11-Aug-91 13:48 perms.2660
            #   -rw-a--     1.0 hpf    5358 Tl i4:3  4-Dec-91 11:33 longfilename.hpfs
            #   -r--ahs     1.1 fat    4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
            #   --w-------  1.0 mac   17357 bx i8:2  4-May-92 04:02 unzip.macr
            if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
                continue

            ztype = pcs[0][0]
            permstr = pcs[0][1:]
            version = pcs[1]
            ostype = pcs[2]
            size = int(pcs[3])
            path = to_text(pcs[7], errors='surrogate_or_strict')

            # Skip excluded files
            if path in self.excludes:
                out += 'Path %s is excluded on request\n' % path
                continue

            # Itemized change requires L for symlink
            if path[-1] == '/':
                if ztype != 'd':
                    err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
                ftype = 'd'
            elif ztype == 'l':
                ftype = 'L'
            elif ztype == '-':
                ftype = 'f'
            elif ztype == '?':
                ftype = 'f'

            # Some files may be storing FAT permissions, not Unix permissions
            if len(permstr) == 6:
                if path[-1] == '/':
                    permstr = 'rwxrwxrwx'
                elif permstr == 'rwx---':
                    permstr = 'rwxrwxrwx'
                else:
                    permstr = 'rw-rw-rw-'

            # Test string conformity
            if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
                raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)

            # DEBUG
            #            err += "%s%s %10d %s\n" % (ztype, permstr, size, path)

            dest = os.path.join(self.dest, path)
            try:
                st = os.lstat(dest)
            except:
                change = True
                self.includes.append(path)
                err += 'Path %s is missing\n' % path
                diff += '>%s++++++.?? %s\n' % (ftype, path)
                continue

            # Compare file types
            if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'File %s already exists, but not as a directory\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'f' and not stat.S_ISREG(st.st_mode):
                change = True
                unarchived = False
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a regular file\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a symlink\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            itemized = list('.%s.......??' % ftype)

            # Note: this timestamp calculation has a rounding error
            # somewhere... unzip and this timestamp can be one second off
            # When that happens, we report a change and re-unzip the file
            dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
            timestamp = time.mktime(dt_object.timetuple())

            # Compare file timestamps
            if stat.S_ISREG(st.st_mode):
                if self.module.params['keep_newer']:
                    if timestamp > st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s is older, replacing file\n' % path
                        itemized[4] = 't'
                    elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
                        # Add to excluded files, ignore other changes
                        out += 'File %s is newer, excluding file\n' % path
                        self.excludes.append(path)
                        continue
                else:
                    if timestamp != st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
                        itemized[4] = 't'

            # Compare file sizes
            if stat.S_ISREG(st.st_mode) and size != st.st_size:
                change = True
                err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
                itemized[3] = 's'

            # Compare file checksums
            if stat.S_ISREG(st.st_mode):
                crc = crc32(dest)
                if crc != self._crc32(path):
                    change = True
                    err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
                    itemized[2] = 'c'

            # Compare file permissions

            # Do not handle permissions of symlinks
            if ftype != 'L':

                # Use the new mode provided with the action, if there is one
                if self.file_args['mode']:
                    if isinstance(self.file_args['mode'], int):
                        mode = self.file_args['mode']
                    else:
                        try:
                            mode = int(self.file_args['mode'], 8)
                        except Exception:
                            e = get_exception()
                            self.module.fail_json(path=path, msg="mode %(mode)s must be in octal form" % self.file_args, details=str(e))
                # Only special files require no umask-handling
                elif ztype == '?':
                    mode = self._permstr_to_octal(permstr, 0)
                else:
                    mode = self._permstr_to_octal(permstr, umask)

                if mode != stat.S_IMODE(st.st_mode):
                    change = True
                    itemized[5] = 'p'
                    err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))

            # Compare file user ownership
            owner = uid = None
            try:
                owner = pwd.getpwuid(st.st_uid).pw_name
            except:
                uid = st.st_uid

            # If we are not root and requested owner is not our user, fail
            if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
                raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))

            if owner and owner != fut_owner:
                change = True
                err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
                itemized[6] = 'o'
            elif uid and uid != fut_uid:
                change = True
                err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
                itemized[6] = 'o'

            # Compare file group ownership
            group = gid = None
            try:
                group = grp.getgrgid(st.st_gid).gr_name
            except:
                gid = st.st_gid

            if run_uid != 0 and fut_gid not in groups:
                raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))

            if group and group != fut_group:
                change = True
                err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
                itemized[6] = 'g'
            elif gid and gid != fut_gid:
                change = True
                err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
                itemized[6] = 'g'

            # Register changed files and finalize diff output
            if change:
                if path not in self.includes:
                    self.includes.append(path)
                diff += '%s %s\n' % (''.join(itemized), path)

        if self.includes:
            unarchived = False

        # DEBUG
        #        out = old_out + out

        return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)

    def unarchive(self):
        """Run ``unzip -o`` into self.dest, honouring extra opts/excludes."""
        cmd = [ self.cmd_path, '-o', self.src ]
        if self.opts:
            cmd.extend(self.opts)
        # NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
        # if self.includes:
        # NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
        #     cmd.extend(map(shell_escape, self.includes))
        if self.excludes:
            cmd.extend([ '-x' ] + self.excludes)
        cmd.extend([ '-d', self.dest ])
        rc, out, err = self.module.run_command(cmd)
        return dict(cmd=cmd, rc=rc, out=out, err=err)

    def can_handle_archive(self):
        """Return (True, None) if ``unzip`` can list the archive, else
        (False, reason)."""
        if not self.cmd_path:
            return False, 'Command "unzip" not found.'
        cmd = [ self.cmd_path, '-l', self.src ]
        rc, out, err = self.module.run_command(cmd)
        if rc == 0:
            return True, None
        return False, 'Command "%s" could not handle archive.' % self.cmd_path
# class to handle gzipped tar files
class TgzArchive(object):
    """Handle gzip-compressed tarballs via the external GNU tar command.

    Subclasses only override ``zipflag`` to select the (de)compression
    filter (``''``, ``-j``, ``-J``); everything else is shared.
    """

    def __init__(self, src, dest, file_args, module):
        self.src = src
        self.dest = dest
        self.file_args = file_args
        self.opts = module.params['extra_opts']
        self.module = module
        # gtar's --diff cannot be used for a dry run, so check mode is
        # unsupported for tar-based archives and the task is skipped.
        if self.module.check_mode:
            self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
        # Trailing slashes are stripped so excludes match tar's member names.
        self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]
        # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
        self.cmd_path = self.module.get_bin_path('gtar', None)
        if not self.cmd_path:
            # Fallback to tar
            self.cmd_path = self.module.get_bin_path('tar')
        # Compression flag; subclasses overwrite this after super().__init__.
        self.zipflag = '-z'
        self._files_in_archive = []
        if self.cmd_path:
            self.tar_type = self._get_tar_type()
        else:
            self.tar_type = None

    def _get_tar_type(self):
        # Identify the tar implementation ('gnu' or 'bsd') from --version
        # output; can_handle_archive() insists on GNU tar.
        cmd = [self.cmd_path, '--version']
        (rc, out, err) = self.module.run_command(cmd)
        tar_type = None
        if out.startswith('bsdtar'):
            tar_type = 'bsd'
        elif out.startswith('tar') and 'GNU' in out:
            tar_type = 'gnu'
        return tar_type

    @property
    def files_in_archive(self, force_refresh=False):
        # NOTE(review): force_refresh can never be passed through a property
        # access; kept only for signature symmetry with ZipArchive.
        if self._files_in_archive and not force_refresh:
            return self._files_in_archive
        cmd = [ self.cmd_path, '--list', '-C', self.dest ]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend([ '--show-transformed-names' ] + self.opts)
        if self.excludes:
            cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
        cmd.extend([ '-f', self.src ])
        # LANG/LC_* forced to C so the output parsing below is locale-stable.
        rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
        if rc != 0:
            raise UnarchiveError('Unable to list files in the archive')
        for filename in out.splitlines():
            # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
            #            filename = filename.decode('string_escape')
            filename = codecs.escape_decode(filename)[0]
            if filename and filename not in self.excludes:
                self._files_in_archive.append(to_native(filename))
        return self._files_in_archive

    def is_unarchived(self):
        # Use gtar --diff and then filter its per-file messages down to the
        # categories we actually act on (ownership/mode/mtime/missing).
        cmd = [ self.cmd_path, '--diff', '-C', self.dest ]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend([ '--show-transformed-names' ] + self.opts)
        if self.file_args['owner']:
            cmd.append('--owner=' + quote(self.file_args['owner']))
        if self.file_args['group']:
            cmd.append('--group=' + quote(self.file_args['group']))
        if self.module.params['keep_newer']:
            cmd.append('--keep-newer-files')
        if self.excludes:
            cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
        cmd.extend([ '-f', self.src ])
        rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))

        # Check whether the differences are in something that we're
        # setting anyway

        # What is different
        unarchived = True
        old_out = out
        out = ''
        run_uid = os.getuid()
        # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
        # Only way to be sure is to check request with what is on disk (as we do for zip)
        # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
        for line in old_out.splitlines() + err.splitlines():
            # FIXME: Remove the bogus lines from error-output as well !
            # Ignore bogus errors on empty filenames (when using --split-component)
            if EMPTY_FILE_RE.search(line):
                continue
            if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
                out += line + '\n'
            if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
                out += line + '\n'
            if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
                out += line + '\n'
            if MOD_TIME_DIFF_RE.search(line):
                out += line + '\n'
            if MISSING_FILE_RE.search(line):
                out += line + '\n'
        if out:
            unarchived = False
        return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)

    def unarchive(self):
        # Same command construction as is_unarchived(), but with --extract.
        cmd = [ self.cmd_path, '--extract', '-C', self.dest ]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend([ '--show-transformed-names' ] + self.opts)
        if self.file_args['owner']:
            cmd.append('--owner=' + quote(self.file_args['owner']))
        if self.file_args['group']:
            cmd.append('--group=' + quote(self.file_args['group']))
        if self.module.params['keep_newer']:
            cmd.append('--keep-newer-files')
        if self.excludes:
            cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
        cmd.extend([ '-f', self.src ])
        rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
        return dict(cmd=cmd, rc=rc, out=out, err=err)

    def can_handle_archive(self):
        # Require GNU tar and a non-empty, listable archive.
        if not self.cmd_path:
            return False, 'Commands "gtar" and "tar" not found.'

        if self.tar_type != 'gnu':
            return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)

        try:
            if self.files_in_archive:
                return True, None
        except UnarchiveError:
            return False, 'Command "%s" could not handle archive.' % self.cmd_path
        # Errors and no files in archive assume that we weren't able to
        # properly unarchive it
        return False, 'Command "%s" found no files in archive.' % self.cmd_path
# class to handle tar files that aren't compressed
# class to handle tar files that aren't compressed
class TarArchive(TgzArchive):
    def __init__(self, src, dest, file_args, module):
        super(TarArchive, self).__init__(src, dest, file_args, module)
        # argument to tar
        # Empty zipflag: no compression filter is passed to tar. Must be set
        # AFTER super().__init__(), which defaults it to '-z'.
        self.zipflag = ''
# class to handle bzip2 compressed tar files
# class to handle bzip2 compressed tar files
class TarBzipArchive(TgzArchive):
    def __init__(self, src, dest, file_args, module):
        super(TarBzipArchive, self).__init__(src, dest, file_args, module)
        # -j selects bzip2; must be set AFTER super().__init__() (defaults to '-z').
        self.zipflag = '-j'
# class to handle xz compressed tar files
# class to handle xz compressed tar files
class TarXzArchive(TgzArchive):
    def __init__(self, src, dest, file_args, module):
        super(TarXzArchive, self).__init__(src, dest, file_args, module)
        # -J selects xz; must be set AFTER super().__init__() (defaults to '-z').
        self.zipflag = '-J'
# try handlers in order and return the one that works or bail if none work
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
    """Instantiate each handler in preference order and return the first
    whose can_handle_archive() succeeds; fail the module otherwise."""
    reasons = set()
    for handler_cls in (ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive):
        candidate = handler_cls(src, dest, file_args, module)
        can_handle, reason = candidate.can_handle_archive()
        if can_handle:
            return candidate
        reasons.add(reason)
    reason_msg = ' '.join(reasons)
    module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed. %s' % (src, reason_msg))
def main():
    """Entry point: validate input, optionally download the archive, pick a
    handler, and unpack only when the destination differs from the archive."""
    module = AnsibleModule(
        # not checking because of daisy chain to file module
        argument_spec = dict(
            src               = dict(required=True, type='path'),
            original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack
            dest              = dict(required=True, type='path'),
            copy              = dict(required=False, default=True, type='bool'),
            remote_src        = dict(required=False, default=False, type='bool'),
            creates           = dict(required=False, type='path'),
            list_files        = dict(required=False, default=False, type='bool'),
            keep_newer        = dict(required=False, default=False, type='bool'),
            exclude           = dict(required=False, default=[], type='list'),
            extra_opts        = dict(required=False, default=[], type='list'),
            validate_certs    = dict(required=False, default=True, type='bool'),
        ),
        add_file_common_args = True,
        mutually_exclusive   = [("copy", "remote_src"),],
        # check-mode only works for zip files, we cover that later
        supports_check_mode = True,
    )

    src    = os.path.expanduser(module.params['src'])
    dest   = os.path.expanduser(module.params['dest'])
    copy   = module.params['copy']
    remote_src = module.params['remote_src']
    file_args = module.load_file_common_arguments(module.params)

    # did tar file arrive?
    if not os.path.exists(src):
        if not remote_src and copy:
            module.fail_json(msg="Source '%s' failed to transfer" % src)
        # If copy=false, and src= contains ://, try and download the file to a temp directory.
        elif '://' in src:
            tempdir = os.path.dirname(os.path.realpath(__file__))
            package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))
            try:
                rsp, info = fetch_url(module, src)
                # If download fails, raise a proper exception
                if rsp is None:
                    raise Exception(info['msg'])

                # BUGFIX: write in binary mode ('w' corrupted archives on
                # Windows and fails on Python 3, where the body is bytes) and
                # close the handle even if a write raises.
                with open(package, 'wb') as f:
                    # Stream BUFSIZE (64k) chunks to keep memory use bounded
                    while True:
                        data = rsp.read(BUFSIZE)
                        if not data:
                            break  # End of file, break while loop
                        f.write(data)
                src = package
            except Exception:
                e = get_exception()
                module.fail_json(msg="Failure downloading %s, %s" % (src, e))
        else:
            module.fail_json(msg="Source '%s' does not exist" % src)
    if not os.access(src, os.R_OK):
        module.fail_json(msg="Source '%s' not readable" % src)

    # skip working with 0 size archives
    try:
        if os.path.getsize(src) == 0:
            module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
    except Exception:
        e = get_exception()
        module.fail_json(msg="Source '%s' not readable" % src)

    # is dest OK to receive tar file?
    if not os.path.isdir(dest):
        module.fail_json(msg="Destination '%s' is not a directory" % dest)

    handler = pick_handler(src, dest, file_args, module)

    res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)

    # do we need to do unpack?
    check_results = handler.is_unarchived()

    # DEBUG
    # res_args['check_results'] = check_results

    if module.check_mode:
        res_args['changed'] = not check_results['unarchived']
    elif check_results['unarchived']:
        res_args['changed'] = False
    else:
        # do the unpack
        try:
            res_args['extract_results'] = handler.unarchive()
            if res_args['extract_results']['rc'] != 0:
                module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
        except IOError:
            module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
        else:
            res_args['changed'] = True

    # Get diff if required
    if check_results.get('diff', False):
        res_args['diff'] = { 'prepared': check_results['diff'] }

    # Run only if we found differences (idempotence) or diff was missing
    if res_args.get('diff', True) and not module.check_mode:
        # do we need to change perms?
        for filename in handler.files_in_archive:
            file_args['path'] = os.path.join(dest, filename)
            try:
                res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
            except (IOError, OSError):
                e = get_exception()
                module.fail_json(msg="Unexpected error when accessing exploded file: %s" % str(e), **res_args)

    if module.params['list_files']:
        res_args['files'] = handler.files_in_archive

    module.exit_json(**res_args)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils._text import to_native
# Execute the module only when invoked directly (as Ansible does).
if __name__ == '__main__':
    main()
| mkrupcale/ansible | lib/ansible/modules/files/unarchive.py | Python | gpl-3.0 | 34,616 |
import os
def job_dir(settings):
    """Return the configured JOBDIR path, creating the directory on first use.

    A falsy setting (None or empty string) is returned unchanged and no
    directory is created.
    """
    jobdir = settings['JOBDIR']
    if not jobdir:
        return jobdir
    if not os.path.exists(jobdir):
        os.makedirs(jobdir)
    return jobdir
| bdh1011/wau | venv/lib/python2.7/site-packages/scrapy/utils/job.py | Python | mit | 148 |
import datetime
from combine import factorial
def main(num):
    """Return the sum of the decimal digits of num! (Project Euler 20).

    Uses the standard library's math.factorial instead of the project's
    `combine.factorial`, making the function self-contained; results are
    identical for all non-negative integers.
    """
    import math
    return sum(map(int, str(math.factorial(num))))
# Read the problem parameter from stdin; fall back to the canonical
# Project Euler input (100) when stdin is empty or not a number.
try:
    para = int(input())
except (EOFError, ValueError):
    # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; only input/parse failures should trigger the fallback.
    para = 100
# Time the computation and report both the answer and the elapsed time.
beg = datetime.datetime.now()
ans = main(para)
end = datetime.datetime.now()
print("answer:", ans)
print("time:", end - beg)
| nowsword/ProjectEuler | p020.py | Python | gpl-3.0 | 288 |
import os
from kubeflow.kubeflow.crud_backend import config, logging
from ..common import create_app as create_default_app
from .routes import bp as routes_bp
log = logging.getLogger(__name__)
def create_app(name=__name__, cfg: config.Config = None):
    """Build the Flask app for the v1beta1 API, serving the bundled static
    frontend and registering this version's routes."""
    if cfg is None:
        cfg = config.Config()

    # Properly set the static serving directory
    static_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                              "static")
    app = create_default_app(name, static_dir, cfg)

    log.info("Setting STATIC_DIR to: " + static_dir)
    app.config["STATIC_DIR"] = static_dir

    # Register the app's blueprints
    app.register_blueprint(routes_bp)

    return app
| kubeflow/kfserving-lts | web-app/backend/apps/v1beta1/__init__.py | Python | apache-2.0 | 709 |
from core.hook import BaseHook
class Hook(BaseHook):
    """Render Trello checklist webhook actions as Markdown notifications."""

    def addChecklistToCard(self, action):
        """A checklist was added to a card."""
        data = action["data"]
        template = (u':incoming_envelope: New checklist '
                    u'"[{checklist_name}](https://trello.com/c/{card_link})" '
                    u'add to card "[{card_name}](https://trello.com/c/{card_link})"')
        return template.format(
            card_link=data["card"]["shortLink"],
            card_name=data["card"]["name"],
            checklist_name=data["checklist"]["name"],
        )

    def createCheckItem(self, action):
        """An item was added to a checklist."""
        data = action["data"]
        template = (u':incoming_envelope: New checklist item '
                    u'"[{checkitem_name}](https://trello.com/c/{card_link})" '
                    u'add to card "[{card_name}](https://trello.com/c/{card_link})"')
        return template.format(
            card_link=data["card"]["shortLink"],
            card_name=data["card"]["name"],
            checkitem_name=data["checkItem"]["name"],
        )

    def updateCheckItemStateOnCard(self, action):
        """A checklist item was toggled complete/incomplete."""
        data = action["data"]
        incomplete = data["checkItem"]["state"] == "incomplete"
        verb = u'marked incomplete' if incomplete else u'completed'
        template = (u':incoming_envelope: Checklist item "{checkitem_name}" on card '
                    u'"[{card_name}](https://trello.com/c/{card_link})" was ' + verb +
                    u' by `{member_fullname}`')
        return template.format(
            card_link=data["card"]["shortLink"],
            card_name=data["card"]["name"],
            checkitem_name=data["checkItem"]["name"],
            member_fullname=action["memberCreator"]["fullName"],
        )
| Lujeni/matterllo | core/hook/checklist.py | Python | mit | 1,786 |
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
String,
Float,
Integer,
Bool,
NoneSet,
Set,
)
from openpyxl.descriptors.excel import (
ExtensionList,
HexBinary,
Guid,
Relation,
)
class WorkbookProtection(Serialisable):
    """Workbook-level protection settings (CT_WorkbookProtection).

    Descriptor attributes below are collected by the Serialisable machinery
    and serialised as XML attributes of the ``workbookPr`` element.
    """

    tagname = "workbookPr"

    # Legacy 16-bit password hashes (hex-encoded).
    workbookPassword = HexBinary(allow_none=True)
    workbook_password = Alias("workbookPassword")
    workbookPasswordCharacterSet = String(allow_none=True)
    revisionsPassword = HexBinary(allow_none=True)
    revision_password = Alias("revisionsPassword")
    revisionsPasswordCharacterSet = String(allow_none=True)
    # What is locked: sheet structure, window layout, revision history.
    lockStructure = Bool(allow_none=True)
    lock_structure = Alias("lockStructure")
    lockWindows = Bool(allow_none=True)
    lock_windows = Alias("lockWindows")
    lockRevision = Bool(allow_none=True)
    lock_revision = Alias("lockRevision")
    # Modern salted-hash protection fields (algorithm + hash + salt + spin count).
    revisionsAlgorithmName = String(allow_none=True)
    revisionsHashValue = HexBinary(allow_none=True)
    revisionsSaltValue = HexBinary(allow_none=True)
    revisionsSpinCount = Integer(allow_none=True)
    workbookAlgorithmName = String(allow_none=True)
    workbookHashValue = HexBinary(allow_none=True)
    workbookSaltValue = HexBinary(allow_none=True)
    workbookSpinCount = Integer(allow_none=True)

    def __init__(self,
                 workbookPassword=None,
                 workbookPasswordCharacterSet=None,
                 revisionsPassword=None,
                 revisionsPasswordCharacterSet=None,
                 lockStructure=None,
                 lockWindows=None,
                 lockRevision=None,
                 revisionsAlgorithmName=None,
                 revisionsHashValue=None,
                 revisionsSaltValue=None,
                 revisionsSpinCount=None,
                 workbookAlgorithmName=None,
                 workbookHashValue=None,
                 workbookSaltValue=None,
                 workbookSpinCount=None,
                 ):
        # Assignments go through the descriptors above, which validate types.
        self.workbookPassword = workbookPassword
        self.workbookPasswordCharacterSet = workbookPasswordCharacterSet
        self.revisionsPassword = revisionsPassword
        self.revisionsPasswordCharacterSet = revisionsPasswordCharacterSet
        self.lockStructure = lockStructure
        self.lockWindows = lockWindows
        self.lockRevision = lockRevision
        self.revisionsAlgorithmName = revisionsAlgorithmName
        self.revisionsHashValue = revisionsHashValue
        self.revisionsSaltValue = revisionsSaltValue
        self.revisionsSpinCount = revisionsSpinCount
        self.workbookAlgorithmName = workbookAlgorithmName
        self.workbookHashValue = workbookHashValue
        self.workbookSaltValue = workbookSaltValue
        self.workbookSpinCount = workbookSpinCount
# Backwards compatibility
DocumentSecurity = WorkbookProtection
class FileSharing(Serialisable):
    """File-sharing settings: a read-only recommendation plus the hashed
    password that reserves write access for ``userName``.
    """
    tagname = "fileSharing"
    readOnlyRecommended = Bool(allow_none=True)
    userName = String()
    # Legacy password hash; algorithmName/hashValue/saltValue/spinCount
    # below are the ISO-style replacement.
    reservationPassword = HexBinary(allow_none=True)
    algorithmName = String(allow_none=True)
    hashValue = HexBinary(allow_none=True)
    saltValue = HexBinary(allow_none=True)
    spinCount = Integer(allow_none=True)
    def __init__(self,
                 readOnlyRecommended=None,
                 userName=None,
                 reservationPassword=None,
                 algorithmName=None,
                 hashValue=None,
                 saltValue=None,
                 spinCount=None,
                 ):
        self.readOnlyRecommended = readOnlyRecommended
        self.userName = userName
        self.reservationPassword = reservationPassword
        self.algorithmName = algorithmName
        self.hashValue = hashValue
        self.saltValue = saltValue
        self.spinCount = spinCount
| aragos/tichu-tournament | python/openpyxl/workbook/protection.py | Python | mit | 3,908 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class MailMessage(models.Model):
    _inherit = 'mail.message'

    def portal_message_format(self):
        """Return portal-ready values for the messages in ``self``."""
        portal_fields = [
            'id', 'body', 'date', 'author_id', 'email_from',  # base message fields
            'message_type', 'subtype_id', 'is_internal', 'subject',  # message specific
            'model', 'res_id', 'record_name',  # document related
        ]
        return self._portal_message_format(portal_fields)

    def _portal_message_format(self, fields_list):
        """Format messages and ensure every attachment entry carries an
        access token, generating one on the fly when missing so portal
        users can download the attachment.
        """
        message_values = self._message_format(fields_list)
        attachment_sudo = self.env['ir.attachment'].sudo()
        for values in message_values:
            for attachment in values.get('attachment_ids', []):
                if attachment.get('access_token'):
                    continue
                token = attachment_sudo.browse(
                    attachment['id']).generate_access_token()[0]
                attachment['access_token'] = token
        return message_values
| jeremiahyan/odoo | addons/portal/models/mail_message.py | Python | gpl-3.0 | 978 |
from pycp2k.inputsection import InputSection
from ._aa_planar2 import _aa_planar2
from ._planar2 import _planar2
from ._aa_cylindrical2 import _aa_cylindrical2
from ._aa_cuboidal2 import _aa_cuboidal2
class _dirichlet_bc2(InputSection):
    """CP2K ``DIRICHLET_BC`` input section with repeatable geometry
    subsections (AA_PLANAR, PLANAR, AA_CYLINDRICAL, AA_CUBOIDAL).
    """
    def __init__(self):
        InputSection.__init__(self)
        self.Verbose_output = None
        self.AA_PLANAR_list = []
        self.PLANAR_list = []
        self.AA_CYLINDRICAL_list = []
        self.AA_CUBOIDAL_list = []
        self._name = "DIRICHLET_BC"
        self._keywords = {'Verbose_output': 'VERBOSE_OUTPUT'}
        self._repeated_subsections = {'AA_CYLINDRICAL': '_aa_cylindrical2', 'PLANAR': '_planar2', 'AA_PLANAR': '_aa_planar2', 'AA_CUBOIDAL': '_aa_cuboidal2'}
        self._attributes = ['AA_PLANAR_list', 'PLANAR_list', 'AA_CYLINDRICAL_list', 'AA_CUBOIDAL_list']

    def _append_section(self, section_class, section_list, section_parameters):
        """Instantiate *section_class*, optionally set its
        ``Section_parameters``, append it to *section_list* and return it.

        Shared implementation for the four ``*_add`` helpers below, which
        were previously copy-pasted.
        """
        new_section = section_class()
        if section_parameters is not None:
            if hasattr(new_section, 'Section_parameters'):
                new_section.Section_parameters = section_parameters
        section_list.append(new_section)
        return new_section

    def AA_CYLINDRICAL_add(self, section_parameters=None):
        """Add and return a new AA_CYLINDRICAL subsection."""
        return self._append_section(_aa_cylindrical2,
                                    self.AA_CYLINDRICAL_list,
                                    section_parameters)

    def PLANAR_add(self, section_parameters=None):
        """Add and return a new PLANAR subsection."""
        return self._append_section(_planar2, self.PLANAR_list,
                                    section_parameters)

    def AA_PLANAR_add(self, section_parameters=None):
        """Add and return a new AA_PLANAR subsection."""
        return self._append_section(_aa_planar2, self.AA_PLANAR_list,
                                    section_parameters)

    def AA_CUBOIDAL_add(self, section_parameters=None):
        """Add and return a new AA_CUBOIDAL subsection."""
        return self._append_section(_aa_cuboidal2, self.AA_CUBOIDAL_list,
                                    section_parameters)
| SINGROUP/pycp2k | pycp2k/classes/_dirichlet_bc2.py | Python | lgpl-3.0 | 2,187 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration: creates the StickyTweet table."""
    def forwards(self, orm):
        """Create lizard_sticky_twitterized_stickytweet with all fields."""
        # Adding model 'StickyTweet'
        db.create_table('lizard_sticky_twitterized_stickytweet', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('twitter_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('status_id', self.gf('django.db.models.fields.BigIntegerField')(max_length=255, null=True, blank=True)),
            ('tweet', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('visible', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('media_url', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_on', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('geom', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True, blank=True)),
        ))
        db.send_create_signal('lizard_sticky_twitterized', ['StickyTweet'])
    def backwards(self, orm):
        """Drop the table created by forwards()."""
        # Deleting model 'StickyTweet'
        db.delete_table('lizard_sticky_twitterized_stickytweet')
    # Frozen ORM state used by South when applying this migration.
    models = {
        'lizard_sticky_twitterized.stickytweet': {
            'Meta': {'object_name': 'StickyTweet'},
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'media_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'status_id': ('django.db.models.fields.BigIntegerField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'tweet': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'twitter_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        }
    }
    complete_apps = ['lizard_sticky_twitterized']
| lizardsystem/lizard-sticky-twitterized | lizard_sticky_twitterized/migrations/0001_initial.py | Python | gpl-3.0 | 2,652 |
"""log more
Revision ID: 430039611635
Revises: d827694c7555
Create Date: 2016-02-10 08:47:28.950891
"""
# revision identifiers, used by Alembic.
revision = '430039611635'
down_revision = 'd827694c7555'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add nullable dashboard_id and slice_id columns to the logs table."""
    op.add_column('logs', sa.Column('dashboard_id', sa.Integer(), nullable=True))
    op.add_column('logs', sa.Column('slice_id', sa.Integer(), nullable=True))
def downgrade():
    """Drop the columns added by upgrade(), restoring the previous schema."""
    op.drop_column('logs', 'slice_id')
    op.drop_column('logs', 'dashboard_id')
| alanmcruickshank/superset-dev | superset/migrations/versions/430039611635_log_more.py | Python | apache-2.0 | 530 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
GridAverage.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputRaster
from processing.tools import dataobjects
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class GridAverage(GdalAlgorithm):
    """Processing algorithm wrapping ``gdal_grid`` with the 'average'
    (moving average) interpolation method.
    """

    INPUT = 'INPUT'
    Z_FIELD = 'Z_FIELD'
    RADIUS_1 = 'RADIUS_1'
    RADIUS_2 = 'RADIUS_2'
    MIN_POINTS = 'MIN_POINTS'
    ANGLE = 'ANGLE'
    NODATA = 'NODATA'
    OUTPUT = 'OUTPUT'
    RTYPE = 'RTYPE'
    TYPE = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64']

    def name(self):
        return 'gridaverage'

    def displayName(self):
        return self.tr('Grid (Moving average)')

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'grid.png'))

    def group(self):
        return self.tr('Raster analysis')

    def defineCharacteristics(self):
        """Declare the algorithm's inputs and output."""
        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'),
                                          [dataobjects.TYPE_VECTOR_POINT]))
        self.addParameter(ParameterTableField(self.Z_FIELD,
                                              self.tr('Z field'), self.INPUT,
                                              ParameterTableField.DATA_TYPE_NUMBER,
                                              True))
        # All numeric parameters share the same lower bound and default;
        # only the angle has a tighter upper bound of 359 degrees.
        numeric_params = (
            (self.RADIUS_1, 'Radius 1', 99999999.999999),
            (self.RADIUS_2, 'Radius 2', 99999999.999999),
            (self.MIN_POINTS, 'Min points', 99999999.999999),
            (self.ANGLE, 'Angle', 359.0),
            (self.NODATA, 'Nodata', 99999999.999999),
        )
        for param_id, label, upper in numeric_params:
            self.addParameter(ParameterNumber(param_id, self.tr(label),
                                              0.0, upper, 0.0))
        self.addParameter(ParameterSelection(self.RTYPE,
                                             self.tr('Output raster type'),
                                             self.TYPE, 5))
        self.addOutput(OutputRaster(self.OUTPUT,
                                    self.tr('Interpolated moving average')))

    def getConsoleCommands(self):
        """Assemble the gdal_grid command line from the parameter values."""
        layer_path = str(self.getParameterValue(self.INPUT))
        arguments = ['-l', os.path.basename(os.path.splitext(layer_path)[0])]
        field_name = self.getParameterValue(self.Z_FIELD)
        if field_name is not None and field_name != '':
            arguments.extend(['-zfield', field_name])
        # gdal_grid takes the algorithm and its options as one
        # colon-separated token.
        algorithm = ':'.join([
            'average',
            'radius1=%s' % self.getParameterValue(self.RADIUS_1),
            'radius2=%s' % self.getParameterValue(self.RADIUS_2),
            'angle=%s' % self.getParameterValue(self.ANGLE),
            'min_points=%s' % self.getParameterValue(self.MIN_POINTS),
            'nodata=%s' % self.getParameterValue(self.NODATA),
        ])
        arguments.extend(['-a', algorithm,
                          '-ot', self.TYPE[self.getParameterValue(self.RTYPE)],
                          layer_path,
                          str(self.getOutputValue(self.OUTPUT))])
        return ['gdal_grid', GdalUtils.escapeAndJoin(arguments)]
| gioman/QGIS | python/plugins/processing/algs/gdal/GridAverage.py | Python | gpl-2.0 | 4,840 |
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V.
"""
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(alexpilotti): Move the exceptions to a separate module
# TODO(alexpilotti): Add more domain exceptions
class HyperVException(exception.NovaException):
    """Base exception for Hyper-V driver errors."""
    def __init__(self, message=None):
        super(HyperVException, self).__init__(message)
# TODO(alexpilotti): Add a storage exception base class
class VHDResizeException(HyperVException):
    """Raised when a VHD resize operation fails."""
    def __init__(self, message=None):
        # Name this class (not the parent) in super() so the MRO is walked
        # correctly; the original skipped HyperVException.__init__ by
        # passing the parent class to super().
        super(VHDResizeException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
    """Raised when the service account lacks Hyper-V permissions."""
    def __init__(self, message=None):
        # Name this class (not the parent) in super() so the MRO is walked
        # correctly; the original skipped HyperVException.__init__ by
        # passing the parent class to super().
        super(HyperVAuthorizationException, self).__init__(message)
class UnsupportedConfigDriveFormatException(HyperVException):
    """Raised when the requested config drive format is not supported."""
    def __init__(self, message=None):
        # Name this class (not the parent) in super() so the MRO is walked
        # correctly; the original skipped HyperVException.__init__ by
        # passing the parent class to super().
        super(UnsupportedConfigDriveFormatException, self).__init__(message)
class VMUtils(object):
# These constants can be overridden by inherited classes
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
_DISK_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
_DVD_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
_IDE_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
_IDE_DVD_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
_SERIAL_PORT_RES_SUB_TYPE = 'Microsoft Serial Port'
_SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
_VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
_RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
_STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
_SHUTDOWN_COMPONENT = "Msvm_ShutdownComponent"
_VIRTUAL_SYSTEM_CURRENT_SETTINGS = 3
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
def __init__(self, host='.'):
self._enabled_states_map = dict((v, k) for k, v in
self._vm_power_states_map.iteritems())
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instance_notes(self):
instance_notes = []
for vs in self._conn.Msvm_VirtualSystemSettingData(
['ElementName', 'Notes'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS):
instance_notes.append((vs.ElementName,
[v for v in vs.Notes.split('\n') if v]))
return instance_notes
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
vm_names = [v.ElementName for v in
self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vm_names
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
# See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
# Nova requires a valid state to be returned. Hyper-V has more
# states than Nova, typically intermediate ones and since there is
# no direct mapping for those, ENABLED is the only reasonable option
# considering that in all the non mappable states the instance
# is running.
enabled_state = self._enabled_states_map.get(si.EnabledState,
constants.HYPERV_VM_STATE_ENABLED)
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def get_vm_id(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return vm.Name
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
if dynamic_memory_ratio > 1:
mem_settings.DynamicMemoryEnabled = True
# Must be a multiple of 2
reserved_mem = min(
long(max_mem / dynamic_memory_ratio) >> 1 << 1,
max_mem)
else:
mem_settings.DynamicMemoryEnabled = False
reserved_mem = max_mem
mem_settings.Reservation = reserved_mem
# Start with the minimum memory
mem_settings.VirtualQuantity = reserved_mem
self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
procsetting.LimitProcessorFeatures = limit_cpu_features
self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vm = self._lookup_vm_check(vm_name)
vmsetting = self._get_vm_setting_data(vm)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
if not self._conn.Msvm_VirtualSystemManagementService():
msg = _("The Windows account running nova-compute on this Hyper-V"
" host doesn't have the required permissions to create or"
" operate the virtual machine.")
raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio, notes=None):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug('Creating VM %s', vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name, notes)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug('Setting memory for vm %s', vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug('Set vCPUs for vm %s', vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name, notes):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = vm_name
(vm_path,
job_path,
ret_val) = vs_man_svc.DefineVirtualSystem([], None,
vs_gs_data.GetText_(1))
self.check_ret_val(ret_val, job_path)
vm = self._get_wmi_obj(vm_path)
if notes:
vmsetting = self._get_vm_setting_data(vm)
vmsetting.Notes = '\n'.join(notes)
self._modify_virtual_system(vs_man_svc, vm_path, vmsetting)
return self._get_wmi_obj(vm_path)
def _modify_virtual_system(self, vs_man_svc, vm_path, vmsetting):
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystem(
ComputerSystem=vm_path,
SystemSettingData=vmsetting.GetText_(1))[1:]
self.check_ret_val(ret_val, job_path)
def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
return [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)][0].path_()
def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks(self, scsi_controller_path):
volumes = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"Parent = '%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE,
'parent':
scsi_controller_path.replace("'", "''")})
return volumes
def _get_new_setting_data(self, class_name):
return self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
def _get_new_resource_setting_data(self, resource_sub_type,
class_name=None):
if class_name is None:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
return self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"InstanceID LIKE '%%\\Default'" %
{"class_name": class_name,
"res_sub_type": resource_sub_type})[0]
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
"""Create an IDE drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
if drive_type == constants.IDE_DISK:
res_sub_type = self._DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._DVD_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
# Set the IDE ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
# Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.IDE_DISK:
res_sub_type = self._IDE_DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._IDE_DVD_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
# Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
# Add the new vhd object as a virtual hard disk to the vm.
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
vm = self._lookup_vm_check(vm_name)
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_CTRL_RES_SUB_TYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.Address = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
nic_data.Connection = [vswitch_conn_data]
vm = self._lookup_vm_check(vm_name)
self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
return self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
"""Create a (synthetic) nic and attach it to the vm."""
# Create a new nic
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
# Configure the nic
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
# Add the new nic to the vm
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
def soft_shutdown_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
shutdown_component = vm.associators(
wmi_result_class=self._SHUTDOWN_COMPONENT)
if not shutdown_component:
# If no shutdown_component is found, it means the VM is already
# in a shutdown state.
return
reason = 'Soft shutdown requested by OpenStack Nova.'
(ret_val, ) = shutdown_component[0].InitiateShutdown(Force=False,
Reason=reason)
self.check_ret_val(ret_val, None)
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
# Invalid state for current operation (32775) typically means that
# the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s",
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
volume_drives.append(drive_path)
disk_files = []
for disk_resource in disk_resources:
disk_files.extend(
[c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
def _get_vm_disks(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
disk_resources = [r for r in rasds if
r.ResourceSubType in
[self._IDE_DISK_RES_SUB_TYPE,
self._IDE_DVD_RES_SUB_TYPE]]
volume_resources = [r for r in rasds if
r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
return (disk_resources, volume_resources)
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
# Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=[0]):
if ret_val == constants.WMI_JOB_STATUS_STARTED:
return self._wait_for_job(job_path)
elif ret_val not in success_values:
raise HyperVException(_('Operation failed with return value: %s')
% ret_val)
def _wait_for_job(self, job_path):
"""Poll WMI job state and wait for completion."""
job = self._get_wmi_obj(job_path)
while job.JobState == constants.WMI_JOB_STATE_RUNNING:
time.sleep(0.1)
job = self._get_wmi_obj(job_path)
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
job_state = job.JobState
if job.path().Class == "Msvm_ConcreteJob":
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(err_sum_desc)s - %(err_desc)s - "
"Error code: %(err_code)d") %
{'job_state': job_state,
'err_sum_desc': err_sum_desc,
'err_desc': err_desc,
'err_code': err_code})
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(error)s") %
{'job_state': job_state,
'error': error})
else:
raise HyperVException(_("WMI job failed with status "
"%d. No error "
"description available") %
job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s",
{'desc': desc, 'elap': elap})
return job
def _get_wmi_obj(self, path):
return wmi.WMI(moniker=path.replace('\\', '/'))
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
ComputerSystem=vm_path)
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
vm_path)
self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val,
snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path):
vm = self._lookup_vm_check(vm_name)
physical_disk = self._get_mounted_disk_resource_from_path(disk_path)
if physical_disk:
self._remove_virt_resource(physical_disk, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path):
physical_disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE})
for physical_disk in physical_disks:
if physical_disk.HostResource:
if physical_disk.HostResource[0].lower() == disk_path.lower():
return physical_disk
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
"WHERE DriveNumber=" +
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._PHYS_DISK_RES_SUB_TYPE,
"parent":
controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def enable_vm_metrics_collection(self, vm_name):
        """Metrics collection is unsupported here; this always raises.

        :param vm_name: unused.
        :raises NotImplementedError: on every call.
        """
        raise NotImplementedError(_("Metrics collection is not supported on "
                                    "this version of Hyper-V"))
def get_vm_serial_port_connection(self, vm_name, update_connection=None):
    """Return the VM's serial port connection, optionally updating it first.

    :param vm_name: name of the virtual machine.
    :param update_connection: when truthy, set as the new (single-element)
        connection before reading it back.
    :returns: the serial port's Connection value.
    """
    vm = self._lookup_vm_check(vm_name)
    settings = vm.associators(
        wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
    allocations = settings.associators(
        wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
    serial_ports = [resource for resource in allocations
                    if resource.ResourceSubType ==
                    self._SERIAL_PORT_RES_SUB_TYPE]
    # IndexError here means the VM unexpectedly has no serial port.
    serial_port = serial_ports[0]
    if update_connection:
        serial_port.Connection = [update_connection]
        self._modify_virt_resource(serial_port, vm.path_())
    return serial_port.Connection
def get_active_instances(self):
    """Return the names of all the active instances known to Hyper-V."""
    active_names = []
    for system in self._conn.Msvm_ComputerSystem(Caption="Virtual Machine"):
        if system.EnabledState == constants.HYPERV_VM_STATE_ENABLED:
            active_names.append(system.ElementName)
    return active_names
| jumpstarter-io/nova | nova/virt/hyperv/vmutils.py | Python | apache-2.0 | 27,434 |
# TeamLogger
# Copyright (C) 2017 Maxence PAPILLON
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""TeamLogger URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
"""
import re
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib.auth import views as auth
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from nouvelles.admin import admin_page
# Build the root like "^/" or with a context like "^context/"
# APP_CONTEXT may begin with a slash; strip it so the pattern anchors cleanly.
site_root = r'^' + re.sub(r'^/', '', getattr(settings, 'APP_CONTEXT'))
# Every route (auth, news app, admin) is nested under the configured root.
urlpatterns = [url(site_root, include([
    url('^', include('django.contrib.auth.urls')),
    url('^', include('nouvelles.urls')),
    url(r'^login/$', auth.LoginView.as_view(), name='login'),
    url(r'^logout/$', auth.LogoutView.as_view(), name='logout'),
    url(r'^admin/', admin_page.urls)
]))]
if settings.DEBUG:
    import debug_toolbar
    # Turn on Django Debug Toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
    # Adding urls for MEDIA files in debug
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += staticfiles_urlpatterns()
| mpapillon/django-teamlogger | src/teamlogger/urls.py | Python | gpl-3.0 | 1,933 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import openerp.addons.decimal_precision as dp
from openerp import models, fields, api, exceptions, _
class AccountTreasuryForecastTemplate(models.Model):
    """Extend the treasury forecast template with receivables and cash-flows."""
    _inherit = 'account.treasury.forecast.template'
    # Forecast lines restricted to the 'receivable' line type.
    receivable_line_ids = fields.One2many(
        "account.treasury.forecast.line.template", "treasury_template_id",
        string="Receivable Line", domain=[('line_type', '=', 'receivable')])
    # Cash-flow records attached to this template.
    cashflow_ids = fields.One2many(
        "account.treasury.forecast.cashflow.template", "treasury_template_id",
        string="Cash-Flow")
class AccountTreasuryForecastLineTemplate(models.Model):
    """Add the 'receivable' option to the forecast line type selection."""
    _inherit = "account.treasury.forecast.line.template"
    # NOTE(review): this redefines (rather than extends) the selection list --
    # confirm it still includes every option declared by the parent model.
    line_type = fields.Selection([('recurring', 'Recurring'),
                                  ('variable', 'Variable'),
                                  ('receivable', 'Receivable')])
class AccountTreasuryForecastCashflowTemplate(models.Model):
    """Template for a single cash-flow record of a treasury forecast."""
    _name = "account.treasury.forecast.cashflow.template"
    _description = "Cash-Flow Record Template"
    name = fields.Char(string="Description")
    date = fields.Date(string="Date")
    journal_id = fields.Many2one("account.journal", string="Journal")
    amount = fields.Float(string="Amount",
                          digits_compute=dp.get_precision("Account"))
    # Sign convention enforced by _check_amount: 'in' > 0, 'out' < 0.
    flow_type = fields.Selection([('in', 'Input'), ('out', 'Output')],
                                 string="Type")
    treasury_template_id = fields.Many2one(
        "account.treasury.forecast.template", string="Treasury Template")
    @api.one
    @api.constrains('flow_type', 'amount')
    def _check_amount(self):
        """Ensure the sign of ``amount`` matches ``flow_type``.

        :raises exceptions.Warning: when an input amount is not strictly
            positive, or an output amount is not strictly negative.
        """
        if self.flow_type == 'in' and self.amount <= 0.0:
            raise exceptions.Warning(_("Error!:: If input cash-flow, "
                                       "amount must be positive"))
        if self.flow_type == 'out' and self.amount >= 0.0:
            raise exceptions.Warning(_("Error!:: If output cash-flow, "
                                       "amount must be negative"))
| InakiZabala/odoomrp-wip | account_treasury_forecast_cashflow/models/account_treasury_forecast_template.py | Python | agpl-3.0 | 2,861 |
from jawa.attribute import get_attribute_classes
def test_mandatory_attributes():
    """Every registered attribute parser declares its required metadata."""
    for name, parser in get_attribute_classes().items():
        for prop in ('ADDED_IN', 'MINIMUM_CLASS_VERSION'):
            assert hasattr(parser, prop), (
                '{name} parser missing mandatory {p} property'.format(
                    name=name,
                    p=prop
                )
            )
def test_attribute_naming():
    """Parsers must end in 'Attribute' unless they set ATTRIBUTE_NAME."""
    for name, parser in get_attribute_classes().items():
        # An explicit ATTRIBUTE_NAME overrides the naming convention.
        if hasattr(parser, 'ATTRIBUTE_NAME'):
            continue
        message = ('{name} parser does not follow naming convention and does'
                   ' not explicitly set it.'.format(name=name))
        assert parser.__name__.endswith('Attribute'), message
| TkTech/Jawa | tests/attributes/test_general_attributes.py | Python | mit | 791 |
"""Tests for add_inator route."""
import json
from http import HTTPStatus
from urllib.parse import urlparse
from utils import from_datetime
def test_login_required(app):
    """Anonymous users are redirected to the login page."""
    # Without a session, GET /add/ bounces to /login/.
    response = app.get("/add/")
    assert response.status_code == HTTPStatus.FOUND
    assert urlparse(response.location).path == "/login/"
    # Following the redirect surfaces the flash message.
    response = app.get("/add/", follow_redirects=True)
    assert b"You must be logged in to access that page." in response.data
def test_load_page(app):
    """Logged-in users can view the add-inator form."""
    with app.session_transaction() as session:
        session["username"] = "heinz"
    response = app.get("/add/")
    assert response.status_code == HTTPStatus.OK
    assert b"Add a New Inator" in response.data
def test_submit_invalid_methods(app):
    """DELETE and PUT are rejected on the add route."""
    with app.session_transaction() as session:
        session["username"] = "heinz"
    # HEAD and OPTIONS are implemented by Flask itself, so only DELETE
    # and PUT need explicit coverage.
    for verb in (app.delete, app.put):
        response = verb("/add/")
        assert response.status_code == HTTPStatus.METHOD_NOT_ALLOWED
def test_submit_valid_form(app):
    """A valid submission adds the inator and flashes a success message."""
    with app.session_transaction() as session:
        session["username"] = "heinz"
    form = {
        "name": "Beep-inator",
        "location": "Upstairs computer science",
        "condition": 5,
        "description": ("Someone needs to check the battery on their UPS"
                        "or I am going to go insane."),
    }
    response = app.post("/add/", data=form, follow_redirects=True)
    # Redirected home, where the new inator and the flash message appear.
    assert response.status_code == HTTPStatus.OK
    assert b"List of Inators" in response.data
    assert b"Beep-inator" in response.data
    assert b"Successfully added Beep-inator." in response.data
def test_submit_valid_check_redirect(app):
    """A valid submission redirects back to the index page."""
    with app.session_transaction() as session:
        session["username"] = "heinz"
    form = {
        "name": "Beep-inator",
        "location": "Upstairs computer science",
        "condition": 5,
        "description": ("Someone needs to check the battery on their UPS"
                        "or I am going to go insane."),
    }
    # Not following redirects lets us inspect the Location header.
    response = app.post("/add/", data=form)
    assert response.status_code == HTTPStatus.FOUND
    assert urlparse(response.location).path == "/"
def test_submit_valid_form_data_exists(app, inator_data, data_path):
    """Adding an inator preserves the ones already stored on disk."""
    with app.session_transaction() as session:
        session["username"] = "heinz"
    # Seed the data file with pre-existing inators.
    with open(data_path, "w") as data_file:
        json.dump({"inators": inator_data}, data_file, default=from_datetime)
    form = {
        "name": "Beep-inator",
        "location": "Upstairs computer science",
        "condition": 5,
        "description": ("Someone needs to check the battery on their UPS"
                        "or I am going to go insane."),
    }
    response = app.post("/add/", data=form, follow_redirects=True)
    # Redirected home with both old and new inators listed.
    assert response.status_code == HTTPStatus.OK
    assert b"List of Inators" in response.data
    assert b"Beep-inator" in response.data
    for ident, inator in inator_data.items():
        assert ident.encode("ascii") in response.data
        assert inator["name"].encode("ascii") in response.data
def test_submit_missing_field(app):
    """Omitting a required field yields a BAD REQUEST."""
    with app.session_transaction() as session:
        session["username"] = "heinz"
    # "name" is deliberately absent; Flask aborts with BAD REQUEST
    # automatically for missing form fields.
    form = {
        "location": "Upstairs computer science",
        "condition": 5,
        "description": ("Someone needs to check the battery on their UPS"
                        "or I am going to go insane."),
    }
    response = app.post("/add/", data=form, follow_redirects=True)
    assert response.status_code == HTTPStatus.BAD_REQUEST
def test_submit_invalid_choice(app):
    """Invalid condition values yield a BAD REQUEST."""
    with app.session_transaction() as session:
        session["username"] = "heinz"
    description = ("Someone needs to check the battery on their UPS"
                   "or I am going to go insane.")
    # Both a non-numeric value and an out-of-range one must be rejected.
    for bad_condition in ("blep", 6):
        response = app.post("/add/", data={
            "name": "Beep-inator",
            "location": "Upstairs computer science",
            "condition": bad_condition,
            "description": description,
        }, follow_redirects=True)
        assert response.status_code == HTTPStatus.BAD_REQUEST
| kevinschoonover/cs2001-cpl | homework/2017-fs-1b-hw04-ksyh3/test/test_add_inator.py | Python | gpl-3.0 | 5,836 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and test a 1024-point daily artificial series: MovingMedian trend,
# 12-step cycle, AR(12) component, Anscombe transform, zero noise, no
# exogenous variables.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 12);
# In The Name Of God
# ========================================
# [] File Name : functions.py
#
# [] Creation Date : 10-05-2015
#
# [] Created By : Parham Alvani (parham.alvani@gmail.com)
# =======================================
__author__ = 'Parham Alvani'
import dis
def square(x):
    """Return x * x for ints; print an error and return None otherwise."""
    if isinstance(x, int):
        return x * x
    # Non-int input: report the offending type and fall through (None).
    print("Error: x is a %s instead of int." % type(x))
def square2(x):
    """Return x * x for anything with __mul__; print an error otherwise."""
    if hasattr(x, '__mul__'):
        return x * x
    # Object lacks multiplication support entirely; result is None.
    print("Error: your object do not support multiplication")
# Demonstrate both squaring helpers.
print(square(10))
print(square2(10.1))
# Name of the module the function was defined in.
print(square.__module__)
# Disassembling function byte codes #
print(square.__code__.co_code)
print(square.__code__.co_varnames)
print(square.__code__.co_argcount)
print(square.__code__.co_consts)
dis.dis(square.__code__)
| 1995parham/Python101 | src/functions.py | Python | gpl-2.0 | 825 |
from django.conf.urls import url
from . import views
app_name = 'polls'
# URL names are namespaced under 'polls' (e.g. reverse('polls:detail')).
urlpatterns = [
    # ex: /polls/
    url(r'^$', views.index, name='index'),
    # ex: /polls/5/
    #url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),
    # ex: /polls/specifics/5/ (detail view, moved under a 'specifics/' prefix)
    url(r'^specifics/(?P<question_id>[0-9]+)/$', views.detail, name='detail'),
    # ex: /polls/5/results/
    url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'),
    # ex: /polls/5/vote/
    url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
import agros2d
from test_suite.scenario import Agros2DTestCase
from test_suite.scenario import Agros2DTestResult
class TestRFTEHarmonicPlanar(Agros2DTestCase):
    """Planar harmonic RF TE benchmark (waveguide geometry at 16 GHz)."""
    def setUp(self):
        """Build and solve the planar RF TE model."""
        # model
        problem = agros2d.problem(clear = True)
        problem.coordinate_type = "planar"
        problem.mesh_type = "triangle"
        problem.frequency = 1.6e10
        # disable view
        agros2d.view.mesh.disable()
        agros2d.view.post2d.disable()
        # fields
        self.rf = agros2d.field("rf_te")
        self.rf.analysis_type = "harmonic"
        self.rf.number_of_refinements = 3
        self.rf.polynomial_order = 3
        self.rf.solver = "linear"
        # boundaries
        self.rf.add_boundary("Perfect electric conductor", "rf_te_electric_field")
        self.rf.add_boundary("Matched boundary", "rf_te_impedance", { "rf_te_impedance" : 377 })
        self.rf.add_boundary("Surface current", "rf_te_surface_current", {"rf_te_surface_current_real" : 1, "rf_te_surface_current_imag" : 0.5})
        self.rf.add_material("Air", {"rf_te_permittivity" : 1, "rf_te_permeability" : 1, "rf_te_conductivity" : 3e-2})
        # geometry
        geometry = agros2d.geometry
        # edges
        geometry.add_edge(-0.01, 0.02286, -0.01, 0, 0, boundaries = {"rf_te" : "Surface current"})
        geometry.add_edge(0.06907, 0.02286, 0.076, 0.01593, 90, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.076, 0.01593, 0.081, 0.01593, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.081, 0.01593, 0.081, 0.02286, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.086, 0.00693, 0.086, 0, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.086, 0.00693, 0.081, 0.00693, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.081, 0.00693, 0.05907, 0, 20, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(-0.01, 0, 0.05907, 0, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.17, 0.02286, 0.081, 0.02286, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.06907, 0.02286, -0.01, 0.02286, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.19286, 0, 0.17, 0.02286, 90, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.19286, 0, 0.19286, -0.04, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.175, -0.04, 0.175, -0.015, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.175, -0.015, 0.16, 0, 90, boundaries = {"rf_te" : "Perfect electric conductor"})
        geometry.add_edge(0.175, -0.04, 0.19286, -0.04, 0, boundaries = {"rf_te" : "Matched boundary"})
        geometry.add_edge(0.086, 0, 0.16, 0, 0, boundaries = {"rf_te" : "Perfect electric conductor"})
        # labels
        geometry.add_label(0.0359418, 0.0109393, materials = {"rf_te" : "Air"})
        agros2d.view.zoom_best_fit()
        # solve problem
        problem.solve()
    def test_values(self):
        """Compare local field values at a probe point with reference data."""
        # point value
        point = self.rf.local_values(0.019107, 0.016725)
        self.value_test("Electric field", point["E"], 456.810483)
        self.value_test("Electric field - real", point["Er"], 141.973049)
        self.value_test("Electric field - imag", point["Ei"], 434.18829)
        self.value_test("Flux density", point["B"], 1.115591e-6)
        self.value_test("Flux density - x - real", point["Brx"], 6.483596e-7)
        self.value_test("Flux density - x - imag", point["Bix"], -2.301715e-7)
        self.value_test("Flux density - y - real", point["Bry"], -4.300969e-7)
        self.value_test("Flux density - y - imag", point["Biy"], -7.656443e-7)
        self.value_test("Magnetic field", point["H"], 0.887759)
        self.value_test("Magnetic field - x - real", point["Hrx"], 0.515948)
        self.value_test("Magnetic field - x - imag", point["Hix"], -0.183165)
        self.value_test("Magnetic field - y - real", point["Hry"], -0.34226)
        self.value_test("Magnetic field - y - imag", point["Hiy"], -0.60928)
        self.value_test("Poynting vector - x", point["Nx"], 156.567066)
        self.value_test("Poynting vector - y", point["Ny"], -3.138616, 1)
        # volume integral
        # volume_integrals = rf.volume_integrals([0, 1, 2])
        # testEnergy = agros2d.test("Energy", volume["We"], 1.799349e-8)
        # surface integral
        # surface_integrals = rf.surface_integrals([1, 12])
        # testQ = agros2d.test("Electric charge", surface["Q"], -1.291778e-9)
class TestRFTEHarmonicAxisymmetric(Agros2DTestCase):
    """Axisymmetric harmonic RF TE benchmark (resonant cavity at 1 GHz)."""
    def setUp(self):
        """Build and solve the axisymmetric RF TE model."""
        # problem
        problem = agros2d.problem(clear = True)
        problem.coordinate_type = "axisymmetric"
        problem.mesh_type = "triangle"
        problem.frequency = 1e+09
        # disable view
        agros2d.view.mesh.disable()
        agros2d.view.post2d.disable()
        # fields
        # rf
        self.rf = agros2d.field("rf_te")
        self.rf.analysis_type = "harmonic"
        self.rf.number_of_refinements = 3
        self.rf.polynomial_order = 3
        self.rf.adaptivity_type = "disabled"
        self.rf.solver = "linear"
        # boundaries
        self.rf.add_boundary("PEC", "rf_te_electric_field", {"rf_te_electric_field_real" : 0, "rf_te_electric_field_imag" : 0})
        self.rf.add_boundary("Source", "rf_te_electric_field", {"rf_te_electric_field_real" : 1, "rf_te_electric_field_imag" : 0})
        self.rf.add_boundary("Impedance", "rf_te_impedance", {"rf_te_impedance" : 377})
        self.rf.add_boundary("PMC", "rf_te_magnetic_field", {"rf_te_magnetic_field_real" : 0, "rf_te_magnetic_field_imag" : 0})
        # materials
        self.rf.add_material("Air", {"rf_te_permittivity" : 1, "rf_te_permeability" : 1, "rf_te_conductivity" : 0, "rf_te_current_density_external_real" : 0, "rf_te_current_density_external_imag" : 0})
        # geometry
        geometry = agros2d.geometry
        geometry.add_edge(0, 1, 0, -1, boundaries = {"rf_te" : "PEC"})
        geometry.add_edge(0, -1, 1, -1, boundaries = {"rf_te" : "Impedance"})
        geometry.add_edge(1, -1, 1, -0.75, boundaries = {"rf_te" : "Impedance"})
        geometry.add_edge(1, -0.75, 1.7, 0.65, boundaries = {"rf_te" : "Impedance"})
        geometry.add_edge(1.7, 0.65, 0.9, 0.35, boundaries = {"rf_te" : "Impedance"})
        geometry.add_edge(0.9, 0.35, 0, 1, boundaries = {"rf_te" : "Impedance"})
        geometry.add_edge(0.5, -0.25, 0.25, -0.5, angle = 90, boundaries = {"rf_te" : "Source"})
        geometry.add_edge(0.25, -0.5, 0.5, -0.75, angle = 90, boundaries = {"rf_te" : "Source"})
        geometry.add_edge(0.5, -0.75, 0.75, -0.5, angle = 90, boundaries = {"rf_te" : "Source"})
        geometry.add_edge(0.75, -0.5, 0.5, -0.25, angle = 90, boundaries = {"rf_te" : "Source"})
        geometry.add_edge(0.4, 0.25, 0.3, -0.05, boundaries = {"rf_te" : "PMC"})
        geometry.add_edge(0.4, 0.25, 0.75, 0.2, boundaries = {"rf_te" : "PMC"})
        geometry.add_edge(0.75, 0.2, 0.85, -0.1, boundaries = {"rf_te" : "PMC"})
        geometry.add_edge(0.3, -0.05, 0.85, -0.1, boundaries = {"rf_te" : "PMC"})
        geometry.add_label(0.399371, 0.440347, materials = {"rf_te" : "Air"})
        geometry.add_label(0.484795, -0.434246, materials = {"rf_te" : "none"})
        geometry.add_label(0.57193, 0.0710058, materials = {"rf_te" : "none"})
        agros2d.view.zoom_best_fit()
        problem.solve()
    def test_values(self):
        """Compare local field values at a probe point with reference data."""
        # point value
        point = self.rf.local_values(0.92463, -0.20118)
        self.value_test("Electric field", point["E"], 0.5385)
        self.value_test("Electric field - real", point["Er"], 0.53821)
        self.value_test("Electric field - imag", point["Ei"], 0.01767)
        self.value_test("Displacement", point["D"], 4.768e-12)
        self.value_test("Displacement - real", point["Dr"], 4.7654e-12)
        self.value_test("Displacement - imag", point["Di"], 1.56463e-13)
        self.value_test("Magnetic field", point["H"], 0.00195)
        self.value_test("Magnetic field r component - real", point["Hrr"], -8.69388e-4)
        self.value_test("Magnetic field r component - imag", point["Hir"], -0.00157)
        self.value_test("Magnetic field z component - real", point["Hrz"], 7.3442e-4)
        self.value_test("Magnetic field z component - imag", point["Hiz"], 1.06797e-4)
        self.value_test("Magnetic flux density", point["B"], 2.4447e-9)
        self.value_test("Magnetic flux density r component - real", point["Brr"], -1.09251e-9)
        self.value_test("Magnetic flux density r component - imag", point["Bir"], -1.9781e-9)
        self.value_test("Magnetic flux density z component - real", point["Brz"], 9.2291e-10)
        self.value_test("Magnetic flux density z component - imag", point["Biz"], 1.34204e-10)
        self.value_test("Poynting vector r component", point["Nr"], 1.98583e-4)
        self.value_test("Poynting vector z component", point["Nz"], 2.47866e-4)
if __name__ == '__main__':
    import unittest as ut
    # Run both the planar and axisymmetric RF TE cases with the
    # Agros2D-specific result reporter.
    suite = ut.TestSuite()
    result = Agros2DTestResult()
    suite.addTest(ut.TestLoader().loadTestsFromTestCase(TestRFTEHarmonicPlanar))
    suite.addTest(ut.TestLoader().loadTestsFromTestCase(TestRFTEHarmonicAxisymmetric))
    suite.run(result)
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from . import account_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| sysadminmatmoz/ingadhoc | account_invoice_commercial/__init__.py | Python | agpl-3.0 | 366 |
# -*- coding: utf-8 -*-
"""
modulestore Django application initialization.
"""
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
class ModulestoreConfig(AppConfig):
    """
    Configuration for the modulestore Django application.
    """
    # Dotted Python path Django uses to register this application.
    name = 'modulestore'
| bmedx/modulestore | modulestore/apps.py | Python | apache-2.0 | 310 |
from scrapy.utils.markup import *
def removeTagsAndWhiteSpaces(text):
    """Strip comments, scripts and markup; remaining <br> tags become newlines."""
    without_comments = remove_comments(text)
    without_scripts = remove_tags_with_content(without_comments,
                                               which_ones=('script',))
    # Drop every tag except <br>, then turn the surviving <br> into '\n'.
    only_br = remove_tags(without_scripts, keep=('br',))
    return replace_tags(only_br, token='\n')
def removeTopTag(text):
    """Return the content between the first '>' and the last '<' of text."""
    after_open = text.partition(">")[2]
    return after_open.rpartition("<")[0]
import cloud
| ichi23de5/ichi_Repo | cloud_management/models/__init__.py | Python | gpl-3.0 | 13 |
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""chroot_lib unit tests."""
from __future__ import print_function
import os
import sys
from chromite.lib import chroot_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class ChrootTest(cros_test_lib.TempDirTestCase):
  """Chroot class tests."""
  def testGetEnterArgsEmpty(self):
    """Test empty instance behavior."""
    chroot = chroot_lib.Chroot()
    self.assertFalse(chroot.get_enter_args())
  def testGetEnterArgsAll(self):
    """Test complete instance behavior."""
    path = '/chroot/path'
    cache_dir = '/cache/dir'
    chrome_root = '/chrome/root'
    expected = ['--chroot', path, '--cache-dir', cache_dir,
                '--chrome-root', chrome_root]
    chroot = chroot_lib.Chroot(path=path, cache_dir=cache_dir,
                               chrome_root=chrome_root)
    # Order-insensitive comparison: flag ordering is not part of the contract.
    self.assertCountEqual(expected, chroot.get_enter_args())
  def testEnv(self):
    """Test the env handling."""
    env = {'VAR': 'val'}
    chroot = chroot_lib.Chroot(env=env)
    self.assertEqual(env, chroot.env)
  def testTempdir(self):
    """Test the tempdir functionality."""
    chroot = chroot_lib.Chroot(path=self.tempdir)
    osutils.SafeMakedirs(chroot.tmp)
    self.assertEqual(os.path.join(self.tempdir, 'tmp'), chroot.tmp)
    # The tempdir context must create inside chroot.tmp and clean up on exit.
    with chroot.tempdir() as tempdir:
      self.assertStartsWith(tempdir, chroot.tmp)
    self.assertNotExists(tempdir)
  def testExists(self):
    """Test chroot exists."""
    chroot = chroot_lib.Chroot(self.tempdir)
    self.assertTrue(chroot.exists())
    chroot = chroot_lib.Chroot(os.path.join(self.tempdir, 'DOES_NOT_EXIST'))
    self.assertFalse(chroot.exists())
  def testChrootPath(self):
    """Test chroot_path functionality."""
    chroot = chroot_lib.Chroot(self.tempdir)
    path1 = os.path.join(self.tempdir, 'some/path')
    path2 = '/bad/path'
    # Make sure that it gives an absolute path inside the chroot.
    self.assertEqual('/some/path', chroot.chroot_path(path1))
    # Make sure it raises an error for paths not inside the chroot.
    self.assertRaises(chroot_lib.ChrootError, chroot.chroot_path, path2)
  def testFullPath(self):
    """Test full_path functionality."""
    chroot = chroot_lib.Chroot(self.tempdir)
    path1 = 'some/path'
    path2 = '/some/path'
    # Make sure it's building out the path in the chroot.
    self.assertEqual(os.path.join(self.tempdir, path1), chroot.full_path(path1))
    # Make sure it can handle absolute paths.
    self.assertEqual(chroot.full_path(path1), chroot.full_path(path2))
  def testFullPathWithExtraArgs(self):
    """Test full_path functionality with extra args passed."""
    chroot = chroot_lib.Chroot(self.tempdir)
    path1 = 'some/path'
    # Leading slashes on the extra components must not reset the join.
    self.assertEqual(os.path.join(self.tempdir, 'some/path/abc/def/g/h/i'),
                     chroot.full_path(path1, '/abc', 'def', '/g/h/i'))
  def testHasPathSuccess(self):
    """Test has path for a valid path."""
    path = 'some/file.txt'
    tempdir_path = os.path.join(self.tempdir, path)
    osutils.Touch(tempdir_path, makedirs=True)
    chroot = chroot_lib.Chroot(self.tempdir)
    self.assertTrue(chroot.has_path(path))
  def testHasPathInvalidPath(self):
    """Test has path for a non-existent path."""
    chroot = chroot_lib.Chroot(self.tempdir)
    self.assertFalse(chroot.has_path('/does/not/exist'))
  def testHasPathVariadic(self):
    """Test multiple args to has path."""
    path = ['some', 'file.txt']
    tempdir_path = os.path.join(self.tempdir, *path)
    osutils.Touch(tempdir_path, makedirs=True)
    chroot = chroot_lib.Chroot(self.tempdir)
    self.assertTrue(chroot.has_path(*path))
  def testEqual(self):
    """__eq__ method sanity check."""
    path = '/chroot/path'
    cache_dir = '/cache/dir'
    chrome_root = '/chrome/root'
    env = {'USE': 'useflag',
           'FEATURES': 'feature'}
    chroot1 = chroot_lib.Chroot(path=path, cache_dir=cache_dir,
                                chrome_root=chrome_root, env=env)
    chroot2 = chroot_lib.Chroot(path=path, cache_dir=cache_dir,
                                chrome_root=chrome_root, env=env)
    chroot3 = chroot_lib.Chroot(path=path)
    chroot4 = chroot_lib.Chroot(path=path)
    self.assertEqual(chroot1, chroot2)
    self.assertEqual(chroot3, chroot4)
    self.assertNotEqual(chroot1, chroot3)
| endlessm/chromium-browser | third_party/chromite/lib/chroot_lib_unittest.py | Python | bsd-3-clause | 4,560 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/CheckChallengeResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE: generated by protoc (see header) -- regenerate rather than editing
# the descriptor data below by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Networking/Responses/CheckChallengeResponse.proto',
  package='POGOProtos.Networking.Responses',
  syntax='proto3',
  serialized_pb=_b('\n<POGOProtos/Networking/Responses/CheckChallengeResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"G\n\x16\x43heckChallengeResponse\x12\x16\n\x0eshow_challenge\x18\x01 \x01(\x08\x12\x15\n\rchallenge_url\x18\x02 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Message descriptor: CheckChallengeResponse with two fields,
# bool show_challenge (1) and string challenge_url (2).
_CHECKCHALLENGERESPONSE = _descriptor.Descriptor(
  name='CheckChallengeResponse',
  full_name='POGOProtos.Networking.Responses.CheckChallengeResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='show_challenge', full_name='POGOProtos.Networking.Responses.CheckChallengeResponse.show_challenge', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='challenge_url', full_name='POGOProtos.Networking.Responses.CheckChallengeResponse.challenge_url', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=97,
  serialized_end=168,
)
DESCRIPTOR.message_types_by_name['CheckChallengeResponse'] = _CHECKCHALLENGERESPONSE
CheckChallengeResponse = _reflection.GeneratedProtocolMessageType('CheckChallengeResponse', (_message.Message,), dict(
  DESCRIPTOR = _CHECKCHALLENGERESPONSE,
  __module__ = 'POGOProtos.Networking.Responses.CheckChallengeResponse_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.CheckChallengeResponse)
  ))
_sym_db.RegisterMessage(CheckChallengeResponse)
# @@protoc_insertion_point(module_scope)
| favll/pogom | pogom/pgoapi/protos/POGOProtos/Networking/Responses/CheckChallengeResponse_pb2.py | Python | mit | 2,768 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: drop Drink.endtime and make Drink.duration non-nullable."""
        # Deleting field 'Drink.endtime'
        db.delete_column('core_drink', 'endtime')
        # Changing field 'Drink.duration'
        db.alter_column('core_drink', 'duration', self.gf('django.db.models.fields.PositiveIntegerField')())
def backwards(self, orm):
# Adding field 'Drink.endtime'
db.add_column('core_drink', 'endtime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now()), keep_default=False)
# Changing field 'Drink.duration'
db.alter_column('core_drink', 'duration', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beerdb.beerimage': {
'Meta': {'object_name': 'BeerImage'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'beerdb.beerstyle': {
'Meta': {'object_name': 'BeerStyle'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'beerdb.beertype': {
'Meta': {'object_name': 'BeerType'},
'abv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'brewer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.Brewer']"}),
'calories_oz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'carbs_oz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'edition': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'beers'", 'null': 'True', 'to': "orm['beerdb.BeerImage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'specific_gravity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.BeerStyle']"})
},
'beerdb.brewer': {
'Meta': {'object_name': 'Brewer'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'country': ('pykeg.core.fields.CountryField', [], {'default': "'USA'", 'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'brewers'", 'null': 'True', 'to': "orm['beerdb.BeerImage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin_city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'origin_state': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'production': ('django.db.models.fields.CharField', [], {'default': "'commercial'", 'max_length': '128'}),
'revision': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.authenticationtoken': {
'Meta': {'unique_together': "(('site', 'seqn', 'auth_device', 'token_value'),)", 'object_name': 'AuthenticationToken'},
'auth_device': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tokens'", 'to': "orm['core.KegbotSite']"}),
'token_value': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'core.bac': {
'Meta': {'object_name': 'BAC'},
'bac': ('django.db.models.fields.FloatField', [], {}),
'drink': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rectime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.config': {
'Meta': {'object_name': 'Config'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'configs'", 'to': "orm['core.KegbotSite']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'core.drink': {
'Meta': {'ordering': "('-starttime',)", 'unique_together': "(('site', 'seqn'),)", 'object_name': 'Drink'},
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'drinks'", 'to': "orm['core.KegbotSite']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'valid'", 'max_length': '128'}),
'ticks': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'drinks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.drinkingsession': {
'Meta': {'ordering': "('-starttime',)", 'unique_together': "(('site', 'seqn'),)", 'object_name': 'DrinkingSession'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': "orm['core.KegbotSite']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'blank': 'True', 'null': 'True', 'populate_from': 'None', 'db_index': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.keg': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'Keg'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enddate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'origcost': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'kegs'", 'to': "orm['core.KegbotSite']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegSize']"}),
'startdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beerdb.BeerType']"})
},
'core.kegbotsite': {
'Meta': {'object_name': 'KegbotSite'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'core.kegsessionchunk': {
'Meta': {'ordering': "('-starttime',)", 'unique_together': "(('session', 'keg'),)", 'object_name': 'KegSessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'keg_session_chunks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keg_chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.kegsize': {
'Meta': {'object_name': 'KegSize'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.kegstats': {
'Meta': {'object_name': 'KegStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['core.Keg']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"})
},
'core.kegtap': {
'Meta': {'object_name': 'KegTap'},
'current_keg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Keg']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_tick_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'meter_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'ml_per_tick': ('django.db.models.fields.FloatField', [], {'default': '0.45454545454545453'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'temperature_sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']", 'null': 'True', 'blank': 'True'})
},
'core.relaylog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'RelayLog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relaylogs'", 'to': "orm['core.KegbotSite']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.sessionchunk': {
'Meta': {'ordering': "('-starttime',)", 'unique_together': "(('session', 'user', 'keg'),)", 'object_name': 'SessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_chunks'", 'null': 'True', 'to': "orm['core.Keg']"}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_chunks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.sessionstats': {
'Meta': {'object_name': 'SessionStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"})
},
'core.systemevent': {
'Meta': {'ordering': "('-when', '-id')", 'object_name': 'SystemEvent'},
'drink': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.Keg']"}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['core.DrinkingSession']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['auth.User']"}),
'when': ('django.db.models.fields.DateTimeField', [], {})
},
'core.systemstats': {
'Meta': {'object_name': 'SystemStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"})
},
'core.thermolog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'Thermolog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermologs'", 'to': "orm['core.KegbotSite']"}),
'temp': ('django.db.models.fields.FloatField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.thermosensor': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'ThermoSensor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nice_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermosensors'", 'to': "orm['core.KegbotSite']"})
},
'core.thermosummarylog': {
'Meta': {'unique_together': "(('site', 'seqn'),)", 'object_name': 'ThermoSummaryLog'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_temp': ('django.db.models.fields.FloatField', [], {}),
'mean_temp': ('django.db.models.fields.FloatField', [], {}),
'min_temp': ('django.db.models.fields.FloatField', [], {}),
'num_readings': ('django.db.models.fields.PositiveIntegerField', [], {}),
'period': ('django.db.models.fields.CharField', [], {'default': "'daily'", 'max_length': '64'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"}),
'seqn': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thermosummarylogs'", 'to': "orm['core.KegbotSite']"})
},
'core.userpicture': {
'Meta': {'object_name': 'UserPicture'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'gender': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mugshot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserPicture']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {})
},
'core.usersessionchunk': {
'Meta': {'ordering': "('-starttime',)", 'unique_together': "(('session', 'user'),)", 'object_name': 'UserSessionChunk'},
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_chunks'", 'to': "orm['core.DrinkingSession']"}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_session_chunks'", 'null': 'True', 'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'core.userstats': {
'Meta': {'object_name': 'UserStats'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegbotSite']"}),
'stats': ('pykeg.core.jsonfield.JSONField', [], {'default': "'{}'"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['core']
| Alwnikrotikz/kegbot | pykeg/src/pykeg/core/migrations/0049_remove_drink_endtime.py | Python | gpl-2.0 | 26,331 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-24 15:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Email.recipient`` to ``EmailField(max_length=254)`` with a DB index."""

    # Must run after the previous emailer migration.
    dependencies = [
        ('emailer', '0007_auto_20150509_1922'),
    ]

    operations = [
        migrations.AlterField(
            model_name='email',
            name='recipient',
            field=models.EmailField(db_index=True, max_length=254),
        ),
    ]
| JustinWingChungHui/okKindred | emailer/migrations/0008_auto_20151224_1528.py | Python | gpl-2.0 | 467 |
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
    """Regression test for the ``--no-comment`` option of uftrace."""

    def __init__(self):
        # Expected trace output for the 't-abc' fixture.  The literal below
        # is compared against actual uftrace output, so its exact spacing
        # matters -- do not reformat.
        TestBase.__init__(self, 'abc', """
# DURATION TID FUNCTION
0.508 us [ 772] | __monstartup();
0.425 us [ 772] | __cxa_atexit();
[ 772] | main() {
[ 772] | a() {
[ 772] | b() {
[ 772] | c() {
0.419 us [ 772] | getpid();
0.844 us [ 772] | }
1.037 us [ 772] | }
1.188 us [ 772] | }
1.378 us [ 772] | }
""")

    def runcmd(self):
        # Run the recorded 't-abc' binary through uftrace with --no-comment,
        # so closing braces must appear without trailing comments.
        return '%s --no-comment %s' % (TestBase.ftrace, 't-abc')
| andrewjss/uftrace | tests/t072_no_comment.py | Python | gpl-2.0 | 621 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Database script functions."""
from __future__ import print_function
import os
import sys
import shutil
import datetime
from pipes import quote
from flask import current_app
from six import iteritems
from invenio.ext.script import Manager, change_command_name, print_progress
# Top-level command group; the functions below register themselves on it
# as ``inveniomanage database ...`` sub-commands.
manager = Manager(usage="Perform database operations")

# Shortcuts for manager options to keep code DRY.
option_yes_i_know = manager.option('--yes-i-know', action='store_true',
                                   dest='yes_i_know', help='use with care!')
option_quiet = manager.option('--quiet', action='store_true',
                              dest='quiet', help='show less output')
option_default_data = manager.option(
    '--no-data', action='store_false',
    dest='default_data',
    help='do not populate tables with default data'
)
@manager.option('-u', '--user', dest='user', default="root")
@manager.option('-p', '--password', dest='password', default="")
@option_yes_i_know
def init(user='root', password='', yes_i_know=False):
    """Initialize database and user.

    Drops and recreates the configured MySQL database and grants the
    configured application user access to it, by shelling out to the
    ``mysql`` / ``mysqladmin`` command-line clients with the given admin
    credentials.  Only acts when the SQLAlchemy engine is MySQL.
    """
    from invenio.ext.sqlalchemy import db
    from invenio.utils.text import wrap_text_in_a_box, wait_for_user

    ## Step 0: confirm deletion
    wait_for_user(wrap_text_in_a_box(
        "WARNING: You are going to destroy your database tables! Run first"
        " `inveniomanage database drop`."
    ))

    ## Step 1: drop database and recreate it
    if db.engine.name == 'mysql':
        #FIXME improve escaping
        # NOTE(review): config values get '$' escaped and are shell-quoted
        # via pipes.quote(), but the admin *user* and *password* arguments
        # are interpolated unquoted into the command lines below -- verify
        # they never contain shell metacharacters before trusting this.
        args = dict((k, str(v).replace('$', '\$'))
                    for (k, v) in iteritems(current_app.config)
                    if k.startswith('CFG_DATABASE'))
        args = dict(zip(args, map(quote, args.values())))
        prefix = ('{cmd} -u {user} --password={password} '
                  '-h {CFG_DATABASE_HOST} -P {CFG_DATABASE_PORT} ')
        cmd_prefix = prefix.format(cmd='mysql', user=user, password=password,
                                   **args)
        cmd_admin_prefix = prefix.format(cmd='mysqladmin', user=user,
                                         password=password,
                                         **args)
        cmds = [
            cmd_prefix + '-e "DROP DATABASE IF EXISTS {CFG_DATABASE_NAME}"',
            (cmd_prefix + '-e "CREATE DATABASE IF NOT EXISTS '
             '{CFG_DATABASE_NAME} DEFAULT CHARACTER SET utf8 '
             'COLLATE utf8_general_ci"'),
            # Create user and grant access to database.
            (cmd_prefix + '-e "GRANT ALL PRIVILEGES ON '
             '{CFG_DATABASE_NAME}.* TO {CFG_DATABASE_USER}@localhost '
             'IDENTIFIED BY {CFG_DATABASE_PASS}"'),
            cmd_admin_prefix + 'flush-privileges'
        ]
        for cmd in cmds:
            cmd = cmd.format(**args)
            print(cmd)
            # Abort on the first failing shell command.
            if os.system(cmd):
                print("ERROR: failed execution of", cmd, file=sys.stderr)
                sys.exit(1)
        print('>>> Database has been installed.')
@option_yes_i_know
@option_quiet
def drop(yes_i_know=False, quiet=False):
    """Drop database tables and the filesystem data tied to them.

    Asks for interactive confirmation, then removes custom events,
    per-record bibdoc directories (via a ``before_drop`` listener) and
    finally every table and registered storage engine.

    :param yes_i_know: accepted for CLI symmetry; confirmation is still
        requested interactively via ``wait_for_user``.
    :param quiet: if True, print a single summary line per phase instead
        of a progress bar.
    """
    print(">>> Going to drop tables and related data on filesystem ...")

    from sqlalchemy import event
    from invenio.utils.date import get_time_estimator
    from invenio.utils.text import wrap_text_in_a_box, wait_for_user
    from invenio.ext.sqlalchemy.utils import test_sqla_connection, test_sqla_utf8_chain
    from invenio.ext.sqlalchemy import db, models
    from invenio.legacy.bibdocfile.api import _make_base_dir
    from invenio.modules.jsonalchemy.wrappers import StorageEngine

    ## Step 0: confirm deletion
    wait_for_user(wrap_text_in_a_box(
        "WARNING: You are going to destroy your database tables and related "
        "data on filesystem!"))

    ## Step 1: test database connection
    test_sqla_connection()
    test_sqla_utf8_chain()
    list(models)

    ## Step 2: disable foreign key checks
    if db.engine.name == 'mysql':
        db.engine.execute('SET FOREIGN_KEY_CHECKS=0;')

    ## Step 3: destroy associated data
    try:
        from invenio.legacy.webstat.api import destroy_customevents
        msg = destroy_customevents()
        if msg:
            print(msg)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; customevents removal stays best-effort.
        print("ERROR: Could not destroy customevents.")

    ## FIXME: move to bibedit_model
    def bibdoc_before_drop(target, connection_dummy, **kw_dummy):
        # BUGFIX: this was a bare ``print`` statement-expression which,
        # under ``print_function``, only references the builtin and prints
        # nothing; call it to emit the intended blank line.
        print()
        print(">>> Going to remove records data...")
        for (docid,) in db.session.query(target.c.id).all():
            directory = _make_base_dir(docid)
            if os.path.isdir(directory):
                print(' >>> Removing files for docid =', docid)
                shutil.rmtree(directory)
        db.session.commit()
        print(">>> Data has been removed.")

    from invenio.modules.editor.models import Bibdoc
    event.listen(Bibdoc.__table__, "before_drop", bibdoc_before_drop)

    # Drop in reverse dependency order so FK targets go last.
    tables = list(reversed(db.metadata.sorted_tables))

    def _dropper(items, prefix, dropper):
        """Apply *dropper* to every item, reporting progress and a summary."""
        N = len(items)
        prefix = prefix.format(N)
        e = get_time_estimator(N)
        dropped = 0

        if quiet:
            print(prefix)

        for i, table in enumerate(items):
            try:
                if not quiet:
                    print_progress(
                        1.0 * (i+1) / N, prefix=prefix,
                        suffix=str(datetime.timedelta(seconds=e()[0])))
                dropper(table)
                dropped += 1
            except Exception:
                # Narrowed from a bare ``except:``; log the failure and
                # keep going with the remaining items.
                print('\r', '>>> problem with dropping ', table)
                current_app.logger.exception(table)

        if dropped == N:
            print(">>> Everything has been dropped successfully.")
        else:
            print("ERROR: not all items were properly dropped.")
            print(">>> Dropped", dropped, 'out of', N)

    _dropper(tables, '>>> Dropping {0} tables ...',
             lambda table: table.drop(bind=db.engine))
    _dropper(StorageEngine.__storage_engine_registry__,
             '>>> Dropping {0} storage engines ...',
             lambda api: api.storage_engine.drop())
@option_default_data
@option_quiet
def create(default_data=True, quiet=False):
    """Create database tables from sqlalchemy models.

    :param default_data: accepted from the CLI option; NOTE(review): the
        body currently never reads it -- populating default data appears
        to be handled elsewhere.  Confirm before removing.
    :param quiet: if True, print a single summary line per phase instead
        of a progress bar.
    """
    print(">>> Going to create tables...")

    from sqlalchemy import event
    from invenio.utils.date import get_time_estimator
    from invenio.ext.sqlalchemy.utils import test_sqla_connection, test_sqla_utf8_chain
    from invenio.ext.sqlalchemy import db, models
    from invenio.modules.jsonalchemy.wrappers import StorageEngine

    test_sqla_connection()
    test_sqla_utf8_chain()
    list(models)

    def cfv_after_create(target, connection, **kw):
        # BUGFIX: this was a bare ``print`` statement-expression which,
        # under ``print_function``, only references the builtin and prints
        # nothing; call it to emit the intended blank line.
        print()
        print(">>> Modifing table structure...")
        from invenio.legacy.dbquery import run_sql
        run_sql('ALTER TABLE collection_field_fieldvalue DROP PRIMARY KEY')
        run_sql('ALTER TABLE collection_field_fieldvalue ADD INDEX id_collection(id_collection)')
        run_sql('ALTER TABLE collection_field_fieldvalue CHANGE id_fieldvalue id_fieldvalue mediumint(9) unsigned')
        #print(run_sql('SHOW CREATE TABLE collection_field_fieldvalue'))

    from invenio.modules.search.models import CollectionFieldFieldvalue
    event.listen(CollectionFieldFieldvalue.__table__, "after_create", cfv_after_create)

    tables = db.metadata.sorted_tables

    def _creator(items, prefix, creator):
        """Apply *creator* to every item, reporting progress and a summary."""
        N = len(items)
        prefix = prefix.format(N)
        e = get_time_estimator(N)
        created = 0

        if quiet:
            print(prefix)

        for i, table in enumerate(items):
            try:
                if not quiet:
                    print_progress(
                        1.0 * (i+1) / N, prefix=prefix,
                        suffix=str(datetime.timedelta(seconds=e()[0])))
                creator(table)
                created += 1
            except Exception:
                # Narrowed from a bare ``except:``; log the failure and
                # keep going with the remaining items.
                print('\r', '>>> problem with creating ', table)
                current_app.logger.exception(table)

        if created == N:
            print(">>> Everything has been created successfully.")
        else:
            print("ERROR: not all items were properly created.")
            print(">>> Created", created, 'out of', N)

    _creator(tables, '>>> Creating {0} tables ...',
             lambda table: table.create(bind=db.engine))
    _creator(StorageEngine.__storage_engine_registry__,
             '>>> Creating {0} storage engines ...',
             lambda api: api.storage_engine.create())
@manager.command
def dump():
    """Export all the tables, similar to `dbdump`."""
    message = '>>> Dumping the DataBase.'
    print(message)
@manager.command
def diff():
    """Diff database against SQLAlchemy models."""
    # Guard clause: the schema diff relies on the optional
    # sqlalchemy-migrate package; bail out with install instructions.
    try:
        from migrate.versioning import schemadiff  # noqa
    except ImportError:
        hint_lines = (
            ">>> Required package sqlalchemy-migrate is not installed. "
            "Please install with:",
            ">>> pip install sqlalchemy-migrate",
        )
        for hint in hint_lines:
            print(hint)
        return

    from invenio.ext.sqlalchemy import db
    print(db.schemadiff())
@option_yes_i_know
@option_default_data
@option_quiet
def recreate(yes_i_know=False, default_data=True, quiet=False):
    """Recreate database tables (same as issuing 'drop' and then 'create').

    :param yes_i_know: forwarded to :func:`drop` (previously it was
        accepted from the CLI but silently discarded).
    :param default_data: forwarded to :func:`create`.
    :param quiet: forwarded to both phases.
    """
    drop(yes_i_know=yes_i_know, quiet=quiet)
    create(default_data=default_data, quiet=quiet)
@manager.command
def uri():
    """Print SQLAlchemy database uri."""
    from flask import current_app

    db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
    print(db_uri)
def version():
    """Return the running version of the database driver.

    Falls back to the MySQLdb module's version when the engine's DB-API
    module cannot be queried (e.g. no ``__version__`` attribute).
    """
    from invenio.ext.sqlalchemy import db
    try:
        return db.engine.dialect.dbapi.__version__
    except Exception:
        # Narrowed from a bare ``except``: any lookup failure falls back
        # to the MySQLdb driver version instead of masking SystemExit etc.
        import MySQLdb
        return MySQLdb.__version__
@manager.option('-v', '--verbose', action='store_true', dest='verbose',
                help='Display more details (driver version).')
@change_command_name
def driver_info(verbose=False):
    """Return the name of the running database driver.

    :param verbose: when True, append ``==<version>`` to the driver name.
    """
    from invenio.ext.sqlalchemy import db
    try:
        # Narrowed from a bare ``except``: only the name lookup is guarded.
        name = db.engine.dialect.dbapi.__name__
    except Exception:
        import MySQLdb
        name = MySQLdb.__name__
    # Suffix computed once for both branches (was duplicated before).
    return name + (('==' + version()) if verbose else '')
@manager.option('-l', '--line-format', dest='line_format', default="%s: %s")
@manager.option('-s', '--separator', dest='separator', default="\n")
@change_command_name
def mysql_info(separator=None, line_format=None):
    """Detect and print MySQL details.

    Useful for debugging problems on various OS.

    :param separator: when given, return the formatted lines joined with
        this separator; otherwise return a dict of the collected values.
    :param line_format: ``%``-style format applied to each ``(key, value)``
        pair when *separator* is used (defaults to ``"%s: %s"``).
    :raises Exception: if the current database engine is not MySQL.
    """
    from invenio.ext.sqlalchemy import db
    if db.engine.name != 'mysql':
        raise Exception('Database engine is not mysql.')
    from invenio.legacy.dbquery import run_sql
    # Only these server variables are of interest; everything else returned
    # by the LIKE queries is ignored.  (A dead ``if False:`` debug branch
    # that dumped every variable was removed.)
    interesting = ('version',
                   'character_set_client',
                   'character_set_connection',
                   'character_set_database',
                   'character_set_results',
                   'character_set_server',
                   'character_set_system',
                   'collation_connection',
                   'collation_database',
                   'collation_server')
    out = []
    for key, val in run_sql("SHOW VARIABLES LIKE 'version%'") + \
            run_sql("SHOW VARIABLES LIKE 'charact%'") + \
            run_sql("SHOW VARIABLES LIKE 'collat%'"):
        if key in interesting:
            out.append((key, val))
    if separator is not None:
        if line_format is None:
            line_format = "%s: %s"
        return separator.join(line_format % item for item in out)
    return dict(out)
def main():
    """Main."""
    from invenio.base.factory import create_app
    # Build the Flask application and hand it to the script manager.
    application = create_app()
    manager.app = application
    manager.run()
# Allow running this module directly as a command-line script.
if __name__ == '__main__':
    main()
| egabancho/invenio | invenio/base/scripts/database.py | Python | gpl-2.0 | 12,655 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.