import argparse
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.resource import Resource
import os
from balbec.jsonhandler import JSONHandler
from balbec.xmlhandler import XmlHandler
def ROOT(base):
    return os.path.join(os.path.dirname(__file__), base).replace('\\', '/')
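# Illustrative example (path is hypothetical): ROOT("static/index.html")
# resolves to something like "/opt/balbec/static/index.html", with forward
# slashes regardless of platform.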
class StatusPage(Resource):
isLeaf = True
config_dir = None
def render_GET(self, request):
        # Use .get() so a missing Accept header falls back to the HTML page
        # instead of raising a KeyError.
        accept = request.received_headers.get("accept", "")
        if accept == "text/xml":
            handler = XmlHandler(self.config_dir)
            output = handler.xml()
        elif accept == "application/json":
handler = JSONHandler(self.config_dir)
output = handler.json()
else:
            with open(ROOT("static/index.html")) as html_file:
                output = html_file.read()
return output
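# Content-negotiation sketch (hypothetical host, default port 8880):
#   curl -H "Accept: text/xml"         http://localhost:8880/  -> XML status
#   curl -H "Accept: application/json" http://localhost:8880/  -> JSON status
#   any other Accept value falls back to the static HTML index page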
def main():
parser = argparse.ArgumentParser(description='Run an instance of python-nagios-frontend.')
parser.add_argument('--port', dest='www_port', default=8880, help='Port for the webserver')
parser.add_argument('--configdir', dest='config_dir', default="/etc/python-nagios-frontend/", help='Path to the configuration files')
args = parser.parse_args()
resource = StatusPage()
resource.config_dir = args.config_dir
factory = Site(resource)
reactor.listenTCP(int(args.www_port), factory)
    reactor.run()

# ---- end of file: balbec/balbec_twisted.py (repo: KristianOellegaard/python-nagios-frontend, license: agpl-3.0) ----
from django import forms
from django.contrib.auth.models import User
from harnas.userprofile.models import UserProfile
class UserProfileEditForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('organization', 'personal_page', 'show_email', 'show_age')
class UserFieldsForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name')
# ---- end of file: harnas/userprofile/forms.py (repo: harnasproject/harnas, license: agpl-3.0) ----
"""
Calculates peak load per load area
"""
__copyright__ = "Reiner Lemoine Institut, Flensburg University of Applied Sciences, Centre for Sustainable Energy Systems"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/data_processing/blob/master/LICENSE"
__author__ = "gplssm, IlkaCu"
import pandas as pd
from workalendar.europe import Germany
from datetime import time as settime
import time
from sqlalchemy.orm import sessionmaker
from demandlib import bdew as bdew, particular_profiles as profiles
from dataprocessing.tools import io, metadata
from egoio.db_tables.model_draft import EgoDemandLoadareaPeakLoad as orm_peak_load
from oemof.db import tools
from dataprocessing.python_scripts.functions.ego_scenario_log import write_ego_scenario_log
def get_load_areas_table(schema, table, index_col, conn, columns=None):
r"""Retrieve load areas intermediate results table from oedb
"""
# retrieve table with processed input data
load_areas = pd.read_sql_table(table, conn, schema=schema,
index_col=index_col, columns=columns)
return load_areas
def add_sectoral_peak_load(load_areas, **kwargs):
r"""Add peak load per sector based on given annual consumption
"""
# define data year
# TODO: in the future get this from somewhere else
year = 2011
# call demandlib
# TODO: change to use new demandlib
# read standard load profiles
e_slp = bdew.ElecSlp(year, holidays=holidays)
# multiply given annual demand with timeseries
# elec_demand = e_slp.get_profile(load_areas['h0', 'g0', 'l0', 'i0'].to_dict())
elec_demand = e_slp.get_profile(load_areas.to_dict())
# tmp_peak_load = dm.electrical_demand(method='calculate_profile',
# year=year,
# ann_el_demand_per_sector= {
# 'h0':
# load_areas['sector_consumption_residential'],
# 'g0':
# load_areas['sector_consumption_retail'],
# 'i0':
# load_areas['sector_consumption_industrial'],
# 'l0':
# load_areas['sector_consumption_agricultural']}
# ).elec_demand
# hack correct industrial profile into dataframe
# print(load_areas['sector_consumption_industrial'])
# if load_areas['sector_consumption_industrial'] == 0:
# load_areas['sector_consumption_industrial'] = 0.1
# Add the slp for the industrial group
ilp = profiles.IndustrialLoadProfile(e_slp.date_time_index,
holidays=holidays)
# Beginning and end of workday, weekdays and weekend days, and scaling factors
# by default
elec_demand['i0'] = ilp.simple_profile(
load_areas['i0'],
am=settime(6, 0, 0),
pm=settime(22, 0, 0),
profile_factors=
{'week': {'day': 0.8, 'night': 0.6},
'weekend': {'day': 0.6, 'night': 0.6}})
    # Resample 15-minute values to hourly means, then take each sector's
    # peak (column-wise max)
    elec_demand = elec_demand.resample('H').mean().fillna(0).max().to_frame().T
# demand_industry = eb.IndustrialLoadProfile('simple_industrial_profile',
# **{'annual_demand': load_areas['sector_consumption_industrial'],
# 'year': year,
# 'am': settime(6, 0, 0),
# 'pm': settime(22, 0, 0),
# 'profile_factors':
# {'week': {'day': 0.8, 'night': 0.6},
# 'weekend': {'day': 0.6, 'night': 0.6}}
# })
# ind_demand = demand_industry.profile
# elec_demand['i0'] = ind_demand
peak_load = elec_demand.max(axis=0)
return peak_load
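# Usage sketch (values illustrative; assumes the module-level ``holidays``
# dict is defined and columns are renamed to the SLP profile names):
#
#   row = load_areas.fillna(0).iloc[0]    # one load area as a pandas Series
#   peak = add_sectoral_peak_load(row)    # peak load per sector (h0/g0/l0/i0)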
if __name__ == '__main__':
la_index_col = 'id'
schema = 'model_draft'
table = 'ego_demand_loadarea'
target_table = 'ego_demand_loadarea_peak_load'
year = 2011
db_group = 'oeuser'
cal = Germany()
holidays = dict(cal.holidays(2011))
# get database connection object
conn = io.oedb_session(section='oedb')
Session = sessionmaker(bind=conn)
session = Session()
# retrieve load areas table
columns = [la_index_col,
'sector_consumption_residential',
'sector_consumption_retail',
'sector_consumption_industrial',
'sector_consumption_agricultural']
load_areas = get_load_areas_table(schema, table, la_index_col, conn,
columns=columns)
write_ego_scenario_log(conn=conn,
version='v0.4.5',
io='input',
schema='model_draft',
table=table,
script='ego_dp_loadarea_peakload.py',
entries=len(load_areas))
names_dc = {'sector_consumption_residential': 'h0',
'sector_consumption_retail': 'g0',
'sector_consumption_agricultural': 'l0',
'sector_consumption_industrial': 'i0',}
names_dc2 = {'h0': 'residential',
'g0': 'retail',
'l0': 'agricultural',
'i0': 'industrial'}
# rename columns to demandlib compatible names
load_areas.rename(columns=names_dc, inplace=True)
# # delete old content from table
# del_str = "DROP TABLE IF EXISTS {0}.{1} CASCADE;".format(
# schema, target_table)
# conn.execute(del_str)
# empty table or create
try:
orm_peak_load.__table__.create(conn)
    except Exception:
session.query(orm_peak_load).delete()
session.commit()
# Use above function `add_sectoral_peak_load` via apply
# elec_demand = load_areas.fillna(0).apply(
# add_sectoral_peak_load, axis=1, args=())
# read standard load profiles
e_slp = bdew.ElecSlp(year, holidays=holidays)
# Add the slp for the industrial group
ilp = profiles.IndustrialLoadProfile(e_slp.date_time_index,
holidays=holidays)
# counter
ctr = 0
    # iterate over load areas, retrieving the sectoral demand of each
for it, row in load_areas.iterrows():
row = row.fillna(0)
# multiply given annual demand with timeseries
elec_demand = e_slp.get_profile(row.to_dict())
# Beginning and end of workday, weekdays and weekend days, and scaling factors
# by default
elec_demand['i0'] = ilp.simple_profile(
row['i0'],
am=settime(6, 0, 0),
pm=settime(22, 0, 0),
profile_factors=
{'week': {'day': 0.8, 'night': 0.6},
'weekend': {'day': 0.6, 'night': 0.6}})
        # Resample 15-minute values to hourly means, then take each sector's
        # peak (column-wise max)
        elec_demand = elec_demand.resample('H').mean().fillna(0).max().to_frame().T
elec_demand['id'] = it
elec_demand.set_index('id', inplace=True)
# rename columns
elec_demand.rename(columns=names_dc2, inplace=True)
# Add data to orm object
peak_load = orm_peak_load(
id=it,
retail=float(elec_demand['retail']),
residential=float(elec_demand['residential']),
industrial=float(elec_demand['industrial']),
agricultural=float(elec_demand['agricultural']))
session.add(peak_load)
# # write results to new database table
# elec_demand.to_sql(target_table,
# conn,
# schema=schema,
# index=True,
# if_exists='fail')
ctr += 1
        # Commit to the database every 1000 datasets: pushing every single
        # dataset slows down the entire script, while a single commit at the
        # end sometimes leads to a connection timeout.
if (ctr % 1000) == 0:
session.commit()
# commit remaining datasets that were not committed in loop above
session.commit()
# grant access to db_group
tools.grant_db_access(conn, schema, target_table, db_group)
# change owner of table to db_group
tools.change_owner_to(conn, schema, target_table, db_group)
# # add primary key constraint on id column
# tools.add_primary_key(conn, schema, target_table, la_index_col)
# create metadata json str
json_str = metadata.create_metadata_json(
'Peak load per load area',
'',
'2011',
time.strftime("%d.%m.%Y"),
'Open Energy Database, schema: {0}, table: {1}'.format(schema,
target_table),
'Germany',
'Sectoral peak of single load areas based on synthetic standard load ' +
'profiles.',
[{'Name': 'id',
'Description': 'Unique identifier',
'Unit': '-'},
{'Name': 'g0',
'Description': 'Peak demand of retail sector',
'Unit': 'GW'},
{'Name': 'h0',
'Description': 'Peak demand of household sector',
'Unit': 'GW'},
{'Name': 'l0',
'Description': 'Peak demand of agricultural sector',
'Unit': 'GW'},
{'Name': 'i0',
'Description': 'Peak demand of industrial sector',
'Unit': 'GW'}
],
{'Name': 'Guido Pleßmann',
'Mail': 'guido.plessmann@rl-institut.de',
'Date': time.strftime("%d.%m.%Y"),
'Comment': 'Initial creation of dataset'},
'Be aware of applicability. Data bases on synthetic load profiles',
'',
''
)
metadata.submit_comment(conn, json_str, schema, target_table)
write_ego_scenario_log(conn=conn,
version='v0.4.5',
io='output',
schema='model_draft',
table=target_table,
script='ego_dp_loadarea_peakload.py',
entries=len(load_areas))
conn.close()
# ---- end of file: dataprocessing/python_scripts/ego_dp_loadarea_peakload.py (repo: openego/data_processing, license: agpl-3.0) ----
# Learn Python -- level 2 logic
# Copyright (C) 2013 Cornell FB Hackathon Team.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
player1._position = [5, 6]
_world.grid[player1._position[1]][player1._position[0]] = player1
player2._position = [14, 6]
_world.grid[player2._position[1]][player2._position[0]] = player2
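# Note: the grid is indexed [row][column], i.e. grid[y][x], which is why each
# position list is read back-to-front above.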
items = [Item("bread", [0, 2]),
Item("mushroom", [11, 9]),
Item("scepter", [5, 4]),
Item("banana", [17, 3]),
Item("bread", [10, 1]),
Item("sword", [8, 9])]
def at_end():
if (player1.inventory.count() != 3 or
player2.inventory.count() != 3):
raise Exception("Failure")
# ---- end of file: src/py/level3.py (repo: pniedzielski/fb-hackathon-2013-11-21, license: agpl-3.0) ----
from . import cmis_backend
from . import ir_model_fields
# ---- end of file: cmis_field/models/__init__.py (repo: acsone/alfodoo, license: agpl-3.0) ----
#!/usr/bin/env python3
"""
This script can be used to automatically pull translations from Transifex,
commit, push, and merge them to their respective repos.
To use, export an environment variable `GITHUB_ACCESS_TOKEN`. The token requires
GitHub's "repo" scope.
Run the script from the root of this repo.
python transifex/pull.py git@github.com:edx/course-discovery.git
If you want to use a custom merge method pass the --merge-method option.
python transifex/pull.py git@github.com:edx/course-discovery.git --merge-method rebase
If you want to skip the compile messages step, pass the --skip-compilemessages option.
python transifex/pull.py git@github.com:edx/course-discovery.git --skip-compilemessages
"""
import os
import shutil
from argparse import ArgumentParser
from utils import DEFAULT_MERGE_METHOD, MERGE_METHODS, logger, repo_context
# The name of the branch to use.
BRANCH_NAME = 'transifex-bot-update-translations'
# The commit message to use.
MESSAGE = 'chore(i18n): update translations'
# Environment variable needed to run paver compilejsi18n command
os.environ['LMS_CFG'] = '../lms.yml'
os.environ['STUDIO_CFG'] = '../studio.yml'
os.environ['REVISION_CFG'] = ''
os.environ['SKIP_NPM_INSTALL'] = 'True'
os.environ['LANG'] = 'C.UTF-8'
# Configuration repo to fetch lms/studio settings
CONFIGURATION_REPO_URL = 'https://github.com/edx/configuration.git'
def pull(clone_url, repo_owner, merge_method=DEFAULT_MERGE_METHOD, skip_compilemessages=False,
skip_check_changes=False):
"""Pulls translations for the given repo.
If applicable, commits them, pushes them to GitHub, opens a PR, waits for
status checks to pass, then merges the PR and deletes the branch.
"""
with repo_context(CONFIGURATION_REPO_URL, repo_owner, BRANCH_NAME, MESSAGE, merge_method=merge_method) as config_repo:
logger.info('Pulling lms/studio settings from [%s].', config_repo.name)
shutil.copy('./docker/build/edxapp/lms.yml', '../')
shutil.copy('./docker/build/edxapp/studio.yml', '../')
with repo_context(clone_url, repo_owner, BRANCH_NAME, MESSAGE, merge_method=merge_method) as repo:
logger.info('Pulling translations for [%s].', repo.name)
repo.pull_translations()
if skip_compilemessages:
logger.info('Skipping compilemessages.')
else:
compilemessages_succeeded = repo.compilemessages()
repo.commit_push_and_open_pr(skip_check_changes)
if repo.pr:
if not (skip_compilemessages or compilemessages_succeeded):
# Notify the team that message compilation failed.
repo.pr.create_issue_comment(
'@{owner} failing message compilation prevents this PR from being automatically merged. '
'Refer to the build log for more details.'.format(
owner=repo.owner
)
)
# Fail job immediately, without trying to merge the PR. We don't
# want to merge PRs without compiled messages.
raise RuntimeError('Failed to compile messages.')
repo.merge_pr()
def parse_arguments():
parser = ArgumentParser()
parser.add_argument(
'clone_url',
help='URL to use to clone the repository.'
)
parser.add_argument(
'repo_owner',
help='This is the user/team that will be pinged when errors occur.'
)
parser.add_argument(
'--merge-method',
choices=MERGE_METHODS,
default=DEFAULT_MERGE_METHOD,
help='Method to use when merging the PR. See https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-button for details.'
)
parser.add_argument(
'--skip-compilemessages',
action='store_true',
help='Skip the message compilation step.'
)
parser.add_argument(
'--skip-check-changes',
action='store_true',
help='Skip the check changes step.'
)
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
pull(
args.clone_url,
args.repo_owner,
merge_method=args.merge_method,
skip_compilemessages=args.skip_compilemessages,
skip_check_changes=args.skip_check_changes,
)
# ---- end of file: transifex/pull.py (repo: edx/ecommerce-scripts, license: agpl-3.0) ----
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2019 OpenCraft <contact@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
URL Patterns for api app
"""
# Imports #####################################################################
from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from drf_yasg.views import get_schema_view
from rest_framework.permissions import AllowAny
from api.auth import JWTAuthToken, JwtTokenRefresh, JwtTokenVerify
from api.router import v1_router, v2_router
from opencraft.swagger import api_info
# URL Patterns ################################################################
app_name = 'api'
# pylint: disable=invalid-name
schema_view = get_schema_view(
info=api_info,
public=True,
permission_classes=(AllowAny,),
)
urlpatterns = [
url(r'^$', RedirectView.as_view(url='v1/', permanent=False), name='index'),
# v1 urls
url(r'^v1/', include((v1_router.urls, 'api_v1'), namespace='v1')),
url(r'^v1/auth/', include('rest_framework.urls', namespace='rest_framework')),
# v2 urls
url(r'^v2/', include((v2_router.urls, 'api_v2'), namespace='v2')),
url(r'^v2/auth/token/', JWTAuthToken.as_view(), name='token_obtain_pair'),
    # The refresh and verify endpoints let clients check that a token is still
    # valid and refresh the access token, so a session can last longer than a
    # few minutes (see the token-flow sketch after ``urlpatterns`` below).
url(r'^v2/auth/refresh/', JwtTokenRefresh.as_view(), name='token_refresh'),
url(r'^v2/auth/verify/', JwtTokenVerify.as_view(), name='token_verify'),
# Reset password
url(r'^v2/password_reset/', include('django_rest_passwordreset.urls', namespace='password_reset')),
# Documentation
url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=10), name='schema-json'),
url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=10), name='schema-swagger-ui'),
url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=10), name='schema-redoc'),
]
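# Token-flow sketch (host and credentials are illustrative):
#   curl -X POST http://localhost:8000/api/v2/auth/token/ \
#        -d "username=alice" -d "password=secret"     # -> access/refresh pair
#   curl -X POST http://localhost:8000/api/v2/auth/refresh/ \
#        -d "refresh=<refresh token>"                 # -> fresh access token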
# ---- end of file: api/urls.py (repo: open-craft/opencraft, license: agpl-3.0) ----
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from dace.processdefinition.processdef import ProcessDefinition
from dace.processdefinition.activitydef import ActivityDefinition
from dace.processdefinition.gatewaydef import (
ExclusiveGatewayDefinition,
ParallelGatewayDefinition)
from dace.processdefinition.transitiondef import TransitionDefinition
from dace.processdefinition.eventdef import (
StartEventDefinition,
EndEventDefinition)
from dace.objectofcollaboration.services.processdef_container import (
process_definition)
from pontus.core import VisualisableElement
from .behaviors import (
Addapplications,
AddFacebookApplication,
AddTwitterApplication,
AddGoogleApplication,
SeeApplication,
EditApplication,
RemoveApplication
)
from lac import _
@process_definition(name='socialapplicationsprocess',
id='socialapplicationsprocess')
class SocialApplicationsProcess(ProcessDefinition, VisualisableElement):
isUnique = True
def __init__(self, **kwargs):
super(SocialApplicationsProcess, self).__init__(**kwargs)
self.title = _('Social applications process')
self.description = _('Social applications process')
def _init_definition(self):
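        # Process graph: start -> parallel gateway (pg) -> one branch per
        # activity (add / see / edit / remove application) -> exclusive
        # gateway (eg) -> end event.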
self.defineNodes(
start = StartEventDefinition(),
pg = ParallelGatewayDefinition(),
addapplication = ActivityDefinition(contexts=[Addapplications, AddFacebookApplication,
AddTwitterApplication, AddGoogleApplication],
description=_("Add a social application"),
title=_("Add a social application"),
groups=[]),
seeapplication = ActivityDefinition(contexts=[SeeApplication],
description=_("See the application"),
title=_("See the application"),
groups=[]),
editapplication = ActivityDefinition(contexts=[EditApplication],
description=_("Edit the application"),
title=_("Edit"),
groups=[]),
removeapplication = ActivityDefinition(contexts=[RemoveApplication],
description=_("Remove the application"),
title=_("Remove"),
groups=[]),
eg = ExclusiveGatewayDefinition(),
end = EndEventDefinition(),
)
self.defineTransitions(
TransitionDefinition('start', 'pg'),
TransitionDefinition('pg', 'addapplication'),
TransitionDefinition('addapplication', 'eg'),
TransitionDefinition('pg', 'seeapplication'),
TransitionDefinition('seeapplication', 'eg'),
TransitionDefinition('pg', 'editapplication'),
TransitionDefinition('editapplication', 'eg'),
TransitionDefinition('pg', 'removeapplication'),
TransitionDefinition('removeapplication', 'eg'),
TransitionDefinition('eg', 'end'),
)
# ---- end of file: lac/content/processes/social_applications_management/definition.py (repo: ecreall/lagendacommun, license: agpl-3.0) ----
""" Tests for utils. """
import collections
from datetime import datetime, timedelta
from pytz import UTC
from django.test import TestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions import UserPartition, Group
from openedx.core.djangoapps.site_configuration.tests.test_util import with_site_configuration_context
from contentstore import utils
from contentstore.tests.utils import CourseTestCase
class LMSLinksTestCase(TestCase):
""" Tests for LMS links. """
def lms_link_test(self):
""" Tests get_lms_link_for_item. """
course_key = SlashSeparatedCourseKey('mitX', '101', 'test')
location = course_key.make_usage_key('vertical', 'contacting_us')
link = utils.get_lms_link_for_item(location, False)
self.assertEquals(link, "//localhost:8000/courses/mitX/101/test/jump_to/i4x://mitX/101/vertical/contacting_us")
# test preview
link = utils.get_lms_link_for_item(location, True)
self.assertEquals(
link,
"//preview.localhost/courses/mitX/101/test/jump_to/i4x://mitX/101/vertical/contacting_us"
)
# now test with the course' location
location = course_key.make_usage_key('course', 'test')
link = utils.get_lms_link_for_item(location)
self.assertEquals(link, "//localhost:8000/courses/mitX/101/test/jump_to/i4x://mitX/101/course/test")
def lms_link_for_certificate_web_view_test(self):
""" Tests get_lms_link_for_certificate_web_view. """
course_key = SlashSeparatedCourseKey('mitX', '101', 'test')
dummy_user = ModuleStoreEnum.UserID.test
mode = 'professional'
self.assertEquals(
utils.get_lms_link_for_certificate_web_view(dummy_user, course_key, mode),
"//localhost:8000/certificates/user/{user_id}/course/{course_key}?preview={mode}".format(
user_id=dummy_user,
course_key=course_key,
mode=mode
)
)
with with_site_configuration_context(configuration={"course_org_filter": "mitX", "LMS_BASE": "dummyhost:8000"}):
self.assertEquals(
utils.get_lms_link_for_certificate_web_view(dummy_user, course_key, mode),
"//dummyhost:8000/certificates/user/{user_id}/course/{course_key}?preview={mode}".format(
user_id=dummy_user,
course_key=course_key,
mode=mode
)
)
class ExtraPanelTabTestCase(TestCase):
""" Tests adding and removing extra course tabs. """
def get_tab_type_dicts(self, tab_types):
""" Returns an array of tab dictionaries. """
if tab_types:
return [{'tab_type': tab_type} for tab_type in tab_types.split(',')]
else:
return []
def get_course_with_tabs(self, tabs=None):
""" Returns a mock course object with a tabs attribute. """
if tabs is None:
tabs = []
course = collections.namedtuple('MockCourse', ['tabs'])
if isinstance(tabs, basestring):
course.tabs = self.get_tab_type_dicts(tabs)
else:
course.tabs = tabs
return course
class XBlockVisibilityTestCase(SharedModuleStoreTestCase):
"""Tests for xblock visibility for students."""
@classmethod
def setUpClass(cls):
super(XBlockVisibilityTestCase, cls).setUpClass()
cls.dummy_user = ModuleStoreEnum.UserID.test
cls.past = datetime(1970, 1, 1, tzinfo=UTC)
cls.future = datetime.now(UTC) + timedelta(days=1)
cls.course = CourseFactory.create()
def test_private_unreleased_xblock(self):
"""Verifies that a private unreleased xblock is not visible"""
self._test_visible_to_students(False, 'private_unreleased', self.future)
def test_private_released_xblock(self):
"""Verifies that a private released xblock is not visible"""
self._test_visible_to_students(False, 'private_released', self.past)
def test_public_unreleased_xblock(self):
"""Verifies that a public (published) unreleased xblock is not visible"""
self._test_visible_to_students(False, 'public_unreleased', self.future, publish=True)
def test_public_released_xblock(self):
"""Verifies that public (published) released xblock is visible if staff lock is not enabled."""
self._test_visible_to_students(True, 'public_released', self.past, publish=True)
def test_private_no_start_xblock(self):
"""Verifies that a private xblock with no start date is not visible"""
self._test_visible_to_students(False, 'private_no_start', None)
def test_public_no_start_xblock(self):
"""Verifies that a public (published) xblock with no start date is visible unless staff lock is enabled"""
self._test_visible_to_students(True, 'public_no_start', None, publish=True)
def test_draft_released_xblock(self):
"""Verifies that a xblock with an unreleased draft and a released published version is visible"""
vertical = self._create_xblock_with_start_date('draft_released', self.past, publish=True)
# Create an unreleased draft version of the xblock
vertical.start = self.future
modulestore().update_item(vertical, self.dummy_user)
self.assertTrue(utils.is_currently_visible_to_students(vertical))
def _test_visible_to_students(self, expected_visible_without_lock, name, start_date, publish=False):
"""
Helper method that checks that is_xblock_visible_to_students returns the correct value both
with and without visible_to_staff_only set.
"""
no_staff_lock = self._create_xblock_with_start_date(name, start_date, publish, visible_to_staff_only=False)
self.assertEqual(expected_visible_without_lock, utils.is_currently_visible_to_students(no_staff_lock))
# any xblock with visible_to_staff_only set to True should not be visible to students.
staff_lock = self._create_xblock_with_start_date(
name + "_locked", start_date, publish, visible_to_staff_only=True
)
self.assertFalse(utils.is_currently_visible_to_students(staff_lock))
def _create_xblock_with_start_date(self, name, start_date, publish=False, visible_to_staff_only=False):
"""Helper to create an xblock with a start date, optionally publishing it"""
vertical = modulestore().create_item(
self.dummy_user, self.course.location.course_key, 'vertical', name,
fields={'start': start_date, 'visible_to_staff_only': visible_to_staff_only}
)
if publish:
modulestore().publish(vertical.location, self.dummy_user)
return vertical
class ReleaseDateSourceTest(CourseTestCase):
"""Tests for finding the source of an xblock's release date."""
def setUp(self):
super(ReleaseDateSourceTest, self).setUp()
self.chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
self.sequential = ItemFactory.create(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
# Read again so that children lists are accurate
self.chapter = self.store.get_item(self.chapter.location)
self.sequential = self.store.get_item(self.sequential.location)
self.vertical = self.store.get_item(self.vertical.location)
self.date_one = datetime(1980, 1, 1, tzinfo=UTC)
self.date_two = datetime(2020, 1, 1, tzinfo=UTC)
def _update_release_dates(self, chapter_start, sequential_start, vertical_start):
"""Sets the release dates of the chapter, sequential, and vertical"""
self.chapter.start = chapter_start
self.chapter = self.store.update_item(self.chapter, ModuleStoreEnum.UserID.test)
self.sequential.start = sequential_start
self.sequential = self.store.update_item(self.sequential, ModuleStoreEnum.UserID.test)
self.vertical.start = vertical_start
self.vertical = self.store.update_item(self.vertical, ModuleStoreEnum.UserID.test)
def _verify_release_date_source(self, item, expected_source):
"""Helper to verify that the release date source of a given item matches the expected source"""
source = utils.find_release_date_source(item)
self.assertEqual(source.location, expected_source.location)
self.assertEqual(source.start, expected_source.start)
def test_chapter_source_for_vertical(self):
"""Tests a vertical's release date being set by its chapter"""
self._update_release_dates(self.date_one, self.date_one, self.date_one)
self._verify_release_date_source(self.vertical, self.chapter)
def test_sequential_source_for_vertical(self):
"""Tests a vertical's release date being set by its sequential"""
self._update_release_dates(self.date_one, self.date_two, self.date_two)
self._verify_release_date_source(self.vertical, self.sequential)
def test_chapter_source_for_sequential(self):
"""Tests a sequential's release date being set by its chapter"""
self._update_release_dates(self.date_one, self.date_one, self.date_one)
self._verify_release_date_source(self.sequential, self.chapter)
def test_sequential_source_for_sequential(self):
"""Tests a sequential's release date being set by itself"""
self._update_release_dates(self.date_one, self.date_two, self.date_two)
self._verify_release_date_source(self.sequential, self.sequential)
class StaffLockTest(CourseTestCase):
"""Base class for testing staff lock functions."""
def setUp(self):
super(StaffLockTest, self).setUp()
self.chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
self.sequential = ItemFactory.create(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
self.orphan = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
# Read again so that children lists are accurate
self.chapter = self.store.get_item(self.chapter.location)
self.sequential = self.store.get_item(self.sequential.location)
self.vertical = self.store.get_item(self.vertical.location)
# Orphan the orphaned xblock
self.sequential.children = [self.vertical.location]
self.sequential = self.store.update_item(self.sequential, ModuleStoreEnum.UserID.test)
def _set_staff_lock(self, xblock, is_locked):
"""If is_locked is True, xblock is staff locked. Otherwise, the xblock staff lock field is removed."""
field = xblock.fields['visible_to_staff_only']
if is_locked:
field.write_to(xblock, True)
else:
field.delete_from(xblock)
return self.store.update_item(xblock, ModuleStoreEnum.UserID.test)
def _update_staff_locks(self, chapter_locked, sequential_locked, vertical_locked):
"""
Sets the staff lock on the chapter, sequential, and vertical
If the corresponding argument is False, then the field is deleted from the xblock
"""
self.chapter = self._set_staff_lock(self.chapter, chapter_locked)
self.sequential = self._set_staff_lock(self.sequential, sequential_locked)
self.vertical = self._set_staff_lock(self.vertical, vertical_locked)
class StaffLockSourceTest(StaffLockTest):
"""Tests for finding the source of an xblock's staff lock."""
def _verify_staff_lock_source(self, item, expected_source):
"""Helper to verify that the staff lock source of a given item matches the expected source"""
source = utils.find_staff_lock_source(item)
self.assertEqual(source.location, expected_source.location)
self.assertTrue(source.visible_to_staff_only)
def test_chapter_source_for_vertical(self):
"""Tests a vertical's staff lock being set by its chapter"""
self._update_staff_locks(True, False, False)
self._verify_staff_lock_source(self.vertical, self.chapter)
def test_sequential_source_for_vertical(self):
"""Tests a vertical's staff lock being set by its sequential"""
self._update_staff_locks(True, True, False)
self._verify_staff_lock_source(self.vertical, self.sequential)
self._update_staff_locks(False, True, False)
self._verify_staff_lock_source(self.vertical, self.sequential)
def test_vertical_source_for_vertical(self):
"""Tests a vertical's staff lock being set by itself"""
self._update_staff_locks(True, True, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
self._update_staff_locks(False, True, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
self._update_staff_locks(False, False, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
def test_orphan_has_no_source(self):
"""Tests that a orphaned xblock has no staff lock source"""
self.assertIsNone(utils.find_staff_lock_source(self.orphan))
def test_no_source_for_vertical(self):
"""Tests a vertical with no staff lock set anywhere"""
self._update_staff_locks(False, False, False)
self.assertIsNone(utils.find_staff_lock_source(self.vertical))
class InheritedStaffLockTest(StaffLockTest):
"""Tests for determining if an xblock inherits a staff lock."""
def test_no_inheritance(self):
"""Tests that a locked or unlocked vertical with no locked ancestors does not have an inherited lock"""
self._update_staff_locks(False, False, False)
self.assertFalse(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(False, False, True)
self.assertFalse(utils.ancestor_has_staff_lock(self.vertical))
def test_inheritance_in_locked_section(self):
"""Tests that a locked or unlocked vertical in a locked section has an inherited lock"""
self._update_staff_locks(True, False, False)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(True, False, True)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
def test_inheritance_in_locked_subsection(self):
"""Tests that a locked or unlocked vertical in a locked subsection has an inherited lock"""
self._update_staff_locks(False, True, False)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(False, True, True)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
def test_no_inheritance_for_orphan(self):
"""Tests that an orphaned xblock does not inherit staff lock"""
self.assertFalse(utils.ancestor_has_staff_lock(self.orphan))
class GroupVisibilityTest(CourseTestCase):
"""
Test content group access rules.
"""
def setUp(self):
super(GroupVisibilityTest, self).setUp()
chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
html = ItemFactory.create(category='html', parent_location=vertical.location)
problem = ItemFactory.create(
category='problem', parent_location=vertical.location, data="<problem></problem>"
)
self.sequential = self.store.get_item(sequential.location)
self.vertical = self.store.get_item(vertical.location)
self.html = self.store.get_item(html.location)
self.problem = self.store.get_item(problem.location)
# Add partitions to the course
self.course.user_partitions = [
UserPartition(
id=0,
name="Partition 0",
description="Partition 0",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Partition 1",
description="Partition 1",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group C"),
Group(id=1, name="Group D"),
],
),
UserPartition(
id=2,
name="Partition 2",
description="Partition 2",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group E"),
Group(id=1, name="Group F"),
Group(id=2, name="Group G"),
Group(id=3, name="Group H"),
],
),
]
self.course = self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
def set_group_access(self, xblock, value):
""" Sets group_access to specified value and calls update_item to persist the change. """
xblock.group_access = value
self.store.update_item(xblock, self.user.id)
def test_no_visibility_set(self):
""" Tests when group_access has not been set on anything. """
def verify_all_components_visible_to_all(): # pylint: disable=invalid-name
""" Verifies when group_access has not been set on anything. """
for item in (self.sequential, self.vertical, self.html, self.problem):
self.assertFalse(utils.has_children_visible_to_specific_content_groups(item))
self.assertFalse(utils.is_visible_to_specific_content_groups(item))
verify_all_components_visible_to_all()
# Test with group_access set to Falsey values.
self.set_group_access(self.vertical, {1: []})
self.set_group_access(self.html, {2: None})
verify_all_components_visible_to_all()
def test_sequential_and_problem_have_group_access(self):
""" Tests when group_access is set on a few different components. """
self.set_group_access(self.sequential, {1: [0]})
# This is a no-op.
self.set_group_access(self.vertical, {1: []})
self.set_group_access(self.problem, {2: [3, 4]})
# Note that "has_children_visible_to_specific_content_groups" only checks immediate children.
self.assertFalse(utils.has_children_visible_to_specific_content_groups(self.sequential))
self.assertTrue(utils.has_children_visible_to_specific_content_groups(self.vertical))
self.assertFalse(utils.has_children_visible_to_specific_content_groups(self.html))
self.assertFalse(utils.has_children_visible_to_specific_content_groups(self.problem))
self.assertTrue(utils.is_visible_to_specific_content_groups(self.sequential))
self.assertFalse(utils.is_visible_to_specific_content_groups(self.vertical))
self.assertFalse(utils.is_visible_to_specific_content_groups(self.html))
self.assertTrue(utils.is_visible_to_specific_content_groups(self.problem))
class GetUserPartitionInfoTest(ModuleStoreTestCase):
"""
Tests for utility function that retrieves user partition info
and formats it for consumption by the editing UI.
"""
def setUp(self):
"""Create a dummy course. """
super(GetUserPartitionInfoTest, self).setUp()
self.course = CourseFactory()
self.block = ItemFactory.create(category="problem", parent_location=self.course.location) # pylint: disable=no-member
# Set up some default partitions
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Random user partition",
scheme=UserPartition.get_scheme("random"),
description="Random user partition",
groups=[
Group(id=0, name="Group C"),
],
),
])
def test_retrieves_partition_info_with_selected_groups(self):
# Initially, no group access is set on the block, so no groups should
# be marked as selected.
expected = [
{
"id": 0,
"name": u"Cohort user partition",
"scheme": u"cohort",
"groups": [
{
"id": 0,
"name": u"Group A",
"selected": False,
"deleted": False,
},
{
"id": 1,
"name": u"Group B",
"selected": False,
"deleted": False,
},
]
},
{
"id": 1,
"name": u"Random user partition",
"scheme": u"random",
"groups": [
{
"id": 0,
"name": u"Group C",
"selected": False,
"deleted": False,
},
]
}
]
self.assertEqual(self._get_partition_info(schemes=["cohort", "random"]), expected)
# Update group access and expect that now one group is marked as selected.
self._set_group_access({0: [1]})
expected[0]["groups"][1]["selected"] = True
self.assertEqual(self._get_partition_info(schemes=["cohort", "random"]), expected)
def test_deleted_groups(self):
# Select a group that is not defined in the partition
self._set_group_access({0: [3]})
# Expect that the group appears as selected but is marked as deleted
partitions = self._get_partition_info()
groups = partitions[0]["groups"]
self.assertEqual(len(groups), 3)
self.assertEqual(groups[2], {
"id": 3,
"name": "Deleted Group",
"selected": True,
"deleted": True
})
def test_filter_by_partition_scheme(self):
partitions = self._get_partition_info(schemes=["random"])
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "random")
def test_exclude_inactive_partitions(self):
# Include an inactive verification scheme
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Verification user partition",
scheme=UserPartition.get_scheme("verification"),
description="Verification user partition",
groups=[
Group(id=0, name="Group C"),
],
active=False,
),
])
# Expect that the inactive scheme is excluded from the results
partitions = self._get_partition_info(schemes=["cohort", "verification"])
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "cohort")
def test_exclude_partitions_with_no_groups(self):
# The cohort partition has no groups defined
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[],
),
UserPartition(
id=1,
name="Verification user partition",
scheme=UserPartition.get_scheme("verification"),
description="Verification user partition",
groups=[
Group(id=0, name="Group C"),
],
),
])
# Expect that the partition with no groups is excluded from the results
partitions = self._get_partition_info(schemes=["cohort", "verification"])
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "verification")
def _set_partitions(self, partitions):
"""Set the user partitions of the course descriptor. """
self.course.user_partitions = partitions
self.course = self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
def _set_group_access(self, group_access):
"""Set group access of the block. """
self.block.group_access = group_access
self.block = self.store.update_item(self.block, ModuleStoreEnum.UserID.test)
def _get_partition_info(self, schemes=None):
"""Retrieve partition info and selected groups. """
return utils.get_user_partition_info(self.block, schemes=schemes)
# ---- end of file: cms/djangoapps/contentstore/tests/test_utils.py (repo: romain-li/edx-platform, license: agpl-3.0) ----
# Copyright 2019 Tecnativa - David
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from odoo import SUPERUSER_ID, api
_logger = logging.getLogger(__name__)
def pre_init_hook(cr):
"""Speed up the installation of the module on an existing Odoo instance"""
cr.execute(
"""
SELECT column_name
FROM information_schema.columns
WHERE table_name='stock_move' AND
column_name='qty_returnable'
"""
)
if not cr.fetchone():
_logger.info("Creating field qty_returnable on stock_move")
cr.execute(
"""
ALTER TABLE stock_move ADD COLUMN qty_returnable float;
"""
)
cr.execute(
"""
UPDATE stock_move SET qty_returnable = 0
WHERE state IN ('draft', 'cancel')
"""
)
cr.execute(
"""
UPDATE stock_move SET qty_returnable = product_uom_qty
WHERE state = 'done'
"""
)
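    # Moves in the remaining states are left NULL here; post_init_hook below
    # computes them (and refines 'done' moves that have return moves).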
def post_init_hook(cr, registry):
"""Set moves returnable qty on hand"""
with api.Environment.manage():
env = api.Environment(cr, SUPERUSER_ID, {})
moves_draft = env["stock.move"].search([("state", "in", ["draft", "cancel"])])
moves_no_return_pendant = env["stock.move"].search(
[
("returned_move_ids", "=", False),
("state", "not in", ["draft", "cancel", "done"]),
]
)
moves_by_reserved_availability = {}
for move in moves_no_return_pendant:
moves_by_reserved_availability.setdefault(move.reserved_availability, [])
moves_by_reserved_availability[move.reserved_availability].append(move.id)
for qty, ids in moves_by_reserved_availability.items():
cr.execute(
"UPDATE stock_move SET qty_returnable = %s " "WHERE id IN %s",
(qty, tuple(ids)),
)
moves_no_return_done = env["stock.move"].search(
[
("returned_move_ids", "=", False),
("state", "=", "done"),
]
)
        # Iteratively resolve quantities along the return chains
updated_moves = moves_no_return_done + moves_draft + moves_no_return_pendant
remaining_moves = env["stock.move"].search(
[
("returned_move_ids", "!=", False),
("state", "=", "done"),
]
)
while remaining_moves:
_logger.info("{} moves left...".format(len(remaining_moves)))
remaining_moves, updated_moves = update_qty_returnable(
cr, remaining_moves, updated_moves
)
def update_qty_returnable(cr, remaining_moves, updated_moves):
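    # Fixpoint step: a move's returnable qty is its own quantity minus the
    # returnable qty of the moves that returned it, so a move can only be
    # computed once all of its returned_move_ids have been computed. Anything
    # unresolved stays in remaining_moves for the next pass.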
for move in remaining_moves:
if all([x in updated_moves for x in move.returned_move_ids]):
quantity_returned = sum(move.returned_move_ids.mapped("qty_returnable"))
quantity = move.product_uom_qty - quantity_returned
cr.execute(
"UPDATE stock_move SET qty_returnable = %s " "WHERE id = %s",
(quantity, move.id),
)
remaining_moves -= move
updated_moves += move
return remaining_moves, updated_moves
# ---- end of file: stock_return_request/hooks.py (repo: OCA/stock-logistics-workflow, license: agpl-3.0) ----
"""This file contains (or should), all access control logic for the courseware.
Ideally, it will be the only place that needs to know about any special settings
like DISABLE_START_DATES"""
import logging
from datetime import datetime, timedelta
import pytz
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from xmodule.course_module import (
CourseDescriptor, CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
CATALOG_VISIBILITY_ABOUT)
from xmodule.error_module import ErrorDescriptor
from xmodule.x_module import XModule
from xblock.core import XBlock
from external_auth.models import ExternalAuthMap
from courseware.masquerade import is_masquerading_as_student
from django.utils.timezone import UTC
from student import auth
from student.roles import (
GlobalStaff, CourseStaffRole, CourseInstructorRole,
OrgStaffRole, OrgInstructorRole, CourseBetaTesterRole
)
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from opaque_keys.edx.keys import CourseKey, UsageKey
DEBUG_ACCESS = False
log = logging.getLogger(__name__)
def debug(*args, **kwargs):
# to avoid overly verbose output, this is off by default
if DEBUG_ACCESS:
log.debug(*args, **kwargs)
def has_access(user, action, obj, course_key=None):
"""
Check whether a user has the access to do action on obj. Handles any magic
switching based on various settings.
Things this module understands:
- start dates for modules
- visible_to_staff_only for modules
- DISABLE_START_DATES
- different access for instructor, staff, course staff, and students.
- mobile_available flag for course modules
user: a Django user object. May be anonymous. If none is passed,
anonymous is assumed
obj: The object to check access for. A module, descriptor, location, or
certain special strings (e.g. 'global')
action: A string specifying the action that the client is trying to perform.
actions depend on the obj type, but include e.g. 'enroll' for courses. See the
type-specific functions below for the known actions for that type.
course_key: A course_key specifying which course run this access is for.
Required when accessing anything other than a CourseDescriptor, 'global',
or a location with category 'course'
Returns a bool. It is up to the caller to actually deny access in a way
that makes sense in context.
"""
# Just in case user is passed in as None, make them anonymous
if not user:
user = AnonymousUser()
# delegate the work to type-specific functions.
# (start with more specific types, then get more general)
if isinstance(obj, CourseDescriptor):
return _has_access_course_desc(user, action, obj)
if isinstance(obj, ErrorDescriptor):
return _has_access_error_desc(user, action, obj, course_key)
if isinstance(obj, XModule):
return _has_access_xmodule(user, action, obj, course_key)
# NOTE: any descriptor access checkers need to go above this
if isinstance(obj, XBlock):
return _has_access_descriptor(user, action, obj, course_key)
if isinstance(obj, CourseKey):
return _has_access_course_key(user, action, obj)
if isinstance(obj, UsageKey):
return _has_access_location(user, action, obj, course_key)
if isinstance(obj, basestring):
return _has_access_string(user, action, obj, course_key)
# Passing an unknown object here is a coding error, so rather than
# returning a default, complain.
raise TypeError("Unknown object type in has_access(): '{0}'"
.format(type(obj)))
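# Illustrative call: ``has_access(request.user, 'enroll', course)`` returns a
# bool; the caller is responsible for actually denying access.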
# ================ Implementation helpers ================================
def _has_access_course_desc(user, action, course):
"""
Check if user has access to a course descriptor.
Valid actions:
'load' -- load the courseware, see inside the course
'load_forum' -- can load and contribute to the forums (one access level for now)
'load_mobile' -- can load from a mobile context
'load_mobile_no_enrollment_check' -- can load from a mobile context without checking for enrollment
'enroll' -- enroll. Checks for enrollment window,
ACCESS_REQUIRE_STAFF_FOR_COURSE,
'see_exists' -- can see that the course exists.
'staff' -- staff access to course.
'see_in_catalog' -- user is able to see the course listed in the course catalog.
'see_about_page' -- user is able to see the course about page.
"""
def can_load():
"""
Can this user load this course?
NOTE: this is not checking whether user is actually enrolled in the course.
"""
# delegate to generic descriptor check to check start dates
return _has_access_descriptor(user, 'load', course, course.id)
def can_load_forum():
"""
Can this user access the forums in this course?
"""
return (
can_load() and
(
CourseEnrollment.is_enrolled(user, course.id) or
_has_staff_access_to_descriptor(user, course, course.id)
)
)
def can_load_mobile():
"""
Can this user access this course from a mobile device?
"""
return (
# check mobile requirements
can_load_mobile_no_enroll_check() and
# check enrollment
(
CourseEnrollment.is_enrolled(user, course.id) or
_has_staff_access_to_descriptor(user, course, course.id)
)
)
def can_load_mobile_no_enroll_check():
"""
Can this enrolled user access this course from a mobile device?
Note: does not check for enrollment since it is assumed the caller has done so.
"""
return (
# check start date
can_load() and
# check mobile_available flag
(
course.mobile_available or
auth.has_access(user, CourseBetaTesterRole(course.id)) or
_has_staff_access_to_descriptor(user, course, course.id)
)
)
def can_enroll():
"""
First check if restriction of enrollment by login method is enabled, both
globally and by the course.
If it is, then the user must pass the criterion set by the course, e.g. that ExternalAuthMap
was set by 'shib:https://idp.stanford.edu/", in addition to requirements below.
Rest of requirements:
(CourseEnrollmentAllowed always overrides)
or
(staff can always enroll)
or
Enrollment can only happen in the course enrollment period, if one exists, and
course is not invitation only.
"""
# if using registration method to restrict (say shibboleth)
if settings.FEATURES.get('RESTRICT_ENROLL_BY_REG_METHOD') and course.enrollment_domain:
if user is not None and user.is_authenticated() and \
ExternalAuthMap.objects.filter(user=user, external_domain=course.enrollment_domain):
debug("Allow: external_auth of " + course.enrollment_domain)
reg_method_ok = True
else:
reg_method_ok = False
else:
reg_method_ok = True # if not using this access check, it's always OK.
now = datetime.now(UTC())
start = course.enrollment_start or datetime.min.replace(tzinfo=pytz.UTC)
end = course.enrollment_end or datetime.max.replace(tzinfo=pytz.UTC)
# if user is in CourseEnrollmentAllowed with right course key then can also enroll
# (note that course.id actually points to a CourseKey)
# (the filter call uses course_id= since that's the legacy database schema)
# (sorry that it's confusing :( )
if user is not None and user.is_authenticated() and CourseEnrollmentAllowed:
if CourseEnrollmentAllowed.objects.filter(email=user.email, course_id=course.id):
return True
if _has_staff_access_to_descriptor(user, course, course.id):
return True
# Invitation_only doesn't apply to CourseEnrollmentAllowed or has_staff_access_access
if course.invitation_only:
debug("Deny: invitation only")
return False
        if reg_method_ok and start < now < end:
            debug("Allow: in enrollment period")
            return True

        # Fall through: outside the enrollment window (or the registration
        # method check failed).
        debug("Deny: not in enrollment period")
        return False
def see_exists():
"""
Can see if can enroll, but also if can load it: if user enrolled in a course and now
it's past the enrollment period, they should still see it.
TODO (vshnayder): This means that courses with limited enrollment periods will not appear
to non-staff visitors after the enrollment period is over. If this is not what we want, will
need to change this logic.
"""
# VS[compat] -- this setting should go away once all courses have
# properly configured enrollment_start times (if course should be
# staff-only, set enrollment_start far in the future.)
if settings.FEATURES.get('ACCESS_REQUIRE_STAFF_FOR_COURSE'):
# if this feature is on, only allow courses that have ispublic set to be
# seen by non-staff
if course.ispublic:
debug("Allow: ACCESS_REQUIRE_STAFF_FOR_COURSE and ispublic")
return True
return _has_staff_access_to_descriptor(user, course, course.id)
return can_enroll() or can_load()
def can_see_in_catalog():
"""
Implements the "can see course in catalog" logic if a course should be visible in the main course catalog
In this case we use the catalog_visibility property on the course descriptor
but also allow course staff to see this.
"""
return (
course.catalog_visibility == CATALOG_VISIBILITY_CATALOG_AND_ABOUT or
_has_staff_access_to_descriptor(user, course, course.id)
)
def can_see_about_page():
"""
Implements the "can see course about page" logic if a course about page should be visible
In this case we use the catalog_visibility property on the course descriptor
but also allow course staff to see this.
"""
return (
course.catalog_visibility == CATALOG_VISIBILITY_CATALOG_AND_ABOUT or
course.catalog_visibility == CATALOG_VISIBILITY_ABOUT or
_has_staff_access_to_descriptor(user, course, course.id)
)
checkers = {
'load': can_load,
'load_forum': can_load_forum,
'load_mobile': can_load_mobile,
'load_mobile_no_enrollment_check': can_load_mobile_no_enroll_check,
'enroll': can_enroll,
'see_exists': see_exists,
'staff': lambda: _has_staff_access_to_descriptor(user, course, course.id),
'instructor': lambda: _has_instructor_access_to_descriptor(user, course, course.id),
'see_in_catalog': can_see_in_catalog,
'see_about_page': can_see_about_page,
}
return _dispatch(checkers, action, user, course)
def _has_access_error_desc(user, action, descriptor, course_key):
"""
Only staff should see error descriptors.
Valid actions:
'load' -- load this descriptor, showing it to the user.
'staff' -- staff access to descriptor.
"""
def check_for_staff():
return _has_staff_access_to_descriptor(user, descriptor, course_key)
checkers = {
'load': check_for_staff,
'staff': check_for_staff,
'instructor': lambda: _has_instructor_access_to_descriptor(user, descriptor, course_key)
}
return _dispatch(checkers, action, user, descriptor)
def _has_access_descriptor(user, action, descriptor, course_key=None):
"""
Check if user has access to this descriptor.
Valid actions:
'load' -- load this descriptor, showing it to the user.
'staff' -- staff access to descriptor.
NOTE: This is the fallback logic for descriptors that don't have custom policy
(e.g. courses). If you call this method directly instead of going through
has_access(), it will not do the right thing.
"""
def can_load():
"""
NOTE: This does not check that the student is enrolled in the course
that contains this module. We may or may not want to allow non-enrolled
students to see modules. If not, views should check the course, so we
don't have to hit the enrollments table on every module load.
"""
if descriptor.visible_to_staff_only and not _has_staff_access_to_descriptor(user, descriptor, course_key):
return False
# If start dates are off, can always load
if settings.FEATURES['DISABLE_START_DATES'] and not is_masquerading_as_student(user):
debug("Allow: DISABLE_START_DATES")
return True
# Check start date
if 'detached' not in descriptor._class_tags and descriptor.start is not None:
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(
user,
descriptor,
course_key=course_key
)
if now > effective_start:
# after start date, everyone can see it
debug("Allow: now > effective start date")
return True
# otherwise, need staff access
return _has_staff_access_to_descriptor(user, descriptor, course_key)
# No start date, so can always load.
debug("Allow: no start date")
return True
checkers = {
'load': can_load,
'staff': lambda: _has_staff_access_to_descriptor(user, descriptor, course_key),
'instructor': lambda: _has_instructor_access_to_descriptor(user, descriptor, course_key)
}
return _dispatch(checkers, action, user, descriptor)
def _has_access_xmodule(user, action, xmodule, course_key):
"""
Check if user has access to this xmodule.
Valid actions:
- same as the valid actions for xmodule.descriptor
"""
# Delegate to the descriptor
return has_access(user, action, xmodule.descriptor, course_key)
def _has_access_location(user, action, location, course_key):
"""
Check if user has access to this location.
Valid actions:
'staff' : True if the user has staff access to this location
NOTE: if you add other actions, make sure that
has_access(user, location, action) == has_access(user, get_item(location), action)
"""
checkers = {
'staff': lambda: _has_staff_access_to_location(user, location, course_key)
}
return _dispatch(checkers, action, user, location)
def _has_access_course_key(user, action, course_key):
"""
Check if user has access to the course with this course_key
Valid actions:
'staff' : True if the user has staff access to this location
    'instructor' : True if the user has instructor access to this location
"""
checkers = {
'staff': lambda: _has_staff_access_to_location(user, None, course_key),
'instructor': lambda: _has_instructor_access_to_location(user, None, course_key),
}
return _dispatch(checkers, action, user, course_key)
def _has_access_string(user, action, perm, course_key):
"""
Check if user has certain special access, specified as string. Valid strings:
'global'
Valid actions:
'staff' -- global staff access.
"""
def check_staff():
if perm != 'global':
debug("Deny: invalid permission '%s'", perm)
return False
return GlobalStaff().has_user(user)
checkers = {
'staff': check_staff
}
return _dispatch(checkers, action, user, perm)
##### Internal helper methods below
def _dispatch(table, action, user, obj):
"""
Helper: call table[action], raising a nice pretty error if there is no such key.
user and object passed in only for error messages and debugging
"""
if action in table:
result = table[action]()
debug("%s user %s, object %s, action %s",
'ALLOWED' if result else 'DENIED',
user,
obj.location.to_deprecated_string() if isinstance(obj, XBlock) else str(obj),
action)
return result
raise ValueError(u"Unknown action for object type '{0}': '{1}'".format(
type(obj), action))
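# Illustrative sketch (not part of the original module): the checker-table
# pattern used throughout this file. A dict maps action names to zero-argument
# callables and _dispatch() looks the action up; the names below are
# hypothetical.
def _example_dispatch(user, obj):  # pragma: no cover
    checkers = {
        'load': lambda: True,
        'staff': lambda: False,
    }
    return _dispatch(checkers, 'load', user, obj)  # calls the 'load' checker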
def _adjust_start_date_for_beta_testers(user, descriptor, course_key=None): # pylint: disable=invalid-name
"""
If user is in a beta test group, adjust the start date by the appropriate number of
days.
Arguments:
user: A django user. May be anonymous.
descriptor: the XModuleDescriptor the user is trying to get access to, with a
non-None start date.
Returns:
A datetime. Either the same as start, or earlier for beta testers.
NOTE: number of days to adjust should be cached to avoid looking it up thousands of
times per query.
NOTE: For now, this function assumes that the descriptor's location is in the course
the user is looking at. Once we have proper usages and definitions per the XBlock
design, this should use the course the usage is in.
NOTE: If testing manually, make sure FEATURES['DISABLE_START_DATES'] = False
in envs/dev.py!
"""
if descriptor.days_early_for_beta is None:
# bail early if no beta testing is set up
return descriptor.start
if CourseBetaTesterRole(course_key).has_user(user):
debug("Adjust start time: user in beta role for %s", descriptor)
delta = timedelta(descriptor.days_early_for_beta)
effective = descriptor.start - delta
return effective
return descriptor.start
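# Worked example (illustrative only, hypothetical dates): with
# days_early_for_beta = 5 and a start date of 2014-01-10, beta testers get an
# effective start of 2014-01-05.
def _example_beta_start():  # pragma: no cover
    start = datetime(2014, 1, 10, tzinfo=UTC())
    return start - timedelta(5)  # datetime(2014, 1, 5, ...)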
def _has_instructor_access_to_location(user, location, course_key=None):
if course_key is None:
course_key = location.course_key
return _has_access_to_course(user, 'instructor', course_key)
def _has_staff_access_to_location(user, location, course_key=None):
if course_key is None:
course_key = location.course_key
return _has_access_to_course(user, 'staff', course_key)
def _has_access_to_course(user, access_level, course_key):
'''
Returns True if the given user has access_level (= staff or
instructor) access to the course with the given course_key.
This ensures the user is authenticated and checks if global staff or has
staff / instructor access.
access_level = string, either "staff" or "instructor"
'''
if user is None or (not user.is_authenticated()):
debug("Deny: no user or anon user")
return False
if is_masquerading_as_student(user):
return False
if GlobalStaff().has_user(user):
debug("Allow: user.is_staff")
return True
if access_level not in ('staff', 'instructor'):
log.debug("Error in access._has_access_to_course access_level=%s unknown", access_level)
debug("Deny: unknown access level")
return False
staff_access = (
CourseStaffRole(course_key).has_user(user) or
OrgStaffRole(course_key.org).has_user(user)
)
if staff_access and access_level == 'staff':
debug("Allow: user has course staff access")
return True
instructor_access = (
CourseInstructorRole(course_key).has_user(user) or
OrgInstructorRole(course_key.org).has_user(user)
)
if instructor_access and access_level in ('staff', 'instructor'):
debug("Allow: user has course instructor access")
return True
debug("Deny: user did not have correct access")
return False
def _has_instructor_access_to_descriptor(user, descriptor, course_key): # pylint: disable=invalid-name
"""Helper method that checks whether the user has staff access to
the course of the location.
descriptor: something that has a location attribute
"""
return _has_instructor_access_to_location(user, descriptor.location, course_key)
def _has_staff_access_to_descriptor(user, descriptor, course_key):
"""Helper method that checks whether the user has staff access to
the course of the location.
descriptor: something that has a location attribute
"""
return _has_staff_access_to_location(user, descriptor.location, course_key)
def get_user_role(user, course_key):
"""
Return corresponding string if user has staff, instructor or student
course role in LMS.
"""
if is_masquerading_as_student(user):
return 'student'
elif has_access(user, 'instructor', course_key):
return 'instructor'
elif has_access(user, 'staff', course_key):
return 'staff'
else:
return 'student'
| UQ-UQx/edx-platform_lti | lms/djangoapps/courseware/access.py | Python | agpl-3.0 | 20,899 |
from django.db import models
import datetime
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from iati_synchroniser.dataset_syncer import DatasetSyncer
from iati_synchroniser.codelist_importer import CodeListImporter
from iati.parser import Parser
from iati_synchroniser.admin_tools import AdminTools
INTERVAL_CHOICES = (
(u'YEARLY', _(u"Parse yearly")),
(u'MONTHLY', _(u"Parse monthly")),
(u'WEEKLY', _(u"Parse weekly")),
(u'DAILY', _(u"Parse daily")),
)
class Publisher(models.Model):
org_id = models.CharField(max_length=100, blank=True, null=True)
org_abbreviate = models.CharField(max_length=55, blank=True, null=True)
org_name = models.CharField(max_length=255)
default_interval = models.CharField(verbose_name=_(u"Interval"), max_length=55, choices=INTERVAL_CHOICES, default=u'MONTHLY')
XML_total_activity_count = models.IntegerField(null=True, default=None)
OIPA_total_activity_count = models.IntegerField(null=True, default=None)
def __unicode__(self):
return self.org_id
class IatiXmlSource(models.Model):
TYPE_CHOICES = (
(1, _(u"Activity Files")),
(2, _(u"Organisation Files")),
)
INTERVAL_CHOICES = (
("day", _(u"Day")),
("week", _(u"Week")),
("month", _(u"Month")),
("year", _(u"Year")),
)
ref = models.CharField(verbose_name=_(u"Reference"), max_length=70, help_text=_(u"Reference for the XML file. Preferred usage: 'collection' or single country or region name"))
title = models.CharField(max_length=255, null=True)
type = models.IntegerField(choices=TYPE_CHOICES, default=1)
publisher = models.ForeignKey(Publisher)
source_url = models.CharField(max_length=255, unique=True, help_text=_(u"Hyperlink to an iati activity or organisation XML file."))
date_created = models.DateTimeField(auto_now_add=True, editable=False)
date_updated = models.DateTimeField(auto_now_add=True, editable=False)
update_interval = models.CharField(max_length=20, choices=INTERVAL_CHOICES, default="month", null=True, blank=True)
last_found_in_registry = models.DateTimeField(default=None, null=True)
xml_activity_count = models.IntegerField(null=True, default=None)
oipa_activity_count = models.IntegerField(null=True, default=None)
iati_standard_version = models.CharField(max_length=10, null=True, default=None)
class Meta:
verbose_name_plural = "iati XML sources"
ordering = ["ref"]
def __unicode__(self):
return self.ref
def get_parse_status(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-xml='xml_%i' class='parse'><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
get_parse_status.allow_tags = True
get_parse_status.short_description = _(u"Parse status")
def process(self):
parser = Parser()
parser.parse_url(self.source_url, self.ref)
self.date_updated = datetime.datetime.now()
activity_counter = AdminTools()
self.xml_activity_count = activity_counter.get_xml_activity_amount(self.source_url)
self.oipa_activity_count = activity_counter.get_oipa_activity_amount(self.ref)
self.save(process=False)
def save(self, process=True, *args, **kwargs):
super(IatiXmlSource, self).save()
if process:
self.process()
class DatasetSync(models.Model):
TYPE_CHOICES = (
(1, _(u"Activity Files")),
(2, _(u"Organisation Files")),
)
interval = models.CharField(verbose_name=_(u"Interval"), max_length=55, choices=INTERVAL_CHOICES)
date_updated = models.DateTimeField(auto_now=True, editable=False)
type = models.IntegerField(choices=TYPE_CHOICES, default=1)
def __unicode__(self):
return self.interval
class Meta:
verbose_name_plural = "dataset synchronisers"
def sync_now(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-sync='sync_%i' class='sync '><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
sync_now.allow_tags = True
sync_now.short_description = _(u"Sync now?")
    def _add_month(self, d, months=1):
        year, month, day = d.timetuple()[:3]
        new_month = month + months
        # floor division keeps the year arithmetic correct on December rollover
        return datetime.date(year + ((new_month - 1) // 12), (new_month - 1) % 12 + 1, day)
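    # Worked example (illustrative, not part of the original model): adding one
    # month to 2014-12-15 rolls the year over, since (12 // 12) = 1 and
    # (12 % 12) + 1 = 1 (January).
    def _example_add_month(self):  # pragma: no cover
        return self._add_month(datetime.date(2014, 12, 15))  # -> 2015-01-15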
def process(self):
if self.interval == u'YEARLY' and (self._add_month(self.date_updated, 12) <= datetime.datetime.now().date()):
self.sync_dataset_with_iati_api()
elif self.interval == u'MONTHLY' and (self._add_month(self.date_updated) <= datetime.datetime.now().date()):
self.sync_dataset_with_iati_api()
elif self.interval == u'WEEKLY' and (self.date_updated+datetime.timedelta(7) <= datetime.datetime.today()):
self.sync_dataset_with_iati_api()
elif self.interval == u'DAILY' and (self.date_updated+datetime.timedelta(1) <= datetime.datetime.today()):
self.sync_dataset_with_iati_api()
def sync_dataset_with_iati_api(self):
syncer = DatasetSyncer()
syncer.synchronize_with_iati_api(self.type)
class CodelistSync(models.Model):
date_updated = models.DateTimeField(auto_now=True, editable=False)
class Meta:
verbose_name_plural = "codelist synchronisers"
def sync_now(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-sync='sync_%i' class='sync '><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
sync_now.allow_tags = True
sync_now.short_description = _(u"Sync now?")
def sync_codelist(self):
syncer = CodeListImporter()
syncer.synchronise_with_codelists() | schlos/OIPA-V2.1 | OIPA/iati_synchroniser/models.py | Python | agpl-3.0 | 5,972 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import requests
import json
import re
from flask import current_app as app
import time
import logging
from flask import render_template_string
from superdesk import get_resource_service
from superdesk.utils import config
cpi_url = 'CPI/2.50.999901.20.Q'
cpi_token = '__CPI__'
unemployment_url = 'LF/0.14.3.1599.20.M'
unemployment_token = '__UNEMPLOYMENT__'
lending_url = 'HF/3.0.99.20.140_1.M'
lending_token = '__LENDING__'
retail_url = 'RT/0.1.20.20.M'
retail_trade_token = '__RETAIL_TRADE__'
bop_url = 'BOP/1.100.20.Q'
bop_token = '__BOP__'
token_map = {cpi_token: cpi_url, unemployment_token: unemployment_url, lending_token: lending_url,
retail_trade_token: retail_url}
logger = logging.getLogger(__name__)
def expand_token(token, item, template_map):
url_prefix = app.config.get('ABS_WEB_SERVICE_URL')
abs_web_service_token = app.config.get('ABS_WEB_SERVICE_TOKEN')
url_suffix = '/all?dimensionAtObservation=allDimensions&detail=DataOnly'
# convert the token in the item to one that is jinja compliant
jinja_token = re.sub('\\.|/|\\+|#', '_', token)
item['body_html'] = item.get('body_html').replace(token, jinja_token)
item['headline'] = item.get('headline').replace(token, jinja_token)
item['abstract'] = item.get('abstract').replace(token, jinja_token)
# Get the token for the primary value
temp_token = re.sub('\\.|/|\\+', '_', token)
value_token = temp_token.split('#')[0] + '__' if '#' in token else temp_token
# If we have handled the value we don't need to do it again
if template_map.get(value_token):
return
data_identifier = token_map.get(token).split('#')[0] if '#' in token_map.get(token) else token_map.get(token)
logger.info('ABS request : {}'.format(url_prefix + data_identifier + url_suffix))
r = requests.get(url_prefix + data_identifier + url_suffix, headers={'x-api-key': abs_web_service_token},
verify=False)
if r.status_code == 200:
logger.info('Response Text [{}]'.format(r.text))
try:
response = json.loads(r.text)
        except ValueError:  # json.loads raises ValueError on malformed JSON
logger.error('Exception parsing json for {}'.format(data_identifier))
return
# get the number of dimensions in the dataset
dimensions = len(response.get('structure').get('dimensions').get('observation'))
        # Assume that the time period is the last dimension; get its name
last_period_name = response.get('structure').get('dimensions').get('observation')[-1].get('values')[-1].get(
'name')
# get the index into the dimensions of the last time period
last_period_index = len(response.get('structure').get('dimensions').get('observation')[-1].get('values')) - 1
# construct the dimension key of the last data item
dimension_key = '0:' * (dimensions - 1) + str(last_period_index)
raw_value = response['dataSets'][0]['observations'][dimension_key][0]
if raw_value is None:
value = 'N/A'
elif isinstance(raw_value, float):
value = str(round(raw_value, 2))
else:
value = str(response['dataSets'][0]['observations'][dimension_key][0])
template_map[value_token] = value
# the token for the period
template_map[value_token[:-1] + 'PERIOD__'] = last_period_name
# calculate the change from the preceding value
last_period_index -= 1
if last_period_index >= 0:
# construct the dimension key of the last data item
dimension_key = '0:' * (dimensions - 1) + str(last_period_index)
prev_value = response['dataSets'][0]['observations'][dimension_key][0]
if raw_value and prev_value:
if prev_value > raw_value:
adjective = 'fell'
elif prev_value < raw_value:
adjective = 'rose'
else:
adjective = 'held steady'
else:
adjective = 'N/A'
template_map[value_token[:-1] + 'ADJECTIVE__'] = adjective
if prev_value is None:
value = 'N/A'
elif isinstance(prev_value, float):
value = str(round(prev_value, 2))
else:
value = str(response['dataSets'][0]['observations'][dimension_key][0])
template_map[value_token[:-1] + 'PREV__'] = value
prev_period_name = response.get('structure').get('dimensions').get('observation')[-1].get('values')[-2].get(
'name')
template_map[value_token[:-1] + 'PREVPERIOD__'] = prev_period_name
else:
logger.info('ABS API returned {} for {}'.format(r.status_code, data_identifier))
    time.sleep(.120)  # brief pause between successive ABS API requests
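# Illustrative sketch (not part of the original macro): how the SDMX dimension
# key used above is built. With 4 observation dimensions and a last time-period
# index of 7, the key addresses the newest observation of the first series.
def _example_dimension_key():  # pragma: no cover
    dimensions = 4
    last_period_index = 7
    return '0:' * (dimensions - 1) + str(last_period_index)  # '0:0:0:7'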
def abs_expand(item, **kwargs):
template_map = {}
    # find the primary tokens, delimited by double underscores
tokens = re.findall('__(.*?)__', item['body_html'])
for t in tokens:
if '__' + t + '__' not in token_map:
token_map['__' + t + '__'] = t
for e in token_map:
if e in item['body_html'] or e in item['headline'] or e in item['abstract']:
expand_token(e, item, template_map)
try:
item['body_html'] = render_template_string(item.get('body_html', ''), **template_map)
item['abstract'] = render_template_string(item.get('abstract', ''), **template_map)
item['headline'] = render_template_string(item.get('headline', ''), **template_map)
except Exception as ex:
logger.warning(ex)
# If the macro is being executed by a stage macro then update the item directly
if 'desk' in kwargs and 'stage' in kwargs:
update = {'body_html': item.get('body_html', ''),
'abstract': item.get('abstract', ''),
'headline': item.get('headline', '')}
get_resource_service('archive').system_update(item[config.ID_FIELD], update, item)
return get_resource_service('archive').find_one(req=None, _id=item[config.ID_FIELD])
return item
name = 'Expand ABS indicator tokens into the story'
label = 'ABS indicator expand'
callback = abs_expand
access_type = 'frontend'
action_type = 'direct'
| superdesk/superdesk-aap | server/aap/macros/abs_indicators.py | Python | agpl-3.0 | 6,529 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import collections
import itertools
import operator
import numpy
from openquake.hazardlib.imt import from_string
from openquake.hazardlib.calc import gmf, filters
from openquake.hazardlib.site import SiteCollection
from openquake.commonlib.readinput import \
get_gsims, get_rupture, get_correl_model, get_imts
MAX_INT = 2 ** 31 - 1 # this is used in the random number generator
# in this way even on 32 bit machines Python will not have to convert
# the generated seed into a long integer
# ############## utilities for the classical calculator ############### #
SourceRuptureSites = collections.namedtuple(
'SourceRuptureSites',
'source rupture sites')
def gen_ruptures(sources, site_coll, maximum_distance, monitor):
"""
Yield (source, rupture, affected_sites) for each rupture
generated by the given sources.
:param sources: a sequence of sources
:param site_coll: a SiteCollection instance
:param maximum_distance: the maximum distance
:param monitor: a Monitor object
"""
filtsources_mon = monitor('filtering sources')
genruptures_mon = monitor('generating ruptures')
filtruptures_mon = monitor('filtering ruptures')
for src in sources:
with filtsources_mon:
s_sites = src.filter_sites_by_distance_to_source(
maximum_distance, site_coll)
if s_sites is None:
continue
with genruptures_mon:
ruptures = list(src.iter_ruptures())
if not ruptures:
continue
for rupture in ruptures:
with filtruptures_mon:
r_sites = filters.filter_sites_by_distance_to_rupture(
rupture, maximum_distance, s_sites)
if r_sites is None:
continue
yield SourceRuptureSites(src, rupture, r_sites)
filtsources_mon.flush()
genruptures_mon.flush()
filtruptures_mon.flush()
def gen_ruptures_for_site(site, sources, maximum_distance, monitor):
"""
Yield source, <ruptures close to site>
:param site: a Site object
:param sources: a sequence of sources
:param monitor: a Monitor object
"""
source_rupture_sites = gen_ruptures(
sources, SiteCollection([site]), maximum_distance, monitor)
for src, rows in itertools.groupby(
source_rupture_sites, key=operator.attrgetter('source')):
yield src, [row.rupture for row in rows]
# ############## utilities for the scenario calculators ############### #
def calc_gmfs_fast(oqparam, sitecol):
"""
Build all the ground motion fields for the whole site collection in
a single step.
"""
max_dist = oqparam.maximum_distance
correl_model = get_correl_model(oqparam)
seed = oqparam.random_seed
imts = get_imts(oqparam)
[gsim] = get_gsims(oqparam)
trunc_level = oqparam.truncation_level
n_gmfs = oqparam.number_of_ground_motion_fields
rupture = get_rupture(oqparam)
res = gmf.ground_motion_fields(
rupture, sitecol, imts, gsim,
trunc_level, n_gmfs, correl_model,
filters.rupture_site_distance_filter(max_dist), seed)
return {str(imt): matrix for imt, matrix in res.items()}
# ######################### hazard maps ################################### #
# cutoff value for the poe
EPSILON = 1E-30
def compute_hazard_maps(curves, imls, poes):
"""
Given a set of hazard curve poes, interpolate a hazard map at the specified
``poe``.
:param curves:
2D array of floats. Each row represents a curve, where the values
in the row are the PoEs (Probabilities of Exceedance) corresponding to
``imls``. Each curve corresponds to a geographical location.
:param imls:
Intensity Measure Levels associated with these hazard ``curves``. Type
should be an array-like of floats.
:param poes:
Value(s) on which to interpolate a hazard map from the input
``curves``. Can be an array-like or scalar value (for a single PoE).
:returns:
        An array of shape N x P, where N is the number of curves and P the
        number of poes.
"""
curves = numpy.array(curves)
poes = numpy.array(poes)
if len(poes.shape) == 0:
# `poes` was passed in as a scalar;
# convert it to 1D array of 1 element
poes = poes.reshape(1)
if len(curves.shape) == 1:
# `curves` was passed as 1 dimensional array, there is a single site
curves = curves.reshape((1,) + curves.shape) # 1 x L
result = []
imls = numpy.log(numpy.array(imls[::-1]))
for curve in curves:
# the hazard curve, having replaced the too small poes with EPSILON
curve_cutoff = [max(poe, EPSILON) for poe in curve[::-1]]
hmap_val = []
for poe in poes:
# special case when the interpolation poe is bigger than the
            # maximum, i.e. the iml must be smaller than the minimum
            if poe > curve_cutoff[-1]:  # the greatest poe in the curve
# extrapolate the iml to zero as per
# https://bugs.launchpad.net/oq-engine/+bug/1292093
# a consequence is that if all poes are zero any poe > 0
# is big and the hmap goes automatically to zero
hmap_val.append(0)
else:
# exp-log interpolation, to reduce numerical errors
# see https://bugs.launchpad.net/oq-engine/+bug/1252770
val = numpy.exp(
numpy.interp(
numpy.log(poe), numpy.log(curve_cutoff), imls))
hmap_val.append(val)
result.append(hmap_val)
return numpy.array(result)
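# Illustrative usage (synthetic numbers, not from any real calculation):
# two sites with three-IML curves and two target poes give a 2 x 2 map.
def _example_compute_hazard_maps():  # pragma: no cover
    curves = [[0.9, 0.5, 0.1],   # site 1
              [0.8, 0.4, 0.05]]  # site 2
    imls = [0.01, 0.1, 1.0]
    poes = [0.5, 0.1]
    return compute_hazard_maps(curves, imls, poes)  # shape (2, 2)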
# ######################### GMF->curves #################################### #
# NB (MS): the approach used here will not work for non-poissonian models
def gmvs_to_haz_curve(gmvs, imls, invest_time, duration):
"""
Given a set of ground motion values (``gmvs``) and intensity measure levels
(``imls``), compute hazard curve probabilities of exceedance.
:param gmvs:
A list of ground motion values, as floats.
:param imls:
A list of intensity measure levels, as floats.
:param float invest_time:
Investigation time, in years. It is with this time span that we compute
probabilities of exceedance.
Another way to put it is the following. When computing a hazard curve,
we want to answer the question: What is the probability of ground
motion meeting or exceeding the specified levels (``imls``) in a given
time span (``invest_time``).
:param float duration:
        Time window during which GMFs occur. Another way to say it: the
        period of time over which we simulate ground motion occurrences.
NOTE: Duration is computed as the calculation investigation time
multiplied by the number of stochastic event sets.
:returns:
Numpy array of PoEs (probabilities of exceedance).
"""
# convert to numpy array and redimension so that it can be broadcast with
# the gmvs for computing PoE values; there is a gmv for each rupture
# here is an example: imls = [0.03, 0.04, 0.05], gmvs=[0.04750576]
# => num_exceeding = [1, 1, 0] coming from 0.04750576 > [0.03, 0.04, 0.05]
imls = numpy.array(imls).reshape((len(imls), 1))
num_exceeding = numpy.sum(numpy.array(gmvs) >= imls, axis=1)
poes = 1 - numpy.exp(- (invest_time / duration) * num_exceeding)
return poes
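# Worked example (illustrative only), matching the numbers in the docstring:
# the single GMV 0.04750576 exceeds two of the three levels, so with
# invest_time=50 and duration=500 the poes are
# 1 - exp(-(50/500) * [1, 1, 0]) ~= [0.095, 0.095, 0].
def _example_gmvs_to_haz_curve():  # pragma: no cover
    return gmvs_to_haz_curve([0.04750576], [0.03, 0.04, 0.05], 50.0, 500.0)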
# ################## utilities for classical calculators ################ #
def make_uhs(maps):
"""
Make Uniform Hazard Spectra curves for each location.
It is assumed that the `lons` and `lats` for each of the ``maps`` are
uniform.
:param maps:
A composite array with shape N x P, where N is the number of
sites and P is the number of poes in the hazard maps
:returns:
an array N x I x P where I the number of intensity measure types of
kind SA (with PGA = SA(0)), containing the hazard maps
"""
sorted_imts = list(map(str, sorted(
from_string(imt) for imt in maps.dtype.fields
if imt.startswith('SA') or imt == 'PGA')))
hmaps = numpy.array([maps[imt] for imt in sorted_imts]) # I * N * P
return hmaps.transpose(1, 0, 2) # N * I * P
def build_dict(shape, factory):
"""
Build a dictionary key -> factory(), where the key is a multi-index
obtained from indices of the given shape. For instance
>>> sorted(build_dict((2, 2), list).items())
[((0, 0), []), ((0, 1), []), ((1, 0), []), ((1, 1), [])]
"""
return {k: factory() for k in itertools.product(*map(range, shape))}
| g-weatherill/oq-risklib | openquake/calculators/calc.py | Python | agpl-3.0 | 9,525 |
# -*- coding: utf-8 -*-
# Copyright 2017 Matmoz d.o.o. (<http://www.matmoz.si>).
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import models
| sysadminmatmoz/pmis | change_management/__init__.py | Python | agpl-3.0 | 173 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import test_sale
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | odoo-arg/odoo_l10n_ar | l10n_ar_sale/tests/__init__.py | Python | agpl-3.0 | 955 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<https://micronaet.com>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrderLine(orm.Model):
""" Model name: SaleOrderLine
"""
_inherit = 'sale.order.line'
def restore_stock_status_user_value(
self, cr, uid, no_inventory_status, context=None):
''' Update with previous value
'''
return self.pool.get('res.users').write(
cr, uid, [uid], {
'no_inventory_status': no_inventory_status,
}, context=context)
def return_view_assign_wizard(self, cr, uid, ids, context=None):
''' Open wizard view:
'''
wiz_pool = self.pool.get('sale.order.line.assign.stock.wizard')
# Activate stock status:
user_pool = self.pool.get('res.users')
user = user_pool.browse(cr, uid, uid, context=context)
no_inventory_status = user.no_inventory_status
user_pool.write(cr, uid, [uid], {
'no_inventory_status': False,
}, context=context)
# ---------------------------------------------------------------------
# Check data:
# ---------------------------------------------------------------------
# A. Check previsional order:
line = self.browse(cr, uid, ids, context=context)[0]
order = line.order_id
if order.previsional:
self.restore_stock_status_user_value(
cr, uid, no_inventory_status, context=context)
raise osv.except_osv(
_('Errore'),
_('''Ordine previsionale, non permessa una assegnazione da
magazzino in quando viene fatto per caricare il magazzino
'''))
# B. State of order:
if order.state not in ('manual', 'progress'):
self.restore_stock_status_user_value(
cr, uid, no_inventory_status, context=context)
raise osv.except_osv(
_('Errore'),
_('''Ordine non nel corretto stato:
solo gli ordini attivi non chiusi possono avere
assegnazioni da magazzino.
'''))
# C. Available in stock:
product = line.product_id
available = product.mx_net_mrp_qty - product.mx_mrp_b_locked
# To reenter if this product has assigned
if available + line.mx_assigned_qty <= 0.0:
self.restore_stock_status_user_value(
cr, uid, no_inventory_status, context=context)
raise osv.except_osv(
_(u'Errore'),
_(u'Il prodotto %s non ha disponibilità a magazzino!' % (
product.default_code or product.name or '?'
)),
)
# D. Remain positive:
oc_qty = line.product_uom_qty
delivery_qty = line.delivered_qty
assigned = line.mx_assigned_qty # current
to_assign = oc_qty # all ordered
maked = 0.0
warning = ''
if 'product_uom_maked_sync_qty' in line._columns:
maked = line.product_uom_maked_sync_qty
        # XXX if a production already exists, should we wait for it?
if line.mrp_id:
warning = 'PRESENTE UNA PRODUZIONE COLLEGATA'
if maked:
to_assign = oc_qty - maked # remain to produce
warning += ' CON MATERIALE PRECEDENTEMENTE CARICATO'
if warning:
warning += '!!!'
if to_assign <= 0:
self.restore_stock_status_user_value(
cr, uid, no_inventory_status, context=context)
raise osv.except_osv(
_(u'Errore'),
_(u'Al prodotto %s non servono assegnazioni di magazzino!' % (
product.default_code or product.name or '?'
)),
)
# XXX To remove assign I cannot add this check!!!
#elif abs(to_assign - assigned) <= 0.01: # approx check
# self.restore_stock_status_user_value(
# cr, uid, no_inventory_status, context=context)
# raise osv.except_osv(
# _(u'Errore'),
# _(u'Al prodotto %s sono già assegnati %s!' % (
# product.default_code or product.name or '?',
# assigned,
# )),
# )
# ---------------------------------------------------------------------
# Create record for wizard and open:
# ---------------------------------------------------------------------
        # Default assignment:
if to_assign >= (available + assigned):
new_assigned_qty = available + assigned
else:
new_assigned_qty = to_assign
wiz_id = wiz_pool.create(cr, uid, {
'new_assigned_qty': new_assigned_qty,
'line_id': ids[0],
'status': '''
OC originale: <b>[ %s ]</b> -
Prodotte: <b>[ %s ]</b> -
Consegnate: <b>[ %s ]</b><br/><br/>
<i>Disponibili a magazzino: <b>[ %s ]</b> +
Assegnati in precedenza <b>[ %s ]</b> =
Disponibili <b>[ %s ]</b>
<br/>
</i>
<font color="red"><b>%s</b></font>
''' % (
oc_qty,
maked,
delivery_qty,
available,
assigned,
available + assigned,
warning,
)
}, context=context)
# Get and return correct view:
model_pool = self.pool.get('ir.model.data')
view_id = model_pool.get_object_reference(
cr, uid,
'inventory_status_assign_wizard',
'sale_order_line_assign_stock_wizard_view')[1]
self.restore_stock_status_user_value(
cr, uid, no_inventory_status, context=context)
return {
'type': 'ir.actions.act_window',
'name': _('Assegna q. magazzino'),
'view_type': 'form',
'view_mode': 'form',
'res_id': wiz_id,
'res_model': 'sale.order.line.assign.stock.wizard',
'view_id': view_id,
'views': [(view_id, 'form')],
'domain': [],
'context': context,
'target': 'new',
'nodestroy': False,
}
class SaleOrderLineAssignStockWizard(orm.TransientModel):
''' Wizard for stock wizard
'''
_name = 'sale.order.line.assign.stock.wizard'
# -------------------------------------------------------------------------
# Wizard button event:
# -------------------------------------------------------------------------
def action_remove_qty(self, cr, uid, ids, context=None):
''' Event for button done
'''
if context is None:
context = {}
# Remove assignement:
line_pool = self.pool.get('sale.order.line')
wiz_browse = self.browse(cr, uid, ids, context=context)[0]
line_id = wiz_browse.line_id
return line_pool.write(cr, uid, line_id.id, {
'mx_assigned_qty': 0,
}, context=context)
def action_assign_qty(self, cr, uid, ids, context=None):
''' Event for button done
'''
if context is None:
context = {}
line_pool = self.pool.get('sale.order.line')
wiz_browse = self.browse(cr, uid, ids, context=context)[0]
# Parameters:
line_id = wiz_browse.line_id
new_assigned_qty = wiz_browse.new_assigned_qty
# Update new assignement:
return line_pool.write(cr, uid, line_id.id, {
'mx_assigned_qty': new_assigned_qty,
}, context=context)
_columns = {
'line_id': fields.many2one(
'sale.order.line', 'Sale line'),
'status': fields.text('Stato riga'),
'new_assigned_qty': fields.float('Nuova assegnazione', digits=(16, 2)),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-product | inventory_status_assign_wizard/assign_wizard.py | Python | agpl-3.0 | 9,908 |
import os
import pickle
import csv
import pandas as pd
import math
from functools import lru_cache, reduce
from collections import defaultdict
USE_ROME_SLICING_DATASET = False # Rome slicing dataset is not ready yet
if USE_ROME_SLICING_DATASET:
OGR_ROME_FILE = "rome_slicing_dataset/ogr_rome_mapping.csv"
ROME_FILE = "rome_slicing_dataset/rome_labels.csv"
ROME_NAF_FILE = "rome_slicing_dataset/rome_naf_mapping.csv"
else:
OGR_ROME_FILE = "ogr_rome_mapping.csv"
ROME_FILE = "rome_labels.csv"
ROME_NAF_FILE = "rome_naf_mapping.csv"
def load_file(func, filename):
full_filename = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "data/%s" % filename)
return func(full_filename)
def load_pickle_file(filename):
def f(full_filename):
        return pickle.load(open(full_filename, "rb"))  # pickle files are binary
return load_file(f, filename)
def load_pd_dataframe(filename, delimiter='', dtype=None):
    def f(full_filename):
        # NB: the delimiter argument is currently unused; pandas' default ',' is applied.
        return pd.read_csv(open(full_filename, "r"), dtype=dtype)
return load_file(f, filename)
def load_csv_file(filename, delimiter='|'):
def f(full_filename):
csv_file = open(full_filename, 'r')
reader = csv.reader(csv_file, delimiter=delimiter)
return reader
reader = load_file(f, filename)
rows = []
len_previous_row = None
for row in reader:
if len_previous_row:
# at least second line of CSV file
if len(row) == 0:
# skip empty rows
continue
elif len(row) != len_previous_row:
raise IndexError(
"found row with abnormal number of fields : %s" % row)
rows.append(row)
else:
# first line of CSV file: headers should be ignored
pass
len_previous_row = len(row)
return rows
def load_rows_as_set(rows):
for row in rows:
if len(row) != 1:
raise IndexError("wrong number of fields")
return set([row[0] for row in rows])
def load_rows_as_dict(rows):
d = {}
for row in rows:
if len(row) != 2:
raise IndexError("wrong number of fields")
if row[0] in d:
raise ValueError("duplicate key")
d[row[0]] = row[1]
return d
def load_rows_as_dict_of_dict(rows):
d = {}
for row in rows:
if len(row) != 3:
raise IndexError("wrong number of fields")
# values of 3 fields
f1 = row[0]
f2 = row[1]
f3 = row[2]
if f1 in d:
if f2 in d[f1]:
raise ValueError("duplicate key")
else:
d[f1][f2] = f3
else:
d[f1] = {f2: f3}
return d
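# Illustrative sketch (hypothetical rows): three-column rows become a dict of
# dicts keyed on the first two fields.
def _example_rows_as_dict_of_dict():  # pragma: no cover
    rows = [['62', 'M1607', 'mail'], ['62', 'K2204', 'phone']]
    return load_rows_as_dict_of_dict(rows)
    # -> {'62': {'M1607': 'mail', 'K2204': 'phone'}}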
@lru_cache(maxsize=None)
def load_related_rome_areas():
"""
Build a dict with department code (code insee) as keys and area code as values (bassins d'emploi).
Used for PSE study in 2021.
"""
rows = load_csv_file("lbb-pse_bassin-emploi_code-insee.csv", delimiter=',')
return reduce(reduceRelateRomesAreas, rows, {})
def reduceRelateRomesAreas(aggr, row):
[code_insee, code_area] = row
aggr[code_insee] = code_area
return aggr
@lru_cache(maxsize=None)
def load_related_rome():
"""
Build a dict with area code (bassin d'emploi) as keys.
The values are dict with rome code as keys and a list of related rome codes as values.
Each related rome is a dict with `rome` and `score` properties.
Used for PSE study.
"""
rows = load_csv_file("lbb-pse_bassin-emploi_rome-connexe.csv", delimiter=',')
return reduce(reduceRelateRomes, rows, {})
def reduceRelateRomes(aggr, row):
[code_area, rome, rome_connexe, score] = row
entry_code_area = aggr.get(code_area, {})
entry_rome = entry_code_area.get(rome, [])
entry_rome.append({'rome': rome_connexe, 'score': float(score)})
entry_code_area[rome] = entry_rome
aggr[code_area] = entry_code_area
return aggr
@lru_cache(maxsize=None)
def load_city_codes():
rows = load_csv_file("city_codes.csv")
commune_id_to_commune_name = load_rows_as_dict(rows)
return commune_id_to_commune_name
@lru_cache(maxsize=None)
def load_contact_modes():
"""
Use comma delimiter instead of pipe so that it is recognized by github
and can easily be edited online by the intrapreneurs.
"""
rows = load_csv_file("contact_modes.csv", delimiter=',')
naf_prefix_to_rome_to_contact_mode = load_rows_as_dict_of_dict(rows)
return naf_prefix_to_rome_to_contact_mode
@lru_cache(maxsize=None)
def load_ogr_labels():
rows = load_csv_file("ogr_labels.csv")
ogr_to_label = load_rows_as_dict(rows)
return ogr_to_label
@lru_cache(maxsize=None)
def load_groupements_employeurs():
rows = load_csv_file("groupements_employeurs.csv")
sirets = load_rows_as_set(rows)
return sirets
@lru_cache(maxsize=None)
def load_ogr_rome_mapping():
rows = load_csv_file(OGR_ROME_FILE)
OGR_COLUMN = 0
ROME_COLUMN = 1
ogr_to_rome = {}
for row in rows:
ogr = row[OGR_COLUMN]
if ogr not in load_ogr_labels():
raise ValueError("missing label for OGR %s" % ogr)
rome = row[ROME_COLUMN]
if rome not in load_rome_labels():
raise ValueError("missing label for ROME %s" % rome)
ogr_to_rome[ogr] = rome
return ogr_to_rome
@lru_cache(maxsize=None)
def load_rome_labels():
rows = load_csv_file(ROME_FILE)
rome_to_label = load_rows_as_dict(rows)
return rome_to_label
@lru_cache(maxsize=None)
def load_naf_labels():
rows = load_csv_file("naf_labels.csv")
naf_to_label = load_rows_as_dict(rows)
return naf_to_label
@lru_cache(maxsize=None)
def load_rome_naf_mapping():
return load_csv_file(ROME_NAF_FILE, delimiter=',')
@lru_cache(maxsize=None)
def load_metiers_tension():
csv_metiers_tension = load_csv_file("metiers_tension.csv", ',')
rome_to_tension = defaultdict(int)
for row in csv_metiers_tension:
tension_pct = row[2]
rome_code = row[3]
        # FIXME: remove rows where tension is #N/A from the dataset, so this check can go away
if tension_pct != '#N/A':
tension_pct = float(tension_pct)
if 0 <= tension_pct <= 100:
# As a single ROME can have multiple tensions,
# It has been decided to take the higher tension for a rome
rome_to_tension[rome_code] = max(rome_to_tension[rome_code], tension_pct)
else:
raise ValueError
return rome_to_tension
# Used for the PSE study: returns the list of SIRETs that must not be seen on LBB
@lru_cache(maxsize=None)
def load_siret_to_remove():
rows = load_csv_file("untreated_BB.csv", ',')
sirets_to_remove = load_rows_as_set(rows)
return sirets_to_remove
# Used by the importer job to extract etablissements
@lru_cache(maxsize=None)
def load_effectif_labels():
'''
Dataframe to load look like this.
code label
0 0 0-0
1 1 1-2
2 2 3-5
3 3 6-9
4 11 10-19
5 12 20-49
6 21 50-99
7 22 100-199
8 31 200-249
9 32 250-499
10 41 500-999
11 42 1000-1999
12 51 2000-4999
13 52 5000-9999
14 53 10000+
'''
def create_column(row, which='start_effectif'):
        '''
        From the label, create start and end columns delimiting the interval.
        They let us determine, from an office's number of employees, which
        category the office belongs to.
        '''
#we split on the label which is from type "10-19" OR 10000+
splitted_label = row['label'].split('-')
if len(splitted_label) == 1: #10000+
value = math.inf if which == 'end_effectif' else 10000
else:
if which == 'start_effectif':
value = int(splitted_label[0])
else:
value = int(splitted_label[1])
return value
df = load_pd_dataframe("helpers/effectif_labels.csv", ',', dtype={'code':str})
df['start_effectif'] = df.apply(lambda row: create_column(row,'start_effectif'), axis=1)
df['end_effectif'] = df.apply(lambda row: create_column(row,'end_effectif'), axis=1)
return df
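# Illustrative usage (assumes the effectif CSV shown in the docstring above):
# pick the effectif code whose [start, end] interval contains a headcount.
def _example_effectif_code(headcount=42):  # pragma: no cover
    df = load_effectif_labels()
    match = df[(df.start_effectif <= headcount) & (df.end_effectif >= headcount)]
    return match.iloc[0]['code']  # '12', i.e. the 20-49 bracket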
OGR_ROME_CODES = load_ogr_rome_mapping()
ROME_CODES = list(OGR_ROME_CODES.values())
| StartupsPoleEmploi/labonneboite | labonneboite/common/load_data.py | Python | agpl-3.0 | 8,478 |
"""
Forms for the project application
"""
# Django
from django import forms
# Third Party
from dal import forward
from dal.autocomplete import TaggitSelect2
from taggit.forms import TagField
# MuckRock
from muckrock.core import autocomplete
from muckrock.project.models import Project
class ProjectCreateForm(forms.ModelForm):
"""Form for the basic fields of a project."""
tags = TagField(
widget=TaggitSelect2(
url="tag-autocomplete",
attrs={"data-placeholder": "Search tags", "data-width": "100%"},
),
help_text="Separate tags with commas.",
required=False,
)
class Meta:
model = Project
fields = ["title", "summary", "image", "tags"]
help_texts = {
"summary": "A short description of the project and its goals.",
"image": "Image should be large and high-resolution.",
}
class ProjectUpdateForm(forms.ModelForm):
"""Form for updating a project instance"""
tags = TagField(
widget=TaggitSelect2(
url="tag-autocomplete",
attrs={"data-placeholder": "Search tags", "data-width": "100%"},
),
help_text="Separate tags with commas.",
required=False,
)
class Meta:
model = Project
fields = [
"title",
"summary",
"image",
"tags",
"description",
"contributors",
"requests",
"articles",
]
widgets = {
"description": forms.Textarea(attrs={"class": "prose-editor"}),
"contributors": autocomplete.ModelSelect2Multiple(
url="user-autocomplete",
attrs={
"data-placeholder": "Search for users",
"data-minimum-input-length": 2,
},
),
"requests": autocomplete.ModelSelect2Multiple(
url="foia-request-autocomplete",
attrs={"data-placeholder": "Search for requests"},
),
"articles": autocomplete.ModelSelect2Multiple(
url="article-autocomplete",
attrs={"data-placeholder": "Search for articles"},
),
}
help_texts = {
"title": "Changing the title will change the URL of your project."
}
class ProjectPublishForm(forms.Form):
"""Form for publishing a project."""
notes = forms.CharField(required=False, widget=forms.Textarea)
class ProjectManagerForm(forms.Form):
"""Form for managing a list of projects"""
projects = forms.ModelMultipleChoiceField(
required=False,
queryset=Project.objects.none(),
widget=autocomplete.ModelSelect2Multiple(
url="project-autocomplete",
attrs={"placeholder": "Search for a project"},
forward=(forward.Const(True, "manager"),),
),
)
def __init__(self, *args, **kwargs):
user = kwargs.pop("user")
super(ProjectManagerForm, self).__init__(*args, **kwargs)
self.fields["projects"].queryset = Project.objects.get_manager(user)
| MuckRock/muckrock | muckrock/project/forms.py | Python | agpl-3.0 | 3,167 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import tempfile
import unittest
import random
import csv
import numpy
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.AnomalyLikelihoodRegion_capnp import\
AnomalyLikelihoodRegionProto
from nupic.regions.anomaly_likelihood_region import AnomalyLikelihoodRegion
from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood
from pkg_resources import resource_filename
_INPUT_DATA_FILE = resource_filename(
"nupic.datafiles", "extra/hotgym/hotgym-anomaly.csv"
)
""" Unit tests for the anomaly likelihood region """
class AnomalyLikelihoodRegionTest(unittest.TestCase):
"""Tests for anomaly likelihood region"""
def testParamterError(self):
""" ensure historicWindowSize is greater than estimationSamples """
try:
anomalyLikelihoodRegion = AnomalyLikelihoodRegion(estimationSamples=100,
historicWindowSize=99)
      self.fail("Should have failed with ValueError")
except ValueError:
pass
def testLikelihoodValues(self):
""" test to see if the region keeps track of state correctly and produces
the same likelihoods as the AnomalyLikelihood module """
anomalyLikelihoodRegion = AnomalyLikelihoodRegion()
anomalyLikelihood = AnomalyLikelihood()
inputs = AnomalyLikelihoodRegion.getSpec()['inputs']
outputs = AnomalyLikelihoodRegion.getSpec()['outputs']
with open (_INPUT_DATA_FILE) as f:
reader = csv.reader(f)
reader.next()
for record in reader:
consumption = float(record[1])
anomalyScore = float(record[2])
likelihood1 = anomalyLikelihood.anomalyProbability(
consumption, anomalyScore)
inputs['rawAnomalyScore'] = numpy.array([anomalyScore])
inputs['metricValue'] = numpy.array([consumption])
anomalyLikelihoodRegion.compute(inputs, outputs)
likelihood2 = outputs['anomalyLikelihood'][0]
self.assertEqual(likelihood1, likelihood2)
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testSerialization(self):
""" test to ensure serialization preserves the state of the region
correctly. """
anomalyLikelihoodRegion1 = AnomalyLikelihoodRegion()
inputs = AnomalyLikelihoodRegion.getSpec()['inputs']
outputs = AnomalyLikelihoodRegion.getSpec()['outputs']
parameters = AnomalyLikelihoodRegion.getSpec()['parameters']
# Make sure to calculate distribution by passing the probation period
learningPeriod = parameters['learningPeriod']['defaultValue']
reestimationPeriod = parameters['reestimationPeriod']['defaultValue']
probation = learningPeriod + reestimationPeriod
for _ in xrange(0, probation + 1):
inputs['rawAnomalyScore'] = numpy.array([random.random()])
inputs['metricValue'] = numpy.array([random.random()])
anomalyLikelihoodRegion1.compute(inputs, outputs)
score1 = outputs['anomalyLikelihood'][0]
proto1 = AnomalyLikelihoodRegionProto.new_message()
anomalyLikelihoodRegion1.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = AnomalyLikelihoodRegionProto.read(f)
    # Load the deserialized proto
anomalyLikelihoodRegion2 = AnomalyLikelihoodRegion.read(proto2)
self.assertEqual(anomalyLikelihoodRegion1, anomalyLikelihoodRegion2)
window = parameters['historicWindowSize']['defaultValue']
for _ in xrange(0, window + 1):
inputs['rawAnomalyScore'] = numpy.array([random.random()])
inputs['metricValue'] = numpy.array([random.random()])
anomalyLikelihoodRegion1.compute(inputs, outputs)
score1 = outputs['anomalyLikelihood'][0]
anomalyLikelihoodRegion2.compute(inputs, outputs)
score2 = outputs['anomalyLikelihood'][0]
self.assertEqual(score1, score2)
if __name__ == "__main__":
unittest.main()
| scottpurdy/nupic | tests/unit/nupic/regions/anomaly_likelihood_region_test.py | Python | agpl-3.0 | 4,989 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Resource information of graphite
"""
def get_name(friendly=False):
"""Get name of this resource
:return: name of this resource
:rtype: str
"""
if friendly: # pragma: no cover
return 'Graphite connection'
return 'graphite'
def get_doc(): # pragma: no cover
"""Get documentation of this resource
:return: rst string
:rtype: str
"""
return """
The ``graphite`` model contains information to provide the monitored system performance
data to Carbon/Graphite.
    The Alignak backend will use this information to connect to a Carbon daemon and send the
    timeseries data. If you are using a StatsD daemon as a front-end of the Carbon daemon,
    create a relation with a StatsD data model instance. To make the Alignak backend create
    some Grafana panels for the metrics sent to Graphite, create a relation with a Grafana
    data model instance.
"""
def get_schema():
"""Schema structure of this resource
:return: schema dictionary
:rtype: dict
"""
return {
'schema': {
'schema_version': {
'type': 'integer',
'default': 2,
},
'name': {
'schema_version': 1,
'title': 'Graphite connection name',
'comment': 'Unique Graphite connection name',
'type': 'string',
'required': True,
'empty': False,
'unique': True,
},
'carbon_address': {
'schema_version': 1,
'title': 'Carbon daemon address',
'comment': '',
'type': 'string',
'required': True,
'empty': False,
},
'carbon_port': {
'schema_version': 1,
'title': 'Carbon daemon port',
'comment': '',
'type': 'integer',
'empty': False,
'default': 2004
},
'graphite_address': {
'schema_version': 1,
'title': 'Graphite address',
'comment': '',
'type': 'string',
'required': True,
'empty': False,
},
'graphite_port': {
'schema_version': 1,
'title': 'Graphite port',
'comment': '',
'type': 'integer',
'empty': False,
'default': 8080
},
'prefix': {
'schema_version': 1,
'title': 'Metrics prefix',
'comment': 'Prefix that will be prepended to the metrics sent to this TS DB.',
'type': 'string',
'default': '',
},
'realms_prefix': {
'schema_version': 2,
"title": "Realms prefix",
"comment": "Include the realms prefix for the metrics sent to this TS DB.",
'type': 'boolean',
'default': True
},
'grafana': {
'schema_version': 1,
'title': 'Grafana relation',
            'comment': 'If set, the Alignak backend will use this Grafana relation for '
                       'the metrics sent to Graphite. It will create/update the '
                       'Grafana panels accordingly.',
'type': 'objectid',
'data_relation': {
'resource': 'grafana',
'embeddable': True
},
'nullable': True,
'default': None
},
'statsd': {
'schema_version': 1,
'title': 'StatsD relation',
            'comment': 'If set, the Alignak backend will use this StatsD relation for '
                       'the metrics sent to Graphite.',
'type': 'objectid',
'data_relation': {
'resource': 'statsd',
'embeddable': True
},
'nullable': True,
'default': None
},
# Realm
'_realm': {
'schema_version': 1,
'title': 'Realm',
'comment': 'Realm this element belongs to.',
'type': 'objectid',
'data_relation': {
'resource': 'realm',
'embeddable': True
},
'required': True,
},
'_sub_realm': {
'schema_version': 1,
'title': 'Sub-realms',
'comment': 'Is this element visible in the sub-realms of its realm?',
'type': 'boolean',
'default': True
},
# Users CRUD permissions
'_users_read': {
'schema_version': 1,
'type': 'list',
'schema': {
'type': 'objectid',
'data_relation': {
'resource': 'user',
'embeddable': True,
}
},
},
'_users_update': {
'schema_version': 1,
'type': 'list',
'schema': {
'type': 'objectid',
'data_relation': {
'resource': 'user',
'embeddable': True,
}
},
},
'_users_delete': {
'schema_version': 1,
'type': 'list',
'schema': {
'type': 'objectid',
'data_relation': {
'resource': 'user',
'embeddable': True,
}
},
},
},
'schema_deleted': {}
}
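# Illustrative sketch (not part of the original model): a minimal document
# satisfying the required fields of the schema above. The '_realm' ObjectId
# below is hypothetical.
def get_example_document():  # pragma: no cover
    """Return a minimal graphite connection document (illustrative only)."""
    return {
        'name': 'graphite-main',
        'carbon_address': '127.0.0.1',
        'graphite_address': '127.0.0.1',
        '_realm': '5d6a39e4a9c3e0001a000001',
    }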
| Alignak-monitoring-contrib/alignak-backend | alignak_backend/models/graphite.py | Python | agpl-3.0 | 6,079 |
##############################################################################
#
# Copyright (C) 2020 Compassion CH (http://www.compassion.ch)
# @author: Théo Nikles <theo.nikles@gmail.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
def migrate(cr, version):
if not version:
return
cr.execute("""
INSERT INTO mobile_app_messages(partner_id)
SELECT id FROM res_partner;
UPDATE res_partner p
SET app_messages = (
SELECT id FROM mobile_app_messages
WHERE partner_id = p.id
);
""")
| CompassionCH/compassion-modules | mobile_app_connector/migrations/12.0.1.0.1/post-migration.py | Python | agpl-3.0 | 660 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from datetime import timedelta, timezone, datetime
from flask import current_app as app
from werkzeug.exceptions import HTTPException
import superdesk
from superdesk.activity import ACTIVITY_EVENT, notify_and_add_activity
from superdesk.celery_app import celery
from superdesk.celery_task_utils import get_lock_id, get_host_id
from superdesk.errors import ProviderError
from superdesk.io import registered_feeding_services, registered_feed_parsers
from superdesk.io.iptc import subject_codes
from superdesk.lock import lock, unlock
from superdesk.media.media_operations import download_file_from_url, process_file
from superdesk.media.renditions import generate_renditions
from superdesk.metadata.item import GUID_NEWSML, GUID_FIELD, FAMILY_ID, ITEM_TYPE, CONTENT_TYPE, CONTENT_STATE, \
ITEM_STATE
from superdesk.metadata.utils import generate_guid
from superdesk.notification import push_notification
from superdesk.stats import stats
from superdesk.upload import url_for_media
from superdesk.utc import utcnow, get_expiry_date
from superdesk.workflow import set_default_state
UPDATE_SCHEDULE_DEFAULT = {'minutes': 5}
LAST_UPDATED = 'last_updated'
LAST_ITEM_UPDATE = 'last_item_update'
IDLE_TIME_DEFAULT = {'hours': 0, 'minutes': 0}
logger = logging.getLogger(__name__)
def is_service_and_parser_registered(provider):
"""
    Tests if the Feed Service and Feed Parser associated with the provider are registered with the application.
:param provider:
:type provider: dict :py:class:`superdesk.io.ingest_provider_model.IngestProviderResource`
:return: True if both Feed Service and Feed Parser are registered. False otherwise.
:rtype: bool
"""
    return provider.get('feeding_service') in registered_feeding_services and \
        (provider.get('feed_parser') is None or
         provider.get('feed_parser') in registered_feed_parsers)
def is_scheduled(provider):
"""Test if given provider should be scheduled for update.
:param provider: ingest provider
"""
now = utcnow()
last_updated = provider.get(LAST_UPDATED, now - timedelta(days=100)) # if never updated run now
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return last_updated + timedelta(**update_schedule) < now
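# Worked example (illustrative only): with the default 5-minute schedule, a
# provider last updated 6 minutes ago is due for an update.
def _example_is_scheduled():  # pragma: no cover
    provider = {LAST_UPDATED: utcnow() - timedelta(minutes=6)}
    return is_scheduled(provider)  # True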
def is_closed(provider):
"""Test if provider is closed.
:param provider: ingest provider
"""
return provider.get('is_closed', False)
def filter_expired_items(provider, items):
"""
Filters out the item from the list of articles to be ingested
if they are expired and item['type'] not in provider['content_types'].
:param provider: Ingest Provider Details.
:type provider: dict :py:class: `superdesk.io.ingest_provider_model.IngestProviderResource`
:param items: list of items received from the provider
:type items: list
:return: list of items which can be saved into ingest collection
:rtype: list
"""
def is_not_expired(item):
if item.get('expiry') or item.get('versioncreated'):
expiry = item.get('expiry', item['versioncreated'] + delta)
if expiry.tzinfo:
return expiry > utcnow()
return False
try:
delta = timedelta(minutes=provider.get('content_expiry', app.config['INGEST_EXPIRY_MINUTES']))
filtered_items = [item for item in items if is_not_expired(item) and
item[ITEM_TYPE] in provider['content_types']]
if len(items) != len(filtered_items):
logger.debug('Received {0} articles from provider {1}, but only {2} are eligible to be saved in ingest'
.format(len(items), provider['name'], len(filtered_items)))
return filtered_items
except Exception as ex:
raise ProviderError.providerFilterExpiredContentError(ex, provider)
def get_provider_rule_set(provider):
if provider.get('rule_set'):
return superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
def get_provider_routing_scheme(provider):
"""Returns the ingests provider's routing scheme configuration.
If provider has a routing scheme defined (i.e. scheme ID is not None), the
scheme is fetched from the database. If not, nothing is returned.
For all scheme rules that have a reference to a content filter defined,
that filter's configuration is fetched from the database as well and
embedded into the corresponding scheme rule.
:param dict provider: ingest provider configuration
:return: fetched provider's routing scheme configuration (if any)
:rtype: dict or None
"""
if not provider.get('routing_scheme'):
return None
schemes_service = superdesk.get_resource_service('routing_schemes')
filters_service = superdesk.get_resource_service('content_filters')
scheme = schemes_service.find_one(_id=provider['routing_scheme'], req=None)
# for those routing rules that have a content filter defined,
# get that filter from DB and embed it into the rule...
rules_filters = (
(rule, str(rule['filter']))
for rule in scheme['rules'] if rule.get('filter'))
for rule, filter_id in rules_filters:
content_filter = filters_service.find_one(_id=filter_id, req=None)
rule['filter'] = content_filter
return scheme
def get_task_ttl(provider):
update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT)
return update_schedule.get('minutes', 0) * 60 + update_schedule.get('hours', 0) * 3600
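# Example: the default schedule {'minutes': 5} gives a task TTL of
# 5 * 60 + 0 * 3600 = 300 seconds.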
def get_is_idle(provider):
last_item = provider.get(LAST_ITEM_UPDATE)
idle_time = provider.get('idle_time', IDLE_TIME_DEFAULT)
if isinstance(idle_time['hours'], datetime):
idle_hours = 0
else:
idle_hours = idle_time['hours']
if isinstance(idle_time['minutes'], datetime):
idle_minutes = 0
else:
idle_minutes = idle_time['minutes']
    # there is an update time and the idle time is non-zero
if last_item and (idle_hours != 0 or idle_minutes != 0):
if utcnow() > last_item + timedelta(hours=idle_hours, minutes=idle_minutes):
return True
return False
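# Example: with idle_time {'hours': 1, 'minutes': 0}, a provider whose last
# ingested item arrived more than an hour ago is reported as idle.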
def get_task_id(provider):
return 'update-ingest-{0}-{1}'.format(provider.get('name'), provider.get(superdesk.config.ID_FIELD))
class UpdateIngest(superdesk.Command):
"""Update ingest providers."""
option_list = {superdesk.Option('--provider', '-p', dest='provider_name')}
def run(self, provider_name=None):
lookup = {} if not provider_name else {'name': provider_name}
for provider in superdesk.get_resource_service('ingest_providers').get(req=None, lookup=lookup):
if not is_closed(provider) and is_service_and_parser_registered(provider) and is_scheduled(provider):
kwargs = {
'provider': provider,
'rule_set': get_provider_rule_set(provider),
'routing_scheme': get_provider_routing_scheme(provider)
}
update_provider.apply_async(expires=get_task_ttl(provider), kwargs=kwargs)
@celery.task(soft_time_limit=1800, bind=True)
def update_provider(self, provider, rule_set=None, routing_scheme=None):
"""
Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
updates the provider.
    :param self: the bound celery task instance
:param provider: Ingest Provider Details
:type provider: dict :py:class:`superdesk.io.ingest_provider_model.IngestProviderResource`
:param rule_set: Translation Rule Set if one is associated with Ingest Provider.
:type rule_set: dict :py:class:`apps.rules.rule_sets.RuleSetsResource`
:param routing_scheme: Routing Scheme if one is associated with Ingest Provider.
:type routing_scheme: dict :py:class:`apps.rules.routing_rules.RoutingRuleSchemeResource`
"""
lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])
host_name = get_host_id(self)
if not lock(lock_name, host_name, expire=1800):
return
try:
feeding_service = registered_feeding_services[provider['feeding_service']]
feeding_service = feeding_service.__class__()
update = {LAST_UPDATED: utcnow()}
for items in feeding_service.update(provider):
ingest_items(items, provider, feeding_service, rule_set, routing_scheme)
stats.incr('ingest.ingested_items', len(items))
if items:
update[LAST_ITEM_UPDATE] = utcnow()
# Some Feeding Services update the collection and by this time the _etag might have been changed.
# So it's necessary to fetch it once again. Otherwise, OriginalChangedError is raised.
ingest_provider_service = superdesk.get_resource_service('ingest_providers')
provider = ingest_provider_service.find_one(req=None, _id=provider[superdesk.config.ID_FIELD])
ingest_provider_service.system_update(provider[superdesk.config.ID_FIELD], update, provider)
if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
admins = superdesk.get_resource_service('users').get_users_by_user_type('administrator')
notify_and_add_activity(
ACTIVITY_EVENT,
'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
resource='ingest_providers', user_list=admins, name=provider.get('name'),
last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))
logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))
if LAST_ITEM_UPDATE in update: # Only push a notification if there has been an update
push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
finally:
unlock(lock_name, host_name)
def process_anpa_category(item, provider):
try:
anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
if anpa_categories:
for item_category in item['anpa_category']:
for anpa_category in anpa_categories['items']:
if anpa_category['is_active'] is True \
and item_category['qcode'].lower() == anpa_category['qcode'].lower():
item_category['name'] = anpa_category['name']
# make the case of the qcode match what we hold in our dictionary
item_category['qcode'] = anpa_category['qcode']
break
except Exception as ex:
raise ProviderError.anpaError(ex, provider)
def derive_category(item, provider):
"""
    Assuming that the item has at least one IPTC subject, use the vocabulary map to derive an anpa category.
    :param item: ingested item
    :param provider: ingest provider the item was received from
    :return: the item with an anpa category set, if one could be derived
"""
try:
categories = []
subject_map = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='iptc_category_map')
if subject_map:
for entry in (map_entry for map_entry in subject_map['items'] if map_entry['is_active']):
for subject in item.get('subject', []):
if subject['qcode'] == entry['subject']:
if not any(c['qcode'] == entry['category'] for c in categories):
categories.append({'qcode': entry['category']})
if len(categories):
item['anpa_category'] = categories
process_anpa_category(item, provider)
except Exception as ex:
logger.exception(ex)
def process_iptc_codes(item, provider):
"""
    Ensures that the higher-level IPTC codes are present by inserting them if missing; for example,
    given 15039001 (Formula One), make sure that 15039000 (motor racing) and 15000000 (sport) are there as well.
    :param item: A story item
    :param provider: ingest provider the item was received from
    :return: A story item with possibly expanded subjects
"""
try:
def iptc_already_exists(code):
for entry in item['subject']:
if 'qcode' in entry and code == entry['qcode']:
return True
return False
for subject in item['subject']:
if 'qcode' in subject and len(subject['qcode']) == 8:
top_qcode = subject['qcode'][:2] + '000000'
if not iptc_already_exists(top_qcode):
item['subject'].append({'qcode': top_qcode, 'name': subject_codes[top_qcode]})
mid_qcode = subject['qcode'][:5] + '000'
if not iptc_already_exists(mid_qcode):
item['subject'].append({'qcode': mid_qcode, 'name': subject_codes[mid_qcode]})
except Exception as ex:
raise ProviderError.iptcError(ex, provider)
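# Example: an item whose subject list is [{'qcode': '15039001', 'name': 'formula one'}]
# leaves process_iptc_codes() with the additional entries
# {'qcode': '15000000', 'name': subject_codes['15000000']} and
# {'qcode': '15039000', 'name': subject_codes['15039000']}.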
def derive_subject(item):
"""
    Assuming that the item has an anpa category, try to derive a subject using the anpa category vocabulary.
    :param item: ingested item
    :return: the item with a subject set, if one could be derived
"""
try:
category_map = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
if category_map:
for cat in item['anpa_category']:
map_entry = next(
(code for code in category_map['items'] if code['qcode'] == cat['qcode'] and code['is_active']),
None)
if map_entry and 'subject' in map_entry:
item['subject'] = [
{'qcode': map_entry.get('subject'), 'name': subject_codes[map_entry.get('subject')]}]
except Exception as ex:
logger.exception(ex)
def apply_rule_set(item, provider, rule_set=None):
"""
    Applies the rule set to the item to be ingested into the system. If there's no rule set then the item will
    be returned without any change.
    :param item: Item to be ingested
    :param provider: provider object from whom the item was received
    :param rule_set: rule set to apply; if None, the provider's configured rule set is fetched and used
    :return: item
"""
try:
if rule_set is None and provider.get('rule_set') is not None:
rule_set = superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
if rule_set and 'body_html' in item:
body = item['body_html']
for rule in rule_set['rules']:
body = body.replace(rule['old'], rule['new'])
item['body_html'] = body
return item
except Exception as ex:
raise ProviderError.ruleError(ex, provider)
def ingest_cancel(item):
"""
    Given an item that has a pubstatus of 'canceled', find all versions of this item and mark them as canceled as well.
Uses the URI to identify those items in ingest that are related to this cancellation.
:param item:
:return:
"""
ingest_service = superdesk.get_resource_service('ingest')
lookup = {'uri': item.get('uri')}
family_members = ingest_service.get_from_mongo(req=None, lookup=lookup)
for relative in family_members:
update = {'pubstatus': 'canceled', ITEM_STATE: CONTENT_STATE.KILLED}
ingest_service.patch(relative['_id'], update)
def ingest_items(items, provider, feeding_service, rule_set=None, routing_scheme=None):
all_items = filter_expired_items(provider, items)
items_dict = {doc[GUID_FIELD]: doc for doc in all_items}
items_in_package = []
failed_items = set()
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE]:
items_in_package = [ref['residRef'] for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) != CONTENT_TYPE.COMPOSITE]:
ingested = ingest_item(item, provider, feeding_service, rule_set,
                               routing_scheme=routing_scheme if item[GUID_FIELD] not in items_in_package else None)
if not ingested:
failed_items.add(item[GUID_FIELD])
for item in [doc for doc in all_items if doc.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE]:
for ref in [ref for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]:
if ref['residRef'] in failed_items:
failed_items.add(item[GUID_FIELD])
continue
ref.setdefault('location', 'ingest')
itemRendition = items_dict.get(ref['residRef'], {}).get('renditions')
if itemRendition:
ref.setdefault('renditions', itemRendition)
ref[GUID_FIELD] = ref['residRef']
if items_dict.get(ref['residRef']):
ref['residRef'] = items_dict.get(ref['residRef'], {}).get(superdesk.config.ID_FIELD)
if item[GUID_FIELD] in failed_items:
continue
ingested = ingest_item(item, provider, feeding_service, rule_set, routing_scheme)
if not ingested:
failed_items.add(item[GUID_FIELD])
app.data._search_backend('ingest').bulk_insert('ingest', [item for item in all_items
if item[GUID_FIELD] not in failed_items])
if failed_items:
logger.error('Failed to ingest the following items: %s', failed_items)
return failed_items
def ingest_item(item, provider, feeding_service, rule_set=None, routing_scheme=None):
try:
item.setdefault(superdesk.config.ID_FIELD, generate_guid(type=GUID_NEWSML))
item[FAMILY_ID] = item[superdesk.config.ID_FIELD]
item['ingest_provider'] = str(provider[superdesk.config.ID_FIELD])
item.setdefault('source', provider.get('source', ''))
set_default_state(item, CONTENT_STATE.INGESTED)
item['expiry'] = get_expiry_date(provider.get('content_expiry', app.config['INGEST_EXPIRY_MINUTES']),
item.get('versioncreated'))
if 'anpa_category' in item:
process_anpa_category(item, provider)
if 'subject' in item:
process_iptc_codes(item, provider)
if 'anpa_category' not in item:
derive_category(item, provider)
elif 'anpa_category' in item:
derive_subject(item)
apply_rule_set(item, provider, rule_set)
ingest_service = superdesk.get_resource_service('ingest')
if item.get('ingest_provider_sequence') is None:
ingest_service.set_ingest_provider_sequence(item, provider)
old_item = ingest_service.find_one(guid=item[GUID_FIELD], req=None)
if item.get('pubstatus', '') == 'canceled':
item[ITEM_STATE] = CONTENT_STATE.KILLED
ingest_cancel(item)
rend = item.get('renditions', {})
if rend:
baseImageRend = rend.get('baseImage') or next(iter(rend.values()))
if baseImageRend:
href = feeding_service.prepare_href(baseImageRend['href'])
update_renditions(item, href, old_item)
new_version = True
if old_item:
# In case we already have the item, preserve the _id
item[superdesk.config.ID_FIELD] = old_item[superdesk.config.ID_FIELD]
ingest_service.put_in_mongo(item[superdesk.config.ID_FIELD], item)
# if the feed is versioned and this is not a new version
if 'version' in item and 'version' in old_item and item.get('version') == old_item.get('version'):
new_version = False
else:
try:
ingest_service.post_in_mongo([item])
except HTTPException as e:
logger.error("Exception while persisting item in ingest collection", e)
if routing_scheme and new_version:
routed = ingest_service.find_one(_id=item[superdesk.config.ID_FIELD], req=None)
superdesk.get_resource_service('routing_schemes').apply_routing_scheme(routed, provider, routing_scheme)
except Exception as ex:
logger.exception(ex)
try:
superdesk.app.sentry.captureException()
        except Exception:
pass
return False
return True
def update_renditions(item, href, old_item):
"""
    If the old_item has renditions uploaded into media then the old rendition details are
    assigned to the item; this avoids repeatedly downloading the same image and leaving the media entries orphaned.
If there is no old_item the original is downloaded and renditions are
generated.
:param item: parsed item from source
:param href: reference to original
:param old_item: the item that we have already ingested, if it exists
:return: item with renditions
"""
inserted = []
try:
# If there is an existing set of renditions we keep those
if old_item:
media = old_item.get('renditions', {}).get('original', {}).get('media', {})
if media:
item['renditions'] = old_item['renditions']
item['mimetype'] = old_item.get('mimetype')
item['filemeta'] = old_item.get('filemeta')
return
content, filename, content_type = download_file_from_url(href)
file_type, ext = content_type.split('/')
metadata = process_file(content, file_type)
file_guid = app.media.put(content, filename, content_type, metadata)
inserted.append(file_guid)
rendition_spec = app.config.get('RENDITIONS', {}).get('picture', {})
renditions = generate_renditions(content, file_guid, inserted, file_type,
content_type, rendition_spec, url_for_media)
item['renditions'] = renditions
item['mimetype'] = content_type
item['filemeta'] = metadata
except Exception:
for file_id in inserted:
app.media.delete(file_id)
raise
superdesk.command('ingest:update', UpdateIngest())
| sivakuna-aap/superdesk-core | superdesk/io/commands/update_ingest.py | Python | agpl-3.0 | 22,300 |
from . import db
from .assoc import section_professor
class Professor(db.Model):
__tablename__ = 'professors'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), unique=True)
first_name = db.Column(db.Text, nullable=False)
last_name = db.Column(db.Text)
university_id = db.Column(db.Integer, db.ForeignKey('universities.id'), nullable=False)
university = db.relationship('University', back_populates='professors')
sections = db.relationship('Section', secondary=section_professor, back_populates='professors')
evaluations = db.relationship('Evaluation', back_populates='professor')
__mapper_args__ = {
'polymorphic_identity': 'p',
}
def to_dict(self):
return {
'id': self.id,
'first_name': self.first_name,
'last_name': self.last_name
}
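    # Illustrative only: a row created as Professor(first_name='Ada',
    # last_name='Lovelace') with id 1 serializes via to_dict() to
    # {'id': 1, 'first_name': 'Ada', 'last_name': 'Lovelace'}.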
| SCUEvals/scuevals-api | scuevals_api/models/professor.py | Python | agpl-3.0 | 905 |
# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from . import product
from . import stock
from . import wizard
from . import product_price_history
from . import account_anglo_saxon_pos
from . import purchase
| Elico-Corp/openerp-7.0 | stock_landed_costs/__init__.py | Python | agpl-3.0 | 303 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
import threading
import logging
import logging.handlers
import util
class Logger(threading.Thread):
"""logger for all messages and events"""
def __init__(self, stop_logging, filename='bus.log'):
super(Logger, self).__init__()
self.filename = filename
self.stop_logging = stop_logging
# receiving socket
self.context = zmq.Context.instance()
self.log_in = self.context.socket(zmq.PAIR)
self.log_in.connect("inproc://logging")
self.log_in.setsockopt(zmq.RCVTIMEO, 1000)
# logger parameters for stdout and compressed file
log_format = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
file_log_handler = util.TimedCompressingRotatingFileHandler(self.filename, when='midnight', backupCount=7)
file_log_handler.setFormatter(log_format)
stream_log_handler = logging.StreamHandler()
stream_log_handler.setFormatter(log_format)
self.logger = logging.getLogger('logger')
self.logger.setLevel(logging.INFO)
self.logger.addHandler(file_log_handler)
self.logger.addHandler(stream_log_handler)
def run(self):
while not self.stop_logging.is_set():
try:
# receive message
message = self.log_in.recv_multipart()
if len(message) > 1:
# message with content
[topic, contents] = message
self.logger.info("[msg] {%s} %s", topic, contents)
else:
# subscribe/unsubscribe
message = message[0]
topic = message[1:]
if message.startswith(b'\x00'):
# unsubscribe
self.logger.info("[unsub] {%s}", topic)
elif message.startswith(b'\x01'):
# subscribe
self.logger.info("[sub] {%s}", topic)
else:
self.logger.warning("[unknown message] %s", message)
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
self.logger.error("socket error, stopped logging")
break
elif e.errno == zmq.EAGAIN:
pass
else:
print(e)
self.logger.error("unknown error occurred during logging")
def main():
context = zmq.Context.instance()
# socket facing clients
frontend = context.socket(zmq.XSUB)
frontend.bind("tcp://*:5559")
# socket facing services
backend = context.socket(zmq.XPUB)
backend.bind("tcp://*:5560")
# log socket
log_out = context.socket(zmq.PAIR)
log_out.bind("inproc://logging")
# start logging thread
stop_logging = threading.Event()
logger = Logger(stop_logging)
logger.start()
try:
zmq.proxy(frontend, backend, log_out)
except KeyboardInterrupt:
print("shutting down")
finally:
frontend.close()
backend.close()
stop_logging.set()
logger.join()
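# Hypothetical client sketch (not part of the original bus): shows how a
# publisher and a subscriber would attach to the proxy sockets bound in
# main(). Everything passing through the proxy is also mirrored to the
# Logger thread via the capture socket given to zmq.proxy().
def example_client():
    import time
    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.PUB)
    pub.connect("tcp://localhost:5559")      # XSUB side accepts publishers
    sub = ctx.socket(zmq.SUB)
    sub.connect("tcp://localhost:5560")      # XPUB side serves subscribers
    sub.setsockopt(zmq.SUBSCRIBE, b"demo")   # logged by Logger as [sub] {demo}
    time.sleep(0.5)                          # give the subscription time to propagate
    pub.send_multipart([b"demo", b"hello"])  # logged as [msg] {demo} hello
    print(sub.recv_multipart())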
if __name__ == "__main__":
main()
| ross128/pybus | bus.py | Python | agpl-3.0 | 2,629 |
# Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2016 Tecnativa S.L. - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo.exceptions import UserError, ValidationError
from odoo.tests import common
class TestResPartnerIndustry(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestResPartnerIndustry, cls).setUpClass()
cls.industry_model = cls.env["res.partner.industry"]
cls.industry_main = cls.industry_model.create({"name": "Test"})
cls.industry_child = cls.industry_model.create(
{"name": "Test child", "parent_id": cls.industry_main.id}
)
def test_check_industries(self):
with self.assertRaises(ValidationError):
self.env["res.partner"].create(
{
"name": "Test",
"industry_id": self.industry_main.id,
"secondary_industry_ids": [(4, self.industry_main.id)],
}
)
def test_check_copy(self):
industry_copy = self.industry_child.copy()
self.assertEqual(industry_copy.name, "Test child 2")
def test_check_uniq_name(self):
with self.assertRaises(ValidationError):
self.industry_model.create({"name": "Test"})
def test_check_recursion(self):
with self.assertRaises(UserError):
self.industry_main.parent_id = self.industry_child.id
with self.assertRaises(ValidationError) as e:
self.industry_main._check_parent_id()
error_message = "Error! You cannot create recursive industries."
self.assertEqual(e.exception.args[0], error_message)
def test_name(self):
self.assertEqual(self.industry_child.display_name, "Test / Test child")
| OCA/partner-contact | partner_industry_secondary/tests/test_res_partner_industry.py | Python | agpl-3.0 | 1,860 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-05 09:13
from __future__ import unicode_literals
import uuid
from django.core.exceptions import FieldDoesNotExist
from django.db import migrations
def set_uuid_field(apps, schema_editor):
"""
Set a random uuid value to all existing rows in all models containing an 'uuid' attribute in database.
"""
base = apps.get_app_config('base')
for model_class in base.get_models():
ids = model_class.objects.values_list('id', flat=True)
if ids:
for pk in ids:
try:
model_class.objects.filter(pk=pk).update(uuid=uuid.uuid4())
except FieldDoesNotExist:
break
class Migration(migrations.Migration):
dependencies = [
('base', '0062_add_uuid_field'),
]
operations = [
migrations.RunPython(set_uuid_field, elidable=True, reverse_code=migrations.RunPython.noop),
]
| uclouvain/osis | base/migrations/0063_populate_uuid_values.py | Python | agpl-3.0 | 969 |
"""
Urls for idea app
"""
from django.conf.urls import url
from openedx.features.idea.api_views import FavoriteAPIView
from openedx.features.idea.views import ChallengeLandingView, IdeaCreateView, IdeaDetailView, IdeaListingView
urlpatterns = [
url(
r'^overview/$',
ChallengeLandingView.as_view(),
name='challenge-landing'
),
url(
r'^$',
IdeaListingView.as_view(),
name='idea-listing'
),
url(
r'^create/$',
IdeaCreateView.as_view(),
name='idea-create'
),
url(
r'^(?P<pk>[0-9]+)/$',
IdeaDetailView.as_view(),
name='idea-details'
),
url(
r'^api/favorite/(?P<idea_id>[0-9]+)/$',
FavoriteAPIView.as_view(),
name='mark-favorite-api-view'
)
]
| philanthropy-u/edx-platform | openedx/features/idea/urls.py | Python | agpl-3.0 | 799 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_medicament_template_wkf
| CLVsol/odoo_addons | clv_medicament_template/wkf/__init__.py | Python | agpl-3.0 | 1,439 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2020-04-21 06:55
from __future__ import unicode_literals
from django.db import migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('idea', '0002_idea_favorites'),
]
operations = [
migrations.AddField(
model_name='idea',
name='created',
field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
),
migrations.AddField(
model_name='idea',
name='modified',
field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
),
]
| philanthropy-u/edx-platform | openedx/features/idea/migrations/0003_auto_20200421_0255.py | Python | agpl-3.0 | 804 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Numdiff(AutotoolsPackage):
"""Numdiff is a little program that can be used to compare putatively
similar files line by line and field by field, ignoring small numeric
    differences and/or different numeric formats."""
homepage = 'https://www.nongnu.org/numdiff'
url = 'http://nongnu.askapache.com/numdiff/numdiff-5.8.1.tar.gz'
maintainers = ['davydden']
version('5.9.0', '794461a7285d8b9b1f2c4a8149889ea6')
version('5.8.1', 'a295eb391f6cb1578209fc6b4f9d994e')
variant('nls', default=False,
description="Enable Natural Language Support")
variant('gmp', default=False,
description="Use GNU Multiple Precision Arithmetic Library")
depends_on('gettext', when='+nls')
depends_on('gmp', when='+gmp')
def configure_args(self):
spec = self.spec
args = []
if '+nls' in spec:
args.append('--enable-nls')
else:
args.append('--disable-nls')
if '+gmp' in spec:
# compile with -O0 as per upstream known issue with optimization
# and GMP; https://launchpad.net/ubuntu/+source/numdiff/+changelog
# http://www.nongnu.org/numdiff/#issues
# keep this variant off by default as one still encounter
# GNU MP: Cannot allocate memory (size=2305843009206983184)
args.extend([
'--enable-gmp',
'CFLAGS=-O0'
])
else:
args.append('--disable-gmp')
return args
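    # Example: a spec built with '+nls ~gmp' yields
    # ['--enable-nls', '--disable-gmp'].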
| skosukhin/spack | var/spack/repos/builtin/packages/numdiff/package.py | Python | lgpl-2.1 | 2,791 |
"""
httpdatehelper
==============
:Module: pyfileserver.httpdatehelper
:Author: Ho Chun Wei, fuzzybr80(at)gmail.com
:Project: PyFileServer, http://pyfilesync.berlios.de/
:Copyright: Lesser GNU Public License, see LICENSE file attached with package
HTTP dates helper - an assorted library of helpful date functions:
* getstrftime(secs) - returns the RFC 1123 date/time format of secs, where secs is the number
of seconds since the epoch. If secs is not given, the current system time is used
* getsecstime(timetypestring) - returns as the number of seconds since the epoch, the date/time
described in timetypestring. Returns None for invalid input
* getgmtime(timetypestring) - returns as a standard time tuple (see time and calendar), the date/time
described in timetypestring. Returns None for invalid input
The following time type strings are supported by getsecstime() and getgmtime()::
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
"""
__docformat__ = 'reStructuredText'
import calendar
import time
def getstrftime(secs=None):
# rfc 1123 date/time format
return time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(secs))
def getsecstime(timeformat):
result = getgmtime(timeformat)
if result:
return calendar.timegm(result)
else:
return None
def getgmtime(timeformat):
# Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
try:
vtime = time.strptime(timeformat, "%a, %d %b %Y %H:%M:%S GMT")
return vtime
except:
pass
# Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
try:
        vtime = time.strptime(timeformat, "%A, %d-%b-%y %H:%M:%S GMT")
return vtime
except:
pass
# Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
try:
vtime = time.strptime(timeformat, "%a %b %d %H:%M:%S %Y")
return vtime
except:
pass
return None
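# Minimal self-check sketch (not part of the original module): round-trips the
# current time through the helpers above and parses the three documented
# time type strings. Printed values depend on the system clock.
if __name__ == '__main__':
    now_str = getstrftime()
    secs = getsecstime(now_str)
    assert getstrftime(secs) == now_str
    for sample in ('Sun, 06 Nov 1994 08:49:37 GMT',
                   'Sunday, 06-Nov-94 08:49:37 GMT',
                   'Sun Nov  6 08:49:37 1994'):
        print('%s -> %s' % (sample, getsecstime(sample)))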
| cwho/pyfileserver | PyFileServer/pyfileserver/httpdatehelper.py | Python | lgpl-2.1 | 2,135 |
#!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# LVHDoFCoESR: LVHD over Fibre Channel over Ethernet driver
#
import SR
import LVHDoHBASR
import LVHDSR
import SRCommand
import sys
import xs_errors
import util
CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_METADATA", "SR_TRIM",
"VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
"VDI_GENERATE_CONFIG", "VDI_SNAPSHOT", "VDI_CLONE",
"VDI_RESIZE", "ATOMIC_PAUSE", "VDI_RESET_ON_BOOT/2",
"VDI_UPDATE"]
CONFIGURATION = [['SCSIid', 'The scsi_id of the destination LUN'],
                 ['allocation', 'Valid values are thick or thin (optional, defaults to thick)']]
DRIVER_INFO = {
'name': 'LVHD over FCoE',
'description': 'SR plugin which represents disks as VHDs on Logical \
Volumes within a Volume Group created on a FCoE LUN',
'vendor': 'Citrix Systems Inc',
'copyright': '(C) 2015 Citrix Systems Inc',
'driver_version': '1.0',
'required_api_version': '1.0',
'capabilities': CAPABILITIES,
'configuration': CONFIGURATION
}
class LVHDoFCoESR(LVHDoHBASR.LVHDoHBASR):
"""LVHD over FCoE storage repository"""
def handles(type):
if __name__ == '__main__':
name = sys.argv[0]
else:
name = __name__
if name.endswith("LVMoFCoESR"):
return type == "lvmofcoe" # for the initial switch from LVM
if type == "lvhdofcoe":
return True
return False
handles = staticmethod(handles)
def load(self, sr_uuid):
driver = SR.driver('hba')
if 'type' not in self.original_srcmd.params['device_config'] or \
'type' in self.original_srcmd.params['device_config'] and \
self.original_srcmd.dconf['type'] == "any":
self.original_srcmd.dconf['type'] = "fcoe"
self.hbasr = driver(self.original_srcmd, sr_uuid)
pbd = None
try:
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
except:
pass
if not self.dconf.has_key('SCSIid') or not self.dconf['SCSIid']:
print >>sys.stderr, self.hbasr.print_devs()
raise xs_errors.XenError('ConfigSCSIid')
self.SCSIid = self.dconf['SCSIid']
self._pathrefresh(LVHDoFCoESR)
LVHDSR.LVHDSR.load(self, sr_uuid)
def vdi(self, uuid):
return LVHDoFCoEVDI(self, uuid)
class LVHDoFCoEVDI(LVHDoHBASR.LVHDoHBAVDI):
pass
if __name__ == '__main__':
SRCommand.run(LVHDoFCoESR, DRIVER_INFO)
else:
SR.registerSR(LVHDoFCoESR)
| pritha-srivastava/sm | drivers/LVHDoFCoESR.py | Python | lgpl-2.1 | 3,290 |
# -*- coding: utf-8 -*-
from pysignfe.xml_sped import *
from .ConsultarSituacaoLoteRps import ListaMensagemRetorno
from .Rps import IdentificacaoPrestador, IdentificacaoRps
from .Nfse import CompNfse
import os
DIRNAME = os.path.dirname(__file__)
class ConsultarLoteRpsEnvio(XMLNFe):
def __init__(self):
super(ConsultarLoteRpsEnvio, self).__init__()
self.versao = TagDecimal(nome=u'ConsultarLoteRpsEnvio', propriedade=u'versao', namespace=NAMESPACE_NFSE, valor=u'1.00', raiz=u'/')
self.Prestador = IdentificacaoPrestador()
self.Protocolo = TagCaracter(nome=u'Protocolo', tamanho=[ 1, 50], raiz=u'/')
self.caminho_esquema = os.path.join(DIRNAME, u'schema/')
self.arquivo_esquema = u'nfse.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ConsultarLoteRpsEnvio xmlns="'+ NAMESPACE_NFSE + '">'
xml += self.Prestador.xml.replace(ABERTURA, u'')
xml += self.Protocolo.xml
xml += u'</ConsultarLoteRpsEnvio>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.Prestador.xml = arquivo
self.Protocolo.xml = arquivo
xml = property(get_xml, set_xml)
class ConsultarLoteRpsResposta(XMLNFe):
def __init__(self):
super(ConsultarLoteRpsResposta, self).__init__()
self.CompNfse = []
self.ListaMensagemRetorno = ListaMensagemRetorno()
self.caminho_esquema = os.path.join(DIRNAME, u'schema/')
self.arquivo_esquema = u'nfse.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ConsultarLoteRpsResposta xmlns="'+ NAMESPACE_NFSE + '">'
if len(self.ListaMensagemRetorno.MensagemRetorno) != 0:
xml += self.ListaMensagemRetorno.xml.replace(ABERTURA, u'')
else:
xml += u'<ListaNfse>'
for c in self.CompNfse:
xml += tira_abertura(c.xml)
xml += u'</ListaNfse>'
xml += u'</ConsultarLoteRpsResposta>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CompNfse = self.le_grupo('[nfse]//ConsultarLoteRpsResposta/CompNfse', CompNfse)
self.ListaMensagemRetorno.xml = arquivo
xml = property(get_xml, set_xml) | thiagopena/PySIGNFe | pysignfe/nfse/bhiss/v10/ConsultarLoteRps.py | Python | lgpl-2.1 | 2,448 |
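    # Usage sketch (hypothetical protocol number; attribute names follow the
    # tags declared above):
    #   envio = ConsultarLoteRpsEnvio()
    #   envio.Protocolo.valor = u'12345'
    #   xml_consulta = envio.xml  # serialized ConsultarLoteRpsEnvio document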
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fastqvalidator(MakefilePackage):
"""The fastQValidator validates the format of fastq files."""
homepage = "http://genome.sph.umich.edu/wiki/FastQValidator"
url = "https://github.com/statgen/fastQValidator/archive/v0.1.1a.tar.gz"
version('2017-01-10', commit='6d619a34749e9d33c34ef0d3e0e87324ca77f320',
git='https://github.com/statgen/fastQValidator.git')
resource(
name='libStatGen',
git='https://github.com/statgen/libStatGen.git',
commit='9db9c23e176a6ce6f421a3c21ccadedca892ac0c'
)
@property
def build_targets(self):
return ['LIB_PATH_GENERAL={0}'.format(
join_path(self.stage.source_path, 'libStatGen'))]
@property
def install_targets(self):
return [
'INSTALLDIR={0}'.format(self.prefix.bin),
'LIB_PATH_GENERAL={0}'.format(
join_path(self.stage.source_path, 'libStatGen')),
'install'
]
| lgarren/spack | var/spack/repos/builtin/packages/fastqvalidator/package.py | Python | lgpl-2.1 | 2,230 |
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2011-2012 Johan Dahlin <johan@gnome.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
"""
PyGTK compatibility layer.
This module goes a little bit further to maintain PyGTK compatibility than
the normal overrides system.
It is recommended to not depend on this layer, but only use it as an
intermediate step when porting your application to PyGI.
Compatibility might never be 100%, but the aim is to make it possible to run
a well behaved PyGTK application mostly unmodified on top of PyGI.
"""
import sys
import warnings
try:
# Python 3
from collections import UserList
from imp import reload
except ImportError:
# Python 2 ships that in a different module
from UserList import UserList
import gi
from gi.repository import GObject
def _install_enums(module, dest=None, strip=''):
if dest is None:
dest = module
modname = dest.__name__.rsplit('.', 1)[1].upper()
for attr in dir(module):
try:
obj = getattr(module, attr, None)
except:
continue
try:
if issubclass(obj, GObject.GEnum):
for value, enum in obj.__enum_values__.items():
name = enum.value_name
name = name.replace(modname + '_', '')
if strip and name.startswith(strip):
name = name[len(strip):]
setattr(dest, name, enum)
except TypeError:
continue
try:
if issubclass(obj, GObject.GFlags):
for value, flag in obj.__flags_values__.items():
name = flag.value_names[-1].replace(modname + '_', '')
setattr(dest, name, flag)
except TypeError:
continue
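# For example, after _install_enums(Gtk) the flat PyGTK-style constant
# Gtk.WINDOW_TOPLEVEL refers to the GEnum value Gtk.WindowType.TOPLEVEL
# (the 'GTK_' prefix is stripped from the enum value name).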
def enable():
# gobject
from gi.repository import GLib
sys.modules['glib'] = GLib
# gobject
from gi.repository import GObject
sys.modules['gobject'] = GObject
from gi._gobject import propertyhelper
sys.modules['gobject.propertyhelper'] = propertyhelper
# gio
from gi.repository import Gio
sys.modules['gio'] = Gio
_unset = object()
def enable_gtk(version='2.0'):
# set the default encoding like PyGTK
reload(sys)
if sys.version_info < (3,0):
sys.setdefaultencoding('utf-8')
# atk
gi.require_version('Atk', '1.0')
from gi.repository import Atk
sys.modules['atk'] = Atk
_install_enums(Atk)
# pango
gi.require_version('Pango', '1.0')
from gi.repository import Pango
sys.modules['pango'] = Pango
_install_enums(Pango)
# pangocairo
gi.require_version('PangoCairo', '1.0')
from gi.repository import PangoCairo
sys.modules['pangocairo'] = PangoCairo
# gdk
gi.require_version('Gdk', version)
gi.require_version('GdkPixbuf', '2.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
sys.modules['gtk.gdk'] = Gdk
_install_enums(Gdk)
_install_enums(GdkPixbuf, dest=Gdk)
Gdk._2BUTTON_PRESS = 5
Gdk.BUTTON_PRESS = 4
Gdk.screen_get_default = Gdk.Screen.get_default
Gdk.Pixbuf = GdkPixbuf.Pixbuf
Gdk.pixbuf_new_from_file = GdkPixbuf.Pixbuf.new_from_file
Gdk.PixbufLoader = GdkPixbuf.PixbufLoader.new_with_type
orig_get_frame_extents = Gdk.Window.get_frame_extents
def get_frame_extents(window):
try:
try:
rect = Gdk.Rectangle(0, 0, 0, 0)
except TypeError:
rect = Gdk.Rectangle()
orig_get_frame_extents(window, rect)
except TypeError:
rect = orig_get_frame_extents(window)
return rect
Gdk.Window.get_frame_extents = get_frame_extents
orig_get_origin = Gdk.Window.get_origin
def get_origin(self):
return orig_get_origin(self)[1:]
Gdk.Window.get_origin = get_origin
# gtk
gi.require_version('Gtk', version)
from gi.repository import Gtk
sys.modules['gtk'] = Gtk
Gtk.gdk = Gdk
Gtk.pygtk_version = (2, 99, 0)
Gtk.gtk_version = (Gtk.MAJOR_VERSION,
Gtk.MINOR_VERSION,
Gtk.MICRO_VERSION)
_install_enums(Gtk)
# Action
def set_tool_item_type(menuaction, gtype):
warnings.warn('set_tool_item_type() is not supported',
DeprecationWarning, stacklevel=2)
Gtk.Action.set_tool_item_type = classmethod(set_tool_item_type)
# Alignment
orig_Alignment = Gtk.Alignment
class Alignment(orig_Alignment):
def __init__(self, xalign=0.0, yalign=0.0, xscale=0.0, yscale=0.0):
orig_Alignment.__init__(self)
self.props.xalign = xalign
self.props.yalign = yalign
self.props.xscale = xscale
self.props.yscale = yscale
Gtk.Alignment = Alignment
# Box
orig_pack_end = Gtk.Box.pack_end
def pack_end(self, child, expand=True, fill=True, padding=0):
orig_pack_end(self, child, expand, fill, padding)
Gtk.Box.pack_end = pack_end
orig_pack_start = Gtk.Box.pack_start
def pack_start(self, child, expand=True, fill=True, padding=0):
orig_pack_start(self, child, expand, fill, padding)
Gtk.Box.pack_start = pack_start
# TreeViewColumn
orig_tree_view_column_pack_end = Gtk.TreeViewColumn.pack_end
def tree_view_column_pack_end(self, cell, expand=True):
orig_tree_view_column_pack_end(self, cell, expand)
Gtk.TreeViewColumn.pack_end = tree_view_column_pack_end
orig_tree_view_column_pack_start = Gtk.TreeViewColumn.pack_start
def tree_view_column_pack_start(self, cell, expand=True):
orig_tree_view_column_pack_start(self, cell, expand)
Gtk.TreeViewColumn.pack_start = tree_view_column_pack_start
# TreeView
def insert_column_with_attributes(view, position, title, cell, *args, **kwargs):
pass
Gtk.TreeView.insert_column_with_attributes = insert_column_with_attributes
# CellLayout
orig_cell_pack_end = Gtk.CellLayout.pack_end
def cell_pack_end(self, cell, expand=True):
orig_cell_pack_end(self, cell, expand)
Gtk.CellLayout.pack_end = cell_pack_end
orig_cell_pack_start = Gtk.CellLayout.pack_start
def cell_pack_start(self, cell, expand=True):
orig_cell_pack_start(self, cell, expand)
Gtk.CellLayout.pack_start = cell_pack_start
orig_set_cell_data_func = Gtk.CellLayout.set_cell_data_func
def set_cell_data_func(self, cell, func, user_data=_unset):
def callback(*args):
if args[-1] == _unset:
args = args[:-1]
return func(*args)
orig_set_cell_data_func(self, cell, callback, user_data)
Gtk.CellLayout.set_cell_data_func = set_cell_data_func
# CellRenderer
class GenericCellRenderer(Gtk.CellRenderer):
pass
Gtk.GenericCellRenderer = GenericCellRenderer
# ComboBox
orig_combo_row_separator_func = Gtk.ComboBox.set_row_separator_func
def combo_row_separator_func(self, func, user_data=_unset):
def callback(*args):
if args[-1] == _unset:
args = args[:-1]
return func(*args)
orig_combo_row_separator_func(self, callback, user_data)
Gtk.ComboBox.set_row_separator_func = combo_row_separator_func
# ComboBoxEntry
class ComboBoxEntry(Gtk.ComboBox):
def __init__(self, **kwds):
Gtk.ComboBox.__init__(self, has_entry=True, **kwds)
def set_text_column (self, text_column):
self.set_entry_text_column(text_column)
def get_text_column (self):
return self.get_entry_text_column()
Gtk.ComboBoxEntry = ComboBoxEntry
def combo_box_entry_new():
return Gtk.ComboBoxEntry()
Gtk.combo_box_entry_new = combo_box_entry_new
def combo_box_entry_new_with_model(model):
return Gtk.ComboBoxEntry(model=model)
Gtk.combo_box_entry_new_with_model = combo_box_entry_new_with_model
# Container
def install_child_property(container, flag, pspec):
warnings.warn('install_child_property() is not supported',
DeprecationWarning, stacklevel=2)
Gtk.Container.install_child_property = classmethod(install_child_property)
def new_text():
combo = Gtk.ComboBox()
model = Gtk.ListStore(str)
combo.set_model(model)
combo.set_entry_text_column(0)
return combo
Gtk.combo_box_new_text = new_text
def append_text(self, text):
model = self.get_model()
model.append([text])
Gtk.ComboBox.append_text = append_text
Gtk.expander_new_with_mnemonic = Gtk.Expander.new_with_mnemonic
Gtk.icon_theme_get_default = Gtk.IconTheme.get_default
Gtk.image_new_from_pixbuf = Gtk.Image.new_from_pixbuf
Gtk.image_new_from_stock = Gtk.Image.new_from_stock
Gtk.settings_get_default = Gtk.Settings.get_default
Gtk.window_set_default_icon = Gtk.Window.set_default_icon
# Scale
orig_HScale = Gtk.HScale
orig_VScale = Gtk.VScale
class HScale(orig_HScale):
def __init__(self, adjustment=None):
orig_HScale.__init__(self, adjustment=adjustment)
Gtk.HScale = HScale
class VScale(orig_VScale):
def __init__(self, adjustment=None):
orig_VScale.__init__(self, adjustment=adjustment)
Gtk.VScale = VScale
Gtk.stock_add = lambda items: None
# Widget
Gtk.widget_get_default_direction = Gtk.Widget.get_default_direction
orig_size_request = Gtk.Widget.size_request
def size_request(widget):
class SizeRequest(UserList):
def __init__(self, req):
self.height = req.height
self.width = req.width
UserList.__init__(self, [self.width,
self.height])
return SizeRequest(orig_size_request(widget))
Gtk.Widget.size_request = size_request
Gtk.Widget.hide_all = Gtk.Widget.hide
class BaseGetter(object):
def __init__(self, context):
self.context = context
def __getitem__(self, state):
color = self.context.get_background_color(state)
return Gdk.Color(red=color.red,
green=color.green,
blue=color.blue)
class Styles(object):
def __init__(self, widget):
context = widget.get_style_context()
self.base = BaseGetter(context)
self.black = Gdk.Color(red=0, green=0, blue=0)
class StyleDescriptor(object):
def __get__(self, instance, class_):
return Styles(instance)
Gtk.Widget.style = StyleDescriptor()
# gtk.unixprint
class UnixPrint(object):
pass
unixprint = UnixPrint()
sys.modules['gtkunixprint'] = unixprint
# gtk.keysyms
class Keysyms(object):
pass
keysyms = Keysyms()
sys.modules['gtk.keysyms'] = keysyms
Gtk.keysyms = keysyms
for name in dir(Gdk):
if name.startswith('KEY_'):
target = name[4:]
if target[0] in '0123456789':
target = '_' + target
value = getattr(Gdk, name)
setattr(keysyms, target, value)
def enable_vte():
gi.require_version('Vte', '0.0')
from gi.repository import Vte
sys.modules['vte'] = Vte
def enable_poppler():
gi.require_version('Poppler', '0.18')
from gi.repository import Poppler
sys.modules['poppler'] = Poppler
Poppler.pypoppler_version = (1, 0, 0)
def enable_webkit(version='1.0'):
gi.require_version('WebKit', version)
from gi.repository import WebKit
sys.modules['webkit'] = WebKit
WebKit.WebView.get_web_inspector = WebKit.WebView.get_inspector
def enable_gudev():
gi.require_version('GUdev', '1.0')
from gi.repository import GUdev
sys.modules['gudev'] = GUdev
def enable_gst():
gi.require_version('Gst', '0.10')
from gi.repository import Gst
sys.modules['gst'] = Gst
_install_enums(Gst)
Gst.registry_get_default = Gst.Registry.get_default
Gst.element_register = Gst.Element.register
Gst.element_factory_make = Gst.ElementFactory.make
Gst.caps_new_any = Gst.Caps.new_any
Gst.get_pygst_version = lambda : (0, 10, 19)
Gst.get_gst_version = lambda : (0, 10, 40)
from gi.repository import GstInterfaces
sys.modules['gst.interfaces'] = GstInterfaces
_install_enums(GstInterfaces)
from gi.repository import GstAudio
sys.modules['gst.audio'] = GstAudio
_install_enums(GstAudio)
from gi.repository import GstVideo
sys.modules['gst.video'] = GstVideo
_install_enums(GstVideo)
from gi.repository import GstBase
sys.modules['gst.base'] = GstBase
_install_enums(GstBase)
Gst.BaseTransform = GstBase.BaseTransform
Gst.BaseSink = GstBase.BaseSink
from gi.repository import GstController
sys.modules['gst.controller'] = GstController
_install_enums(GstController, dest=Gst)
from gi.repository import GstPbutils
sys.modules['gst.pbutils'] = GstPbutils
_install_enums(GstPbutils)
def enable_goocanvas():
gi.require_version('GooCanvas', '2.0')
from gi.repository import GooCanvas
sys.modules['goocanvas'] = GooCanvas
_install_enums(GooCanvas, strip='GOO_CANVAS_')
GooCanvas.ItemSimple = GooCanvas.CanvasItemSimple
GooCanvas.Item = GooCanvas.CanvasItem
GooCanvas.Image = GooCanvas.CanvasImage
GooCanvas.Group = GooCanvas.CanvasGroup
GooCanvas.Rect = GooCanvas.CanvasRect
| ceibal-tatu/pygobject | gi/pygtkcompat.py | Python | lgpl-2.1 | 14,279 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBsddb3(PythonPackage):
"""This module provides a nearly complete wrapping of the Oracle/Sleepycat
C API for the Database Environment, Database, Cursor, Log Cursor,
Sequence and Transaction objects, and each of these is exposed
as a Python type in the bsddb3.db module."""
homepage = "https://pypi.python.org/pypi/bsddb3/6.2.5"
url = "https://pypi.io/packages/source/b/bsddb3/bsddb3-6.2.5.tar.gz"
version('6.2.5', '610267c189964c905a931990e1ba438c')
depends_on('python@2.6:')
depends_on('py-setuptools')
| EmreAtes/spack | var/spack/repos/builtin/packages/py-bsddb3/package.py | Python | lgpl-2.1 | 1,821 |
from collections.abc import Sequence, Iterable
from functools import total_ordering
import fnmatch
import linecache
import os.path
import pickle
# Import types and functions implemented in C
from _tracemalloc import *
from _tracemalloc import _get_object_traceback, _get_traces
def _format_size(size, sign):
for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
if abs(size) < 100 and unit != 'B':
# 3 digits (xx.x UNIT)
if sign:
return "%+.1f %s" % (size, unit)
else:
return "%.1f %s" % (size, unit)
if abs(size) < 10 * 1024 or unit == 'TiB':
# 4 or 5 digits (xxxx UNIT)
if sign:
return "%+.0f %s" % (size, unit)
else:
return "%.0f %s" % (size, unit)
size /= 1024
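# For example: _format_size(2300, False) == '2300 B',
# _format_size(55000000, False) == '52.4 MiB' and
# _format_size(55000000, True) == '+52.4 MiB'.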
class Statistic:
"""
    Statistic on memory allocations.
"""
__slots__ = ('traceback', 'size', 'count')
def __init__(self, traceback, size, count):
self.traceback = traceback
self.size = size
self.count = count
def __hash__(self):
return hash((self.traceback, self.size, self.count))
def __eq__(self, other):
return (self.traceback == other.traceback
and self.size == other.size
and self.count == other.count)
def __str__(self):
text = ("%s: size=%s, count=%i"
% (self.traceback,
_format_size(self.size, False),
self.count))
if self.count:
average = self.size / self.count
text += ", average=%s" % _format_size(average, False)
return text
def __repr__(self):
return ('<Statistic traceback=%r size=%i count=%i>'
% (self.traceback, self.size, self.count))
def _sort_key(self):
return (self.size, self.count, self.traceback)
class StatisticDiff:
"""
Statistic difference on memory allocations between an old and a new
Snapshot instance.
"""
__slots__ = ('traceback', 'size', 'size_diff', 'count', 'count_diff')
def __init__(self, traceback, size, size_diff, count, count_diff):
self.traceback = traceback
self.size = size
self.size_diff = size_diff
self.count = count
self.count_diff = count_diff
def __hash__(self):
return hash((self.traceback, self.size, self.size_diff,
self.count, self.count_diff))
def __eq__(self, other):
return (self.traceback == other.traceback
and self.size == other.size
and self.size_diff == other.size_diff
and self.count == other.count
and self.count_diff == other.count_diff)
def __str__(self):
text = ("%s: size=%s (%s), count=%i (%+i)"
% (self.traceback,
_format_size(self.size, False),
_format_size(self.size_diff, True),
self.count,
self.count_diff))
if self.count:
average = self.size / self.count
text += ", average=%s" % _format_size(average, False)
return text
def __repr__(self):
return ('<StatisticDiff traceback=%r size=%i (%+i) count=%i (%+i)>'
% (self.traceback, self.size, self.size_diff,
self.count, self.count_diff))
def _sort_key(self):
return (abs(self.size_diff), self.size,
abs(self.count_diff), self.count,
self.traceback)
def _compare_grouped_stats(old_group, new_group):
statistics = []
for traceback, stat in new_group.items():
previous = old_group.pop(traceback, None)
if previous is not None:
stat = StatisticDiff(traceback,
stat.size, stat.size - previous.size,
stat.count, stat.count - previous.count)
else:
stat = StatisticDiff(traceback,
stat.size, stat.size,
stat.count, stat.count)
statistics.append(stat)
for traceback, stat in old_group.items():
stat = StatisticDiff(traceback, 0, -stat.size, 0, -stat.count)
statistics.append(stat)
return statistics
@total_ordering
class Frame:
"""
Frame of a traceback.
"""
__slots__ = ("_frame",)
def __init__(self, frame):
# frame is a tuple: (filename: str, lineno: int)
self._frame = frame
@property
def filename(self):
return self._frame[0]
@property
def lineno(self):
return self._frame[1]
def __eq__(self, other):
return (self._frame == other._frame)
def __lt__(self, other):
return (self._frame < other._frame)
def __hash__(self):
return hash(self._frame)
def __str__(self):
return "%s:%s" % (self.filename, self.lineno)
def __repr__(self):
return "<Frame filename=%r lineno=%r>" % (self.filename, self.lineno)
@total_ordering
class Traceback(Sequence):
"""
Sequence of Frame instances sorted from the oldest frame
to the most recent frame.
"""
__slots__ = ("_frames",)
def __init__(self, frames):
Sequence.__init__(self)
# frames is a tuple of frame tuples: see Frame constructor for the
# format of a frame tuple; it is reversed, because _tracemalloc
# returns frames sorted from most recent to oldest, but the
# Python API expects oldest to most recent
self._frames = tuple(reversed(frames))
def __len__(self):
return len(self._frames)
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(Frame(trace) for trace in self._frames[index])
else:
return Frame(self._frames[index])
def __contains__(self, frame):
return frame._frame in self._frames
def __hash__(self):
return hash(self._frames)
def __eq__(self, other):
return (self._frames == other._frames)
def __lt__(self, other):
return (self._frames < other._frames)
def __str__(self):
return str(self[0])
def __repr__(self):
return "<Traceback %r>" % (tuple(self),)
def format(self, limit=None, most_recent_first=False):
lines = []
if limit is not None:
if limit > 0:
frame_slice = self[-limit:]
else:
frame_slice = self[:limit]
else:
frame_slice = self
if most_recent_first:
frame_slice = reversed(frame_slice)
for frame in frame_slice:
lines.append(' File "%s", line %s'
% (frame.filename, frame.lineno))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
lines.append(' %s' % line)
return lines
def get_object_traceback(obj):
"""
Get the traceback where the Python object *obj* was allocated.
Return a Traceback instance.
Return None if the tracemalloc module is not tracing memory allocations or
did not trace the allocation of the object.
"""
frames = _get_object_traceback(obj)
if frames is not None:
return Traceback(frames)
else:
return None
class Trace:
"""
Trace of a memory block.
"""
__slots__ = ("_trace",)
def __init__(self, trace):
# trace is a tuple: (domain: int, size: int, traceback: tuple).
# See Traceback constructor for the format of the traceback tuple.
self._trace = trace
@property
def domain(self):
return self._trace[0]
@property
def size(self):
return self._trace[1]
@property
def traceback(self):
return Traceback(self._trace[2])
def __eq__(self, other):
return (self._trace == other._trace)
def __hash__(self):
return hash(self._trace)
def __str__(self):
return "%s: %s" % (self.traceback, _format_size(self.size, False))
def __repr__(self):
return ("<Trace domain=%s size=%s, traceback=%r>"
% (self.domain, _format_size(self.size, False), self.traceback))
class _Traces(Sequence):
def __init__(self, traces):
Sequence.__init__(self)
# traces is a tuple of trace tuples: see Trace constructor
self._traces = traces
def __len__(self):
return len(self._traces)
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(Trace(trace) for trace in self._traces[index])
else:
return Trace(self._traces[index])
def __contains__(self, trace):
return trace._trace in self._traces
def __eq__(self, other):
return (self._traces == other._traces)
def __repr__(self):
return "<Traces len=%s>" % len(self)
def _normalize_filename(filename):
filename = os.path.normcase(filename)
if filename.endswith('.pyc'):
filename = filename[:-1]
return filename
class BaseFilter:
def __init__(self, inclusive):
self.inclusive = inclusive
def _match(self, trace):
raise NotImplementedError
class Filter(BaseFilter):
def __init__(self, inclusive, filename_pattern,
lineno=None, all_frames=False, domain=None):
super().__init__(inclusive)
self.inclusive = inclusive
self._filename_pattern = _normalize_filename(filename_pattern)
self.lineno = lineno
self.all_frames = all_frames
self.domain = domain
@property
def filename_pattern(self):
return self._filename_pattern
def _match_frame_impl(self, filename, lineno):
filename = _normalize_filename(filename)
if not fnmatch.fnmatch(filename, self._filename_pattern):
return False
if self.lineno is None:
return True
else:
return (lineno == self.lineno)
def _match_frame(self, filename, lineno):
return self._match_frame_impl(filename, lineno) ^ (not self.inclusive)
def _match_traceback(self, traceback):
if self.all_frames:
if any(self._match_frame_impl(filename, lineno)
for filename, lineno in traceback):
return self.inclusive
else:
return (not self.inclusive)
else:
filename, lineno = traceback[0]
return self._match_frame(filename, lineno)
def _match(self, trace):
domain, size, traceback = trace
res = self._match_traceback(traceback)
if self.domain is not None:
if self.inclusive:
return res and (domain == self.domain)
else:
return res or (domain != self.domain)
return res
class DomainFilter(BaseFilter):
def __init__(self, inclusive, domain):
super().__init__(inclusive)
self._domain = domain
@property
def domain(self):
return self._domain
def _match(self, trace):
domain, size, traceback = trace
return (domain == self.domain) ^ (not self.inclusive)
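# A minimal sketch of how the filters above compose with
# Snapshot.filter_traces() (defined below); the patterns are the usual
# examples for hiding import machinery and unknown frames:
#
#   snapshot = take_snapshot()
#   snapshot = snapshot.filter_traces((
#       Filter(False, "<frozen importlib._bootstrap>"),  # drop importlib frames
#       Filter(False, "<unknown>"),                      # drop unknown frames
#       DomainFilter(True, 0),                           # keep default domain only
#   ))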
class Snapshot:
"""
Snapshot of traces of memory blocks allocated by Python.
"""
def __init__(self, traces, traceback_limit):
# traces is a tuple of trace tuples: see _Traces constructor for
# the exact format
self.traces = _Traces(traces)
self.traceback_limit = traceback_limit
def dump(self, filename):
"""
Write the snapshot into a file.
"""
with open(filename, "wb") as fp:
pickle.dump(self, fp, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(filename):
"""
Load a snapshot from a file.
"""
with open(filename, "rb") as fp:
return pickle.load(fp)
def _filter_trace(self, include_filters, exclude_filters, trace):
if include_filters:
if not any(trace_filter._match(trace)
for trace_filter in include_filters):
return False
if exclude_filters:
if any(not trace_filter._match(trace)
for trace_filter in exclude_filters):
return False
return True
def filter_traces(self, filters):
"""
Create a new Snapshot instance with a filtered traces sequence, filters
is a list of Filter or DomainFilter instances. If filters is an empty
list, return a new Snapshot instance with a copy of the traces.
"""
if not isinstance(filters, Iterable):
raise TypeError("filters must be a list of filters, not %s"
% type(filters).__name__)
if filters:
include_filters = []
exclude_filters = []
for trace_filter in filters:
if trace_filter.inclusive:
include_filters.append(trace_filter)
else:
exclude_filters.append(trace_filter)
new_traces = [trace for trace in self.traces._traces
if self._filter_trace(include_filters,
exclude_filters,
trace)]
else:
new_traces = self.traces._traces.copy()
return Snapshot(new_traces, self.traceback_limit)
def _group_by(self, key_type, cumulative):
if key_type not in ('traceback', 'filename', 'lineno'):
raise ValueError("unknown key_type: %r" % (key_type,))
if cumulative and key_type not in ('lineno', 'filename'):
raise ValueError("cumulative mode cannot by used "
"with key type %r" % key_type)
stats = {}
tracebacks = {}
if not cumulative:
for trace in self.traces._traces:
domain, size, trace_traceback = trace
try:
traceback = tracebacks[trace_traceback]
except KeyError:
if key_type == 'traceback':
frames = trace_traceback
elif key_type == 'lineno':
frames = trace_traceback[:1]
else: # key_type == 'filename':
frames = ((trace_traceback[0][0], 0),)
traceback = Traceback(frames)
tracebacks[trace_traceback] = traceback
try:
stat = stats[traceback]
stat.size += size
stat.count += 1
except KeyError:
stats[traceback] = Statistic(traceback, size, 1)
else:
# cumulative statistics
for trace in self.traces._traces:
domain, size, trace_traceback = trace
for frame in trace_traceback:
try:
traceback = tracebacks[frame]
except KeyError:
if key_type == 'lineno':
frames = (frame,)
else: # key_type == 'filename':
frames = ((frame[0], 0),)
traceback = Traceback(frames)
tracebacks[frame] = traceback
try:
stat = stats[traceback]
stat.size += size
stat.count += 1
except KeyError:
stats[traceback] = Statistic(traceback, size, 1)
return stats
def statistics(self, key_type, cumulative=False):
"""
Group statistics by key_type. Return a sorted list of Statistic
instances.
"""
grouped = self._group_by(key_type, cumulative)
statistics = list(grouped.values())
statistics.sort(reverse=True, key=Statistic._sort_key)
return statistics
def compare_to(self, old_snapshot, key_type, cumulative=False):
"""
        Compute the differences with an old snapshot old_snapshot. Get
        statistics as a sorted list of StatisticDiff instances, grouped by
        key_type.
"""
new_group = self._group_by(key_type, cumulative)
old_group = old_snapshot._group_by(key_type, cumulative)
statistics = _compare_grouped_stats(old_group, new_group)
statistics.sort(reverse=True, key=StatisticDiff._sort_key)
return statistics
def take_snapshot():
"""
Take a snapshot of traces of memory blocks allocated by Python.
"""
if not is_tracing():
raise RuntimeError("the tracemalloc module must be tracing memory "
"allocations to take a snapshot")
traces = _get_traces()
traceback_limit = get_traceback_limit()
return Snapshot(traces, traceback_limit)
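# A short usage sketch for the API above, assuming the module-level start()
# re-exported from _tracemalloc (the printed line is illustrative):
#
#   import tracemalloc
#   tracemalloc.start()
#   # ... run the code under test ...
#   snapshot = tracemalloc.take_snapshot()
#   for stat in snapshot.statistics('lineno')[:10]:
#       print(stat)   # e.g. "spam.py:3: size=1323 KiB, count=42, average=32 KiB"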
| prefetchnta/questlab | bin/x64bin/python/37/Lib/tracemalloc.py | Python | lgpl-2.1 | 17,610 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# * This Program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU Lesser General Public
# * License as published by the Free Software Foundation; either
# * version 2.1 of the License, or (at your option) any later version.
# *
# * Libav is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# * Lesser General Public License for more details.
# *
# * You should have received a copy of the GNU Lesser General Public
# * License along with Libav; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
GstGengui: runner for non-installed package
"""
from gst_gtklaunch import gst_gtklaunch
if __name__ == '__main__':
gst_gtklaunch.main()
| UbiCastTeam/gst-gtklaunch-1.0 | main.py | Python | lgpl-2.1 | 941 |
import pytest
from forte.solvers import solver_factory, HF, ActiveSpaceSolver
def test_detci_4():
"""CASCI test of Forte DETCI using the SparseList algorithm to build the sigma vector"""
ref_hf_energy = -99.977636678461636
ref_fci_energy = -100.113732484560970
xyz = """
F
H 1 1.0
"""
input = solver_factory(molecule=xyz, basis='6-31g')
state = input.state(charge=0, multiplicity=1, sym='a1')
hf = HF(input, state=state, e_convergence=1.0e-12, d_convergence=1.0e-8)
# create a detci solver
fci = ActiveSpaceSolver(
hf,
type='detci',
states=state,
mo_spaces=input.mo_spaces(frozen_docc=[1, 0, 0, 0]),
options={'active_ref_type': 'cas'}
)
fci.run()
# check results
assert hf.value('hf energy') == pytest.approx(ref_hf_energy, 1.0e-10)
assert fci.value('active space energy')[state] == pytest.approx([ref_fci_energy], 1.0e-10)
if __name__ == "__main__":
test_detci_4()
| evangelistalab/forte | tests/pytest-methods/detci/test_detci-4.py | Python | lgpl-3.0 | 986 |
"""Glyph-specific queries on font-files"""
from ttfquery import describe
try:
from OpenGLContext.debug.logs import text_log
except ImportError:
text_log = None
def hasGlyph( font, char, encoding=None ):
"""Check to see if font appears to have explicit glyph for char"""
glyfName = explicitGlyph( font, char, encoding )
if glyfName is None:
return False
return True
def explicitGlyph( font, char, encoding=None ):
"""Return glyphName or None if there is not explicit glyph for char"""
cmap = font['cmap']
if encoding is None:
encoding = describe.guessEncoding( font )
table = cmap.getcmap( *encoding )
glyfName = table.cmap.get( ord(char))
return glyfName
def glyphName( font, char, encoding=None, warnOnFailure=1 ):
"""Retrieve the glyph name for the given character
XXX
Not sure what the effect of the Unicode mapping
will be given the use of ord...
"""
glyfName = explicitGlyph( font, char, encoding )
if glyfName is None:
encoding = describe.guessEncoding( font ) #KH
cmap = font['cmap'] #KH
table = cmap.getcmap( *encoding ) #KH
glyfName = table.cmap.get( -1)
if glyfName is None:
glyfName = font['glyf'].glyphOrder[0]
if text_log and warnOnFailure:
text_log.warn(
"""Unable to find glyph name for %r, in %r using first glyph in table (%r)""",
char,
describe.shortName(font),
glyfName
)
return glyfName
def width( font, glyphName ):
"""Retrieve the width of the giving character for given font
The horizontal metrics table provides both the
width and the left side bearing, we should really
be using the left side bearing to adjust the
character, but that's a later project.
"""
try:
return font['hmtx'].metrics[ glyphName ][0]
except KeyError:
raise ValueError( """Couldn't find glyph for glyphName %r"""%(
glyphName,
))
def lineHeight( font ):
"""Get the base-line to base-line height for the font
XXX
There is some fudging going on here as I
workaround what appears to be a problem with the
specification for sTypoDescender, which states
that it should normally be a negative value, but
winds up being positive in at least one font that
defines points below the zero axis.
XXX The entire OS/2 table doesn't appear in a few
fonts (symbol fonts in particular), such as Corel's
BeeHive and BlackLight 686.
"""
return charHeight(font) + font['OS/2'].sTypoLineGap
def charHeight( font ):
"""Determine the general character height for the font (for scaling)"""
ascent = font['OS/2'].sTypoAscender
descent = font['OS/2'].sTypoDescender
if descent > 0:
descent = - descent
return ascent - descent
def charDescent( font ):
"""Determine the general descent for the font (for scaling)"""
return font['OS/2'].sTypoDescender
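# A hedged usage sketch (the font path is a placeholder; describe.openFont
# is the usual ttfquery entry point for loading a font object):
#
#   font = describe.openFont('/path/to/font.ttf')
#   glyph = glyphName(font, 'A')
#   print glyph, width(font, glyph), lineHeight(font)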
| stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/ttfquery/glyphquery.py | Python | lgpl-3.0 | 3,146 |
import attr
import struct
import math
import re
import copy
from datablock import Datablock
from record import Record
from rowid import Rowid
@attr.s
class TableDatablock(Datablock):
header = attr.ib(default=[])
records = attr.ib(default=[])
def get_data(self):
"""
Convert header and records to bytes
"""
records_buffer = bytearray(self.records_size())
for i in range(0,len(self.records)):
struct.pack_into('%ss' % self.header[2*i+1], records_buffer, self.header[2*i], self.records[i].pack())
fmt = 'BH%sH%ss' % (len(self.header), len(records_buffer))
data = struct.pack(fmt, self.type, self.count_record, *self.header, records_buffer)
return data
def save_record(self, record):
"""
Saves a Record to the datablock
"""
if type(record) is not Record:
raise TypeError("Wrong type for save_record()")
# TODO: check if there's room in the Datablock
# TODO: save to the Datablock
def records_size(self):
return TableDatablock.DATABLOCK_SIZE - ((len(self.header) * 2))
def free_contiguous_space(self, space_needed):
if(len(self.header) == 0):
return 0
last_offset = 0
for i in range(0, len(self.header), 2):
if self.header[last_offset] < self.header[i]:
last_offset = i
#Check for space between records
if(i+2 < len(self.header)):
space_between = self.header[i+2]-(self.header[i]+self.header[i+1])
                if(self.header[i+1] == 0): # a deleted slot frees its 4-byte header entry too
space_between += 4
if(space_needed <= space_between):
return self.header[i]+self.header[i+1]
#Check for space in the end
if(self.records_size() -(self.header[last_offset]+self.header[last_offset+1]) >= space_needed):
return self.header[last_offset]+self.header[last_offset+1]
return -1
def write_data(self, record, position=None):
if(position is None):
position = self.free_contiguous_space(record.size()+4)
if(position == -1):
            print('Error writing data: no contiguous space in datablock')
return False
# Insert Header in the right position
place = -1
for i in range(0, len(self.header), 2):
            if(self.header[i] == position and self.header[i+1] == 0): # Reuse a header slot that was deleted
place = i
self.header[i+1] = record.size()
return self._insert_new_record(record, place, True)
elif(self.header[i] > position):
place = i
self.header.insert(i, position)
self.header.insert(i+1, record.size())
return self._insert_new_record(record, place)
if(place == -1):
place = len(self.header)
self.header.append(position)
self.header.append(record.size())
return self._insert_new_record(record, place)
def update_record(self, record, desc):
tmp_record = copy.copy(record)
tmp_record.description = desc
pos = record.rowid.pos*2
can_store = False
if(pos+2 >= len(self.header)):
can_store =((self.header[pos+1] + (self.records_size() - (self.header[pos]+self.header[pos+1]))) >= tmp_record.size())
else:
can_store = ((self.header[pos+1]+(self.header[pos+2]-self.header[pos+1])) >= tmp_record.size())
#Check for space between records
if(can_store):
record.description = desc
self.header[pos+1] = record.size()
self._dirty = True
return True
else:
self.delete_record(record)
return None
def delete_record(self, record):
pos = record.rowid.pos
self.header[pos*2+1] = 0 #set record removed size to 0 to mark it was removed
self.records[pos].deleted = True
self._dirty = True
return True
def search_by(self, value, field):
found_records = []
for record in self.records:
if(field == 'code'):
if(record.code == value and not record.deleted):
return [record]
elif(field == 'description'):
if(record.description == value and not record.deleted):
found_records.append(record)
return found_records
def get_record_by_pos(self, position):
"""
Get specific record by its position
"""
return self.records[position]
@classmethod
def from_bytes(cls, address, data=None, count_record=0):
"""
Creates a new TableDatablock in memory from a string of bytes
"""
if(count_record == 0 and data is None):
return cls(address=address, count_record=count_record, type=1,
header=[], records=[])
header = []
header_info, record_info = TableDatablock.unpack(count_record, data)
for i in range(0, count_record * 2, 2):
header.append(header_info[i+2]) #Get record begin position
header.append(header_info[i + 2 + 1]) #Get record length
records = TableDatablock.unpack_records(record_info[0], header, address)
return cls(address=address, count_record=count_record, type=1,
header=header, records=records)
@staticmethod
def unpack(count_record, data):
records_size = TableDatablock.DATABLOCK_SIZE - ((count_record * 4) + 4) # Calculate the remaining space in the record data area
fmt_header = 'BH%sH%sx' % (count_record * 2, records_size)
fmt_record = '%ss' % records_size
header = struct.unpack(fmt_header, data) # Get binary header data
records = struct.unpack_from(fmt_record, data, (count_record * 4) + 4) # Get binary records data
return header, records
@staticmethod
def unpack_records(record_str, header, address):
"""
Returns a list of Records included in the datablock
"""
records = []
for i in range(0, len(header), 2):
if(header[i+1] != 0):
info = struct.unpack_from('I%ss' % (header[i+1]-4), record_str, header[i])
rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0)))
desc = re.sub(r'[^\w]', '', info[1].decode())
records.append(Record(code=info[0], description=desc, rowid=rowid))
else:
rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0)))
records.append(Record(code=0, description='', rowid=rowid, deleted=True))
return records
def _insert_new_record(self, record, place, reuse=False):
if(record.rowid is None):
record.rowid = Rowid(dblock=self.address, pos=int(math.ceil(place/2.0)))
if(reuse):
self.records[place] = record
else:
self.records.insert(place, record)
self._dirty = True
self.count_record = len(self.records)
return record
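# A rough usage sketch, assuming Record(code=..., description=...) leaves
# rowid unset so write_data() can assign one (see _insert_new_record):
#
#   db = TableDatablock.from_bytes(address=0)      # fresh, empty datablock
#   rec = db.write_data(Record(code=1, description='hello'))
#   raw = db.get_data()                            # serialize back to bytes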
| pedrovanzella/bstar-tree | table_datablock.py | Python | lgpl-3.0 | 7,201 |
"""
Created on June 10, 2012
@author: peta15
"""
from wtforms import fields
from wtforms import Form
from wtforms import validators
from lib import utils
from webapp2_extras.i18n import lazy_gettext as _
from webapp2_extras.i18n import ngettext, gettext
FIELD_MAXLENGTH = 50 # intended to stop maliciously long input
class FormTranslations(object):
def gettext(self, string):
return gettext(string)
def ngettext(self, singular, plural, n):
return ngettext(singular, plural, n)
class BaseForm(Form):
def __init__(self, request_handler):
super(BaseForm, self).__init__(request_handler.request.POST)
def _get_translations(self):
return FormTranslations()
# ==== Mixins ====
class PasswordConfirmMixin(BaseForm):
password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))])
c_password = fields.TextField(_('Confirm Password'),
[validators.Required(), validators.EqualTo('password', _('Passwords must match.')),
validators.Length(max=FIELD_MAXLENGTH,
message=_("Field cannot be longer than %(max)d characters."))])
class UsernameMixin(BaseForm):
username = fields.TextField(_('Username'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters.")),
validators.regexp(utils.EMAIL_REGEXP, message=_(
"Username / Email invalid."))])
class NameMixin(BaseForm):
name = fields.TextField(_('Name'), [
validators.Length(max=FIELD_MAXLENGTH, message=_("Field cannot be longer than %(max)d characters.")),
validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
"Name invalid. Use only letters and numbers."))])
last_name = fields.TextField(_('Last Name'), [
validators.Length(max=FIELD_MAXLENGTH, message=_("Field cannot be longer than %(max)d characters.")),
validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
"Last Name invalid. Use only letters and numbers."))])
class EmailMixin(BaseForm):
email = fields.TextField(_('Email'), [validators.Required(),
validators.Length(min=8, max=FIELD_MAXLENGTH, message=_(
"Field must be between %(min)d and %(max)d characters long.")),
validators.regexp(utils.EMAIL_REGEXP, message=_('Invalid email address.'))])
# ==== Forms ====
class PasswordResetCompleteForm(PasswordConfirmMixin):
pass
class LoginForm(UsernameMixin):
password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))],
id='l_password')
pass
class ContactForm(EmailMixin):
name = fields.TextField(_('Name'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters.")),
validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
"Name invalid. Use only letters and numbers."))])
message = fields.TextAreaField(_('Message'), [validators.Required(), validators.Length(max=65536)])
pass
class RegisterForm(PasswordConfirmMixin, UsernameMixin, NameMixin, EmailMixin):
country = fields.SelectField(_('Country'), choices=[])
tz = fields.SelectField(_('Timezone'), choices=[])
pass
class EditProfileForm(UsernameMixin, NameMixin):
country = fields.SelectField(_('Country'), choices=[])
tz = fields.SelectField(_('Timezone'), choices=[])
pass
class EditPasswordForm(PasswordConfirmMixin):
current_password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))])
pass
class EditEmailForm(BaseForm):
new_email = fields.TextField(_('Email'), [validators.Required(),
validators.Length(min=8, max=FIELD_MAXLENGTH, message=_(
"Field must be between %(min)d and %(max)d characters long.")),
validators.regexp(utils.EMAIL_REGEXP,
message=_('Invalid email address.'))])
password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))])
pass
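# A minimal sketch of using these forms inside a webapp2 request handler;
# BaseHandler and do_login are hypothetical names:
#
#   class LoginHandler(BaseHandler):
#       def post(self):
#           form = LoginForm(self)        # BaseForm reads self.request.POST
#           if form.validate():
#               do_login(form.username.data, form.password.data)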
| roninio/gae-boilerplate | boilerplate/forms.py | Python | lgpl-3.0 | 5,505 |
import pyblish.api
import maya.cmds as cmds
import pymel
class ValidateDisplaylayer(pyblish.api.Validator):
""" Ensure no construction history exists on the nodes in the instance """
families = ['scene']
optional = True
label = 'Modeling - Display Layers'
def process(self, instance):
"""Process all the nodes in the instance """
layers = []
for layer in cmds.ls(type='displayLayer'):
            # skip referenced layers
            if pymel.core.PyNode(layer).isReferenced():
                continue
if layer != 'defaultLayer':
layers.append(layer)
assert not layers, 'Scene has displayLayers: %s' % layers
| ProgressiveFX/pyblish-pfx | pyblish_pfx/plugins/maya/modeling/_validate_displaylayer.py | Python | lgpl-3.0 | 692 |
"""
YANK, a package for alchemical free energy calculations using OpenMM
"""
__author__ = "John D. Chodera"
__license__ = "GPL"
__maintainer__ = "John D. Chodera"
__email__ = "jchodera@gmail.com"
__version__ = "1.0beta"
from yank import Yank
import analysis
__all__ = ['Yank', 'analysis']
| choderalab/brokenyank | src/yank/__init__.py | Python | lgpl-3.0 | 294 |
#Run this file after automatic conversion of the VisualStudio 2008 solution by VisualStudio 2010.
#This can be done whenever the 2008 solution changes.
#It will make the necessary cleanup and updates to the vcxproj files
#the .props files need to be maintained by hand if the .vsprops files change
from __future__ import with_statement
import sys
import os
import os.path
def vs9to10(src, dest):
for name in os.listdir(src):
path, ext = os.path.splitext(name)
if ext.lower() not in ('.vcxproj',):
continue
filename = os.path.normpath(os.path.join(src, name))
destname = os.path.normpath(os.path.join(dest, name))
print("%s -> %s" % (filename, destname))
lines = []
lastline = b""
importgroup = False
with open(filename, 'rb') as fin:
for line in fin:
#remove redundant linker output info
if b"<OutputLine>" in line:
continue
if b"<ProgramDatabaseFile>" in line:
continue
if b"<ImportLibrary>" in line and b"</ImportLibrary>" in line:
continue
#add new property sheet to the pythoncore
if importgroup and "pythoncore" in name.lower():
if b"</ImportGroup>" in line:
if b"debug.props" in lastline:
lines.append(b' <Import Project="pythoncore_d.props" />\r\n')
elif b"pythoncore" not in lastline:
lines.append(b' <Import Project="pythoncore.props" />\r\n')
if b"<ImportGroup Condition" in line:
importgroup = True
elif b"</ImportGroup>" in line:
importgroup = False
lines.append(line)
lastline = line
with open(destname, 'wb') as fout:
for line in lines:
fout.write(line)
if __name__ == "__main__":
src = "." if len(sys.argv) < 2 else sys.argv[1]
name = os.path.basename(os.path.abspath(src))
dest = os.path.abspath(os.path.join(src, "..", name + "Upd"))
os.makedirs(dest)
vs9to10(src, dest)
| Orav/kbengine | kbe/src/lib/python/PCbuild/vs9to10.py | Python | lgpl-3.0 | 2,296 |
# Copyright Vinicius Castanheira (vncastanheira@gmail.com) - 2012
# This program is a part of the Noir Game.
# This program is under the Gnu LGPL.
from pygame import *
from pyganim import *
from entity import Entity
from bullet import Bullet
class Player(Entity):
def __init__(self, x, y, images):
Entity.__init__(self, x, y, images)
# Properties
self.alive = True
self.isHidden = True
self.bullets = 6
# Directions variables
self.goLeft = self.goRight = False
self.facingDirection = 'RIGHT'
# An exclamation mark, showing that the player is visible
self.exclamation = Entity(self.rect.centerx, self.rect.top + 16, ["exclamation.png"])
# Group of bullets objects, for updating and drawing
self.bulletGroup = sprite.Group()
# Dead animation
self.animationDead = PygAnimation([("player_dead.png", 0.5)])
def events(self, event):
if self.alive:
if event.type == KEYDOWN and event.key == K_RIGHT:
self.goRight = True
self.goLeft = False
if self.facingDirection == 'LEFT':
self.flip()
self.facingDirection = 'RIGHT'
if event.type == KEYDOWN and event.key == K_LEFT:
self.goRight = False
self.goLeft = True
if self.facingDirection == 'RIGHT':
self.flip()
self.facingDirection = 'LEFT'
if event.type == KEYDOWN and event.key == K_z and not self.isHidden and self.bullets > 0:
shoot = mixer.Sound("shoot.ogg")
shoot.play()
bulletDirection = 0
if self.facingDirection == 'RIGHT':
bulletDirection = self.rect.right
if self.facingDirection == 'LEFT':
bulletDirection = self.rect.left
self.bulletGroup.add(Bullet(bulletDirection, self.rect.centery, self.facingDirection, ["bullet.png"]))
self.bullets -= 1
if event.type == KEYUP and event.key == K_RIGHT:
self.goRight = False
if event.type == KEYUP and event.key == K_LEFT:
self.goLeft = False
def update(self):
movement = 4
if self.alive:
            if self.rect.centerx - movement <= 0:
self.rect.centerx = 4
if self.goRight:
self.move(movement,0)
if self.goLeft:
self.move(-movement,0)
if self.rect.centerx >= 640:
return 'NEXTLEVEL'
def draw(self, display):
if self.alive:
self.animation.play()
self.animation.blit(display, (self.rect.x,self.rect.y))
if not self.isHidden:
self.exclamation.animation.play()
self.exclamation.animation.blit(display, (self.rect.centerx - 4, self.rect.top - 20))
else:
self.animationDead.play()
self.animationDead.blit(display, (self.rect.x,self.rect.y))
# Another functions
def flip(self):
self.animation.flip(True, False)
    def spotlightCollision(self, spotlight):
        if sprite.collide_rect(self, spotlight):
self.isHidden = False
return True
else:
self.isHidden = True
return False
def bulletCollision(self, bullet):
if sprite.collide_rect(self, bullet) and not self.isHidden and self.alive:
die = mixer.Sound("dead.ogg")
die.play()
self.alive = False
return True # Collision occurred
return False # Otherwise
| vncastanheira/noirgame | player.py | Python | lgpl-3.0 | 3,021 |
# from pytigon_js.tools import history_push_state, correct_href, remove_element, process_resize
# from pytigon_js.ajax_region import mount_html
class Page:
def __init__(self, id, page):
self.id = id
self.page = page
def set_href(self, href):
self.page.attr("_href", href)
def get_href(self):
return self.page.attr("_href")
class TabMenuItem:
def __init__(self, id, title, url, data=None):
self.id = id
self.title = jQuery.trim(title)
self.url = url
self.data = data
class TabMenu:
def __init__(self):
self.id = 0
self.titles = {}
self.active_item = None
def get_active_item(self):
return self.active_item
def is_open(self, title):
if self.titles and title in self.titles and self.titles[title]:
return True
else:
return False
def activate(self, title, push_state=True):
menu_item = self.titles[title]
jQuery(sprintf("#li_%s a", menu_item.id)).tab("show")
if push_state and window.PUSH_STATE:
history_push_state(menu_item.title, menu_item.url)
def register(self, title):
self.titles[title] = "$$$"
def new_page(self, title, data_or_html, href, title_alt=None):
_id = "tab" + self.id
menu_item = TabMenuItem(_id, title, href, data_or_html)
self.titles[title] = menu_item
if title_alt and title_alt != title:
self.titles[title_alt] = menu_item
menu_pos = vsprintf(
"<li id='li_%s' class ='nav-item'><a href='#%s' class='nav-link bg-info' data-toggle='tab' data-bs-toggle='tab' role='tab' title='%s'>%s    </a> <button id = 'button_%s' class='close btn btn-outline-danger btn-xs' title='remove page' type='button'><span class='fa fa-times'></span></button></li>",
[_id, _id, title, title, _id],
)
append_left = jQuery("#tabs2").hasClass("append-left")
if append_left:
jQuery("#tabs2").prepend(menu_pos)
else:
jQuery("#tabs2").append(menu_pos)
jQuery("#tabs2_content").append(
sprintf(
"<div class='tab-pane container-fluid ajax-region ajax-frame ajax-link win-content content page' id='%s' data-region='page' href='%s'></div>",
_id,
href,
)
)
window.ACTIVE_PAGE = Page(_id, jQuery("#" + _id))
self.active_item = menu_item
if window.PUSH_STATE:
history_push_state(title, href)
def _on_show_tab(self, e):
nonlocal menu_item
window.ACTIVE_PAGE = Page(_id, jQuery("#" + _id), menu_item)
menu = get_menu()
menu_item = menu.titles[jQuery.trim(e.target.text)]
self.active_item = menu_item
if window.PUSH_STATE:
history_push_state(menu_item.title, menu_item.url)
process_resize(document.getElementById(menu_item.id))
if append_left:
jQuery("#tabs2 a:first").on("shown.bs.tab", _on_show_tab)
jQuery("#tabs2 a:first").tab("show")
else:
jQuery("#tabs2 a:last").on("shown.bs.tab", _on_show_tab)
jQuery("#tabs2 a:last").tab("show")
mount_html(document.getElementById(_id), data_or_html, None)
def _on_button_click(self, event):
get_menu().remove_page(jQuery(this).attr("id").replace("button_", ""))
jQuery(sprintf("#button_%s", _id)).click(_on_button_click)
scripts = jQuery("#" + _id + " script")
def _local_fun(index, element):
eval(this.innerHTML)
scripts.each(_local_fun)
self.id += 1
return _id
def remove_page(self, id):
def _local_fun(index, value):
if value and value.id == id:
self.titles[index] = None
jQuery.each(self.titles, _local_fun)
remove_element(sprintf("#li_%s", id))
remove_element(sprintf("#%s", id))
last_a = jQuery("#tabs2 a:last")
if last_a.length > 0:
last_a.tab("show")
else:
window.ACTIVE_PAGE = None
if window.PUSH_STATE:
history_push_state("", window.BASE_PATH)
if jQuery("#body_desktop").find(".content").length == 0:
window.init_start_wiki_page()
jQuery("#body_desktop").show()
#'standard' 'simple', 'traditional', 'mobile', 'tablet', 'hybrid'
def on_menu_href(self, elem, data_or_html, title, title_alt=None, url=None):
if window.APPLICATION_TEMPLATE == "modern":
if self.is_open(title):
self.activate(title)
else:
self.register(title)
if url:
href = url
else:
href = jQuery(elem).attr("href")
href2 = correct_href(href)
jQuery("#body_desktop").hide()
# self.new_page(title, data_or_html.innerHTML, href2, title_alt)
self.new_page(title, data_or_html, href2, title_alt)
jQuery(".auto-hide").trigger("click")
return False
else:
mount_html(document.querySelector("#body_desktop"), data_or_html, None)
jQuery(".auto-hide").trigger("click")
return False
def get_menu():
if not window.MENU:
window.MENU = TabMenu()
return window.MENU
| Splawik/pytigon | pytigon/static_src/pytigon_js/tabmenu.py | Python | lgpl-3.0 | 5,498 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from openerp import models, fields, api, tools, _
class odisea_representative(models.Model):
"""Representative"""
_name = 'odisea.representative'
_description = 'Representative'
@api.multi
def _has_image(self):
return dict((p.id, bool(p.image)) for p in self)
name = fields.Char(string='Name', required=True)
cuit = fields.Char(string='CUIT', size=13)
title = fields.Many2one('res.partner.title', 'Title')
company = fields.Char(string='Company')
ref = fields.Char('Contact Reference', select=1)
website = fields.Char('Website', help="Website of Partner or Company")
comment = fields.Text('Notes')
category_id = fields.Many2many('res.partner.category', id1='partner_id', id2='category_id', string='Tags')
active = fields.Boolean('Active', default=True)
street = fields.Char('Street')
street2 = fields.Char('Street2')
zip = fields.Char('Zip', size=24, change_default=True)
city = fields.Char('City')
state_id = fields.Many2one("res.country.state", 'State', ondelete='restrict')
country_id = fields.Many2one('res.country', 'Country', ondelete='restrict')
email = fields.Char('Email')
phone = fields.Char('Phone')
fax = fields.Char('Fax')
mobile = fields.Char('Mobile')
birthdate = fields.Char('Birthdate')
function = fields.Char('Job Position')
is_company = fields.Boolean('Is a Company', help="Check if the contact is a company, otherwise it is a person")
use_parent_address = fields.Boolean('Use Company Address', help="Select this if you want to set company's address information for this contact")
# image: all image fields are base64 encoded and PIL-supported
image = fields.Binary("Image",
help="This field holds the image used as avatar for this contact, limited to 1024x1024px")
image_medium = fields.Binary(compute="_get_image",
string="Medium-sized image",
store= False,
help="Medium-sized image of this contact. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views.")
image_small = fields.Binary(compute="_get_image",
string="Small-sized image",
store= False,
help="Small-sized image of this contact. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required.")
has_image = fields.Boolean(compute=_has_image)
color = fields.Integer('Color Index')
@api.multi
def onchange_state(self, state_id):
if state_id:
state = self.env['res.country.state'].browse(state_id)
return {'value': {'country_id': state.country_id.id}}
return {}
@api.multi
def onchange_type(self, is_company):
value = {'title': False}
if is_company:
value['use_parent_address'] = False
domain = {'title': [('domain', '=', 'partner')]}
else:
domain = {'title': [('domain', '=', 'contact')]}
return {'value': value, 'domain': domain}
@api.one
@api.depends("image")
def _get_image(self):
""" calculate the images sizes and set the images to the corresponding
fields
"""
image = self.image
# check if the context contains the magic `bin_size` key
if self.env.context.get("bin_size"):
# refetch the image with a clean context
image = self.env[self._name].with_context({}).browse(self.id).image
data = tools.image_get_resized_images(image, return_big=True, avoid_resize_big=False)
self.image_big = data["image"]
self.image_medium = data["image_medium"]
self.image_small = data["image_small"]
return True
| vasconcelosfer/odoo-odisea | odisea/models/representative.py | Python | lgpl-3.0 | 4,870 |
#===============================================================================
import re
from untwisted.magic import sign
from channel import not_quiet
from runtime import later
import message
import util
import limit
import kakasi_lib
link, install, uninstall = util.LinkSet().triple()
#===============================================================================
'''
@link('MESSAGE')
@not_quiet()
def h_message(bot, id, target, msg):
if limit.mark_activity(bot, id): return
kakasi(bot, id, target or id.nick, msg, target is not None, auto=True)
@link('PROXY_MSG')
@not_quiet()
def h_proxy_message(
bot, id, target, msg, no_kakasi=False, no_auto=False, **kwds
):
if no_kakasi or no_auto: return
kakasi(bot, id, target, msg, target.startswith('#'), auto=True, **kwds)
'''
#===============================================================================
@link('HELP')
def h_help(bot, reply, args):
reply('romaji TEXT',
'Converts Japanese characters to romaji.')
@link(('HELP', 'romaji'))
@link(('HELP', 'rj'))
def h_help_romaji(bot, reply, args):
reply('romaji TEXT')
reply('rj TEXT',
'Shows the Hepburn romanisation(s) of Japanese characters occurring in TEXT,'
' according to KAKASI <http://kakasi.namazu.org>. The command prefix may be'
' omitted, leaving just TEXT, if the majority of TEXT is Japanese.')
@link('!romaji')
@link('!rj')
def h_romaji(bot, id, target, args, full_msg):
kakasi(bot, id, target or id.nick, args, target is not None)
#===============================================================================
def kakasi(bot, id, target, msg, prefix=True, auto=False, **kwds):
if auto and not kakasi_lib.is_ja(msg): return
raw_reply = kakasi_lib.kakasi(msg)
if auto and len(raw_reply) > 200: return
reply = ('<%s> %s' % (id.nick, raw_reply)) if prefix and id else raw_reply
bot.send_msg(target, reply)
bot.drive('runtime.later', sign(
'PROXY_MSG', bot, id, target, raw_reply, **dict(kwds, no_kakasi=True)))
| joodicator/PageBot | page/kakasi.py | Python | lgpl-3.0 | 2,024 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This exercise shows some important concepts that you should be aware about:
- using codecs module to write unicode files
- using authentication with web APIs
- using offset when accessing web APIs
To run this code locally you have to register at the NYTimes developer site
and get your own API key. You will be able to complete this exercise in our UI without doing so,
as we have provided a sample result.
Your task is to process the saved file that represents the most popular (by view count)
articles in the last day, and return the following data:
- list of dictionaries, where the dictionary key is "section" and value is "title"
- list of URLs for all media entries with "format": "Standard Thumbnail"
All your changes should be in the article_overview function.
The rest of functions are provided for your convenience, if you want to access the API by yourself.
"""
import json
import codecs
import requests
from jtorrente.openstreetmap.lesson1.exercise1_15 import pretty_print
URL_MAIN = "http://api.nytimes.com/svc/"
URL_POPULAR = URL_MAIN + "mostpopular/v2/"
API_KEY = { "popular": "8f59290cf487b2c40b3bbac5de38049c:17:72343821",
"article": "c3b8c7101c497404c903a3e1691ea96d:2:72343821"}
def get_from_file(kind, period):
filename = "popular-{0}-{1}.json".format(kind, period)
with open(filename, "r") as f:
return json.loads(f.read())
def article_overview(kind, period):
data = get_from_file(kind, period)
#data = get_popular(URL_POPULAR, kind, period)
titles = []
urls =[]
for article in data:
title_dic = {}
title_dic[article['section']] = article['title']
titles.append(title_dic)
media_list = article['media']
for media_metadata_container in media_list:
media_metadata_list = media_metadata_container['media-metadata']
for media_metadata in media_metadata_list:
if media_metadata['format'] == "Standard Thumbnail":
urls.append(media_metadata['url'])
print len(titles)
print len(urls)
return (titles, urls)
def query_site(url, target, offset):
# This will set up the query with the API key and offset
# Web services often use offset parameter to return data in small chunks
# NYTimes returns 20 articles per request, if you want the next 20
# You have to provide the offset parameter
if API_KEY["popular"] == "" or API_KEY["article"] == "":
print "You need to register for NYTimes Developer account to run this program."
print "See Intructor notes for information"
return False
params = {"api-key": API_KEY[target], "offset": offset}
r = requests.get(url, params = params)
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def get_popular(url, kind, days, section="all-sections", offset=0):
# This function will construct the query according to the requirements of the site
# and return the data, or print an error message if called incorrectly
if days not in [1,7,30]:
print "Time period can be 1,7, 30 days only"
return False
if kind not in ["viewed", "shared", "emailed"]:
print "kind can be only one of viewed/shared/emailed"
return False
url = URL_POPULAR + "most{0}/{1}/{2}.json".format(kind, section, days)
data = query_site(url, "popular", offset)
return data
def save_file(kind, period):
# This will process all results, by calling the API repeatedly with supplied offset value,
# combine the data and then write all results in a file.
data = get_popular(URL_POPULAR, kind, period)
num_results = data["num_results"]
full_data = []
with codecs.open("popular-{0}-{1}-full.json".format(kind, period), encoding='utf-8', mode='w') as v:
for offset in range(0, num_results, 20):
data = get_popular(URL_POPULAR, kind, period, offset=offset)
full_data += data["results"]
v.write(json.dumps(full_data, indent=2))
def test():
save_file("viewed", 1)
titles, urls = article_overview("viewed", 1)
assert len(titles) == 20
assert len(urls) == 30
assert titles[2] == {'Opinion': 'Professors, We Need You!'}
assert urls[20] == 'http://graphics8.nytimes.com/images/2014/02/17/sports/ICEDANCE/ICEDANCE-thumbStandard.jpg'
if __name__ == "__main__":
test() | jtorrente/openstreetmap | jtorrente/openstreetmap/problemset1/ex3_nytimes.py | Python | lgpl-3.0 | 4,470 |
from datetime import datetime, timedelta
| ourbest/sns_app | utils/scheduler.py | Python | lgpl-3.0 | 41 |
import os
import apium.frameworks
def test_basic_task_run___state_is_consistent(port_num, running_worker):
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_test_project.settings'
apium.frameworks.setup()
| cryporchild/apium | tests/test_django.py | Python | lgpl-3.0 | 213 |
default_app_config = 'jobs.apps.JobConfig'
| SeiryuZ/HemeWeb | src/jobs/__init__.py | Python | lgpl-3.0 | 43 |
from button import Button
class SellButton(Button):
def __init__(self, image, x, y, parent):
super(SellButton, self).__init__(image, x, y, parent)
def get_clicked(self):
self.parent.sell_tower() | ToBaer94/PygameTowerDefense | buttons/sell_button.py | Python | lgpl-3.0 | 221 |
#!/usr/bin/env python
#
# Read doxygen xml files to find all members of the dolfin
# name space and generate API doc files per subdirectory of
# dolfin
#
# Written by Tormod Landet, 2017
#
from __future__ import print_function
import sys, os
import parse_doxygen
DOXYGEN_XML_DIR = 'doxygen/xml'
API_GEN_DIR = 'generated_rst_files'
SWIG_DIR = '../dolfin/swig/'
SWIG_FILE = 'docstrings.i'
MOCK_PY = 'mock_cpp_modules.py'
def get_subdir(hpp_file_name):
"""
Return "subdir" for a path name like
/path/to/dolfin/subdir/a_header.h
"""
path_components = hpp_file_name.split(os.sep)
path_components_rev = path_components[::-1]
idx = path_components_rev.index('dolfin')
subdir = path_components_rev[idx - 1]
return subdir
def get_short_path(hpp_file_name):
"""
Return "dolfin/subdir/a_header.h" for a path name like
/path/to/dolfin/subdir/a_header.h
"""
path_components = hpp_file_name.split(os.sep)
if 'dolfin' in path_components:
# dolfin header files
path_components_rev = path_components[::-1]
idx = path_components_rev.index('dolfin')
short_path = path_components_rev[:idx + 1]
else:
# ufc header files
short_path = path_components[-1:]
return os.sep.join(short_path[::-1])
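# Quick sanity examples for the two helpers above (illustrative POSIX paths):
#   get_subdir('/src/dolfin/mesh/Mesh.h')      -> 'mesh'
#   get_short_path('/src/dolfin/mesh/Mesh.h')  -> 'dolfin/mesh/Mesh.h'
#   get_short_path('/src/ufc/ufc.h')           -> 'ufc.h'  (no 'dolfin' component)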
def write_rst(subdir, subdir_members, api_gen_dir):
"""
Write files for Sphinx C++ API documentation
"""
rst_name = os.path.join(api_gen_dir, 'api_gen_%s.rst' % subdir)
print('Generating', rst_name)
# Make output directory
if not os.path.isdir(api_gen_dir):
os.mkdir(api_gen_dir)
prev_short_name = ''
with open(rst_name, 'wt') as rst:
rst.write('.. automatically generated by generate_api_rst.py and parse_doxygen.py\n')
#rst.write('dolfin/%s\n%s' % (subdir, '=' * 80))
#rst.write('\nDocumentation for C++ code found in dolfin/%s/*.h\n\n' % subdir)
rst.write('\n.. contents::\n\n\n')
kinds = [('typedef', 'Type definitions', 'doxygentypedef'),
('enum', 'Enumerations', 'doxygenenum'),
('function', 'Functions', 'doxygenfunction'),
('struct', 'Structures', 'doxygenstruct'),
('variable', 'Variables', 'doxygenvariable'),
('class', 'Classes', 'doxygenclass')]
for kind, kind_name, directive in kinds:
if kind in subdir_members:
# Write header H2
rst.write('%s\n%s\n\n' % (kind_name, '-'*70))
for name, member in sorted(subdir_members[kind].items()):
short_name = member.short_name
fn = get_short_path(member.hpp_file_name)
# Write header H3
if short_name != prev_short_name:
rst.write('%s\n%s\n\n' % (short_name, '~'*60))
prev_short_name = short_name
# Info about filename
rst.write('C++ documentation for ``%s`` from ``%s``:\n\n' % (short_name, fn))
# Write documentation for this item
rst.write(member.to_rst())
rst.write('\n\n')
def write_swig(subdir, subdir_members, swig_dir, swig_file_name, swig_header=''):
"""
Write files for SWIG so that we get docstrings in Python
"""
swig_subdir = os.path.join(swig_dir, subdir)
if not os.path.isdir(swig_subdir):
os.mkdir(swig_subdir)
swig_iface_name = os.path.join(swig_subdir, swig_file_name)
print('Generating', swig_iface_name)
with open(swig_iface_name, 'wt') as out:
out.write(swig_header)
out.write('// SWIG docstrings generated by doxygen and generate_api_rst.py / parse_doxygen.py\n\n')
for kind in subdir_members:
for name, member in sorted(subdir_members[kind].items()):
out.write(member.to_swig())
out.write('\n')
def write_mock_modules(namespace_members, mock_py_module):
"""
Write a mock module so that we can create documentation for
dolfin on ReadTheDocs where we cannot compile so that the
dolfin.cpp.* module are not available. We fake those, but
include the correct docstrings
"""
print('Generating', mock_py_module)
mydir = os.path.dirname(os.path.abspath(__file__))
swig_module_dir = os.path.join(mydir, '..', 'dolfin', 'swig', 'modules')
swig_module_dir = os.path.abspath(swig_module_dir)
if not os.path.isdir(swig_module_dir):
print('SWIG module directory is not present,', swig_module_dir)
print('No mock Python code will be generated')
return
with open(mock_py_module, 'wt') as out:
out.write('#!/usr/bin/env python\n')
out.write('#\n')
out.write('# This file is AUTO GENERATED!\n')
out.write('# This file is fake, full of mock stubs\n')
out.write('# This file is made by generate_api_rst.py\n')
out.write('#\n\n')
out.write('from __future__ import print_function\n')
out.write('from types import ModuleType\n')
out.write('import sys\n')
out.write('\n\nWARNING = "This is a mock object!"\n')
# Loop over SWIG modules and generate mock Python modules
for module_name in os.listdir(swig_module_dir):
module_i = os.path.join(swig_module_dir, module_name, 'module.i')
if not os.path.isfile(module_i):
continue
# Find out which headers are included in this SWIG module
included_headers = set()
for line in open(module_i):
if line.startswith('#include'):
header = line[8:].strip()[1:-1]
included_headers.add(header)
elif line.startswith('%import'):
header = line.split(')')[1].strip()[1:-1]
included_headers.add(header)
module_py_name = '_' + module_name
full_module_py_name = 'dolfin.cpp.' + module_py_name
out.write('\n\n' + '#'*80 + '\n')
out.write('%s = ModuleType("%s")\n' % (module_py_name, full_module_py_name))
out.write('sys.modules["%s"] = %s\n' % (full_module_py_name, module_py_name))
out.write('\n')
print(' Generating module', full_module_py_name)
for member in namespace_members:
# Check if this member is included in the given SWIG module
hpp_file_name = get_short_path(member.hpp_file_name)
if hpp_file_name not in included_headers:
continue
out.write(member.to_mock(modulename=module_py_name))
out.write('\n\n')
def parse_doxygen_xml_and_generate_rst_and_swig(xml_dir, api_gen_dir, swig_dir, swig_file_name,
swig_header='', mock_py_module=''):
# Read doxygen XML files and split namespace members into
# groups based on subdir and kind (class, function, enum etc)
if os.path.isdir(xml_dir):
namespaces = parse_doxygen.read_doxygen_xml_files(xml_dir, ['dolfin', 'ufc'])
else:
raise OSError('Missing doxygen XML directory %r' % xml_dir)
# Group all documented members into subdir groups (io, la, mesh, fem etc)
sorted_members = list(namespaces['dolfin'].members.values())
sorted_members.sort(key=lambda m: m.name)
all_members = {}
for member in sorted_members:
subdir = get_subdir(member.hpp_file_name)
sd = all_members.setdefault(subdir, {})
kd = sd.setdefault(member.kind, {})
kd[member.name] = member
# Generate Sphinx RST files and SWIG interface files
for subdir, subdir_members in sorted(all_members.items()):
if subdir:
if api_gen_dir:
write_rst(subdir, subdir_members, api_gen_dir)
if swig_dir:
write_swig(subdir, subdir_members, swig_dir, swig_file_name, swig_header)
# Write UFC documenttation, no SWIG for UFC, only RST
if api_gen_dir:
ufc_members = {}
for member in namespaces['ufc'].members.values():
kd = ufc_members.setdefault(member.kind, {})
kd[member.name] = member
write_rst('ufc', ufc_members, api_gen_dir)
# Generate a mock Python module
if mock_py_module:
write_mock_modules(sorted_members, mock_py_module)
if __name__ == '__main__':
swig_dir = SWIG_DIR
if '--no-swig' in sys.argv:
swig_dir = None
parse_doxygen_xml_and_generate_rst_and_swig(DOXYGEN_XML_DIR, API_GEN_DIR, swig_dir,
SWIG_FILE, '', MOCK_PY)
| FEniCS/dolfin | doc/generate_api_rst.py | Python | lgpl-3.0 | 8,783 |
# -*- coding: utf8 -*-
from .player import PlayerEntity
from .base import MobEntity
from .item import ItemEntity
__all__ = [
'PlayerEntity',
'MobEntity',
'ItemEntity',
] | nosix/PyCraft | src/pycraft/service/composite/entity/__init__.py | Python | lgpl-3.0 | 188 |
#!/usr/local/bin/python
import pygame
from pygame.locals import *
import time
def init():
pygame.display.set_mode((800, 600))
def test_1():
def test(pressed, i):
if i == 0:
return
if pressed[K_a]:
print "K_a"
if pressed[K_b]:
print "K_b"
test(pressed, i -1)
pressed = pygame.key.get_pressed()
for x in xrange(10000):
# test 2 differents keys
test(pressed, 100)
def test_2():
def test(i):
pressed = pygame.key.get_pressed()
if i == 0:
return
if pressed[K_a]:
print "K_a"
if pressed[K_b]:
print "K_b"
test(i -1)
for x in xrange(10000):
# test 2 differents keys
test(100)
def bench(init, funclist):
init()
for f in funclist:
t0 = time.time()
f()
elapsed = time.time() - t0
print "Function: %s took: %.5fs" % (f.__name__, elapsed)
if __name__ == '__main__':
bench(init, [test_1, test_2])
| Hiestaa/cognitive-racer | bench.py | Python | lgpl-3.0 | 856 |
#!/usr/bin/env python
class NotValidIP(Exception):
pass
class NotValidIPLength(Exception):
pass
while True:
try:
ip_addr = input("Enter a network IP address: ")
ip_addr_split = ip_addr.split('.')
len1 = len(ip_addr_split)
ip_addr_split = ip_addr_split[:3]
ip_addr_split.append('0')
        for i, element in enumerate(ip_addr_split):
            ip_addr_split[i] = int(element)
for element in ip_addr_split:
if (element > 255 or element < 0):
raise NotValidIP
if (len1!=3 and len1!=4):
raise NotValidIPLength
print("The network IP address now is: %s" % ip_addr_split)
break
except ValueError:
        print('Not a valid number')
except NotValidIP:
print('this is not a valid IP address')
except NotValidIPLength:
        print('an IP address must have 3 or 4 octets')
print('%20s %20s %20s' % ('NETWORK_NUMBER', 'FIRST_OCTET_BINARY', 'FIRST_OCTET_HEX') )
a = '.'.join(str(q) for q in ip_addr_split)
b = bin(ip_addr_split[0])
c = hex(ip_addr_split[0])
print('%20s %20s %20s' % (a, b, c)) | Noeud/KirkByers_Course | Week2/IPaddValidity_hex_bin.py | Python | unlicense | 1,174 |
# -*- coding: utf-8 -*-
VERSION = (0, 1, 6, 'final')
if VERSION[-1] != "final": # pragma: no cover
__version__ = '.'.join(map(str, VERSION))
else: # pragma: no cover
__version__ = '.'.join(map(str, VERSION[:-1]))
| arteria/django-shop-simplenotifications | shop_simplenotifications/__init__.py | Python | unlicense | 222 |
from pippi import dsp
from pippi import tune
def ping(maxlen=44100, freqs=None):
out = ''
if freqs is None:
freqs = [ dsp.rand(20,10000) for i in range(4) ]
tlen = dsp.randint(10, maxlen)
tones = [ dsp.tone(length=tlen, freq=freq, amp=0.1, wavetype='random')
for freq in freqs ]
tones = [ dsp.split(tone, 64) for tone in tones ]
pcurves = [ dsp.breakpoint([ dsp.rand() for t in range(len(tones[i]) / 20) ],
len(tones[i])) for i in range(len(tones)) ]
tones = [ [ dsp.pan(t, pcurves[i][ti]) for ti, t in enumerate(tones[i]) ]
for i in range(len(tones)) ]
fcurves = [ dsp.breakpoint([ dsp.rand(0.0, 0.1) + 0.9 for t in range(len(tones[i]) / 20) ],
len(tones[i])) for i in range(len(tones)) ]
tones = [ [ dsp.transpose(t, fcurves[i][ti] + 0.1) for ti, t in enumerate(tones[i]) ]
for i in range(len(tones)) ]
out = dsp.mix([ dsp.env(''.join(tone), 'random') for tone in tones ])
out = dsp.env(out, 'random')
out = dsp.pad(out, 0, dsp.randint(0, maxlen * 3))
return out
#freqs = tune.fromdegrees([1,3,5,6,9], 6, 'a')
freqs = None
out = ''.join([ ping(dsp.mstf(200), freqs) for i in range(100)])
dsp.write(out, 'twenty')
| lettersonsounds/twenty | score.py | Python | unlicense | 1,251 |
import unittest
import utils
def remove_backspace(s):
result = []
for ch in s:
if ch == '#':
if result:
result.pop()
else:
result.append(ch)
return result
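# Worked examples for the helper above:
#   remove_backspace("ab#c")  -> ['a', 'c']  (the '#' pops the pending 'b')
#   remove_backspace("a##c")  -> ['c']       (a pop on an empty stack is a no-op)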
# O(n) time. O(n) space. Stack.
class Solution:
def backspaceCompare(self, S: str, T: str) -> bool:
return remove_backspace(S) == remove_backspace(T)
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().backspaceCompare(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| chrisxue815/leetcode_python | problems/test_0844_stack.py | Python | unlicense | 744 |
#!/usr/bin/env python
__description__ = \
"""
compareAncestors.py

Compare two ancestral sequence reconstruction files site by site, reporting
state changes and changes in ambiguity.
"""
__author__ = "Michael J. Harms"
__usage__ = "comapreAncestors.py ancestor_file1 ancestor_file2"
__date__ = "100726"
import sys, phyloBase
class CompareAncestorError(Exception):
"""
General error class for this module.
"""
pass
def readAncestorFile(ancestor_file):
"""
"""
f = open(ancestor_file,'r')
lines = f.readlines()
f.close()
# Skip comments and blank lines
lines = [l for l in lines if l.strip() != "" and l[0] != "#"]
out = []
num_states = (len(lines[0].split())-2)/2
for l in lines[1:]:
position = int(l[7:12])
tmp_out = []
for i in range(num_states):
aa = l[12+12*i:18+12*i].strip()
pp = float(l[18+12*i:24+12*i])
tmp_out.append((aa,pp))
out.append((position,tmp_out))
return out
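# The slicing above assumes a fixed-width layout: the site number occupies
# characters 7-11 of each line, and each reconstructed state takes a further
# 12-character block (6 characters for the residue, 6 for its posterior).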
def compareAncestors(ancestor1_file,ancestor2_file,ambiguous_cutoff=0.8):
"""
"""
anc1 = readAncestorFile(ancestor1_file)
anc2 = readAncestorFile(ancestor2_file)
anc1_pos = [p[0] for p in anc1]
anc2_pos = [p[0] for p in anc2]
only_in_anc1 = [p for p in anc1_pos if p not in anc2_pos]
only_in_anc2 = [p for p in anc2_pos if p not in anc1_pos]
if len(only_in_anc1) > 0:
print "# Warning: some sites only in ancestor 1:"
print "".join(["# %i\n" % p for p in only_in_anc1]),
if len(only_in_anc2) > 0:
print "# Warning: some sites only in ancestRr 2:"
print "".join(["# %i\n" % p for p in only_in_anc2]),
all_pos = [p for p in anc1_pos if p not in only_in_anc1]
all_pos.extend([p for p in anc2_pos if p not in only_in_anc2 and p not in all_pos])
anc1_dict = dict([a for a in anc1 if a[0] in anc1_pos])
anc2_dict = dict([a for a in anc2 if a[0] in anc2_pos])
out = []
out.append("# pos new_state old_state same? state_type?")
out.append(" ambiguity pp_new pp_old\n")
out.append("#\n# same?\n")
out.append("# \'*\' -> changed\n")
out.append("# \' \' -> no change\n")
out.append("# flipped_with_alternate?\n")
out.append("# \'*\' -> took new state\n")
out.append("# \'~\' -> took alternate state\n")
out.append("# \' \' -> no change in state\n")
out.append("# ambig_state key:\n")
out.append("# \'~\' -> ambiguous in both\n")
out.append("# \'-\' -> newly ambiguous\n")
out.append("# \'+\' -> newly well supported\n")
out.append("# \' \' -> well suppported in both\n")
for p in all_pos:
s1 = anc1_dict[p]
s2 = anc2_dict[p]
# See if the new reconstruction has the same residue at this position
same = "*"
if s1[0][0] == s2[0][0]:
same = " "
# Check to see if new state existed as less likely state in original
# reconstruction
flipped = " "
if same == "*":
if s1[0] in [a[0] for a in s2[1:]]:
flipped = "~"
else:
flipped = "*"
# Remained ambiguous
if s1[0][1] <= ambiguous_cutoff and s2[0][1] <= ambiguous_cutoff:
ambig_state = "~"
# Newly ambiguous
elif s1[0][1] <= ambiguous_cutoff and s2[0][1] > ambiguous_cutoff:
ambig_state = "+"
# Became well supported
elif s1[0][1] > ambiguous_cutoff and s2[0][1] <= ambiguous_cutoff:
ambig_state = "-"
# Remained well supported
else:
ambig_state = " "
check_me = " "
if ambig_state == "-" or \
(same == "*" and ambig_state == " "):
check_me = "!"
out.append("%5i %s %s %s %s %s %6.2f%6.2f %s\n" % (p,s1[0][0],s2[0][0],
same,flipped,ambig_state,s1[0][1],s2[0][1],check_me))
return "".join(out)
def main(argv=None):
"""
"""
if argv == None:
argv = sys.argv[1:]
try:
ancestor1_file = argv[0]
ancestor2_file = argv[1]
except IndexError:
err = "Incorrect number of arguments!\n\n%s\n\n" % __usage__
raise CompareAncestorError(err)
out = compareAncestors(ancestor1_file,ancestor2_file)
print out
if __name__ == "__main__":
main()
| harmsm/phylo_tools | compareAncestors.py | Python | unlicense | 4,269 |
import difflib
import sys
class Repository:
def __init__(self, fname=None):
if not fname:
fname = '/usr/share/dict/words'
with open(fname) as f:
self.repository = [x.rstrip('\n') for x in f.readlines()]
def find_close_matches(r, w, count=3):
return difflib.get_close_matches(w, r.repository, count)
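# Illustrative only -- results depend on the words file in use:
#   r = Repository()
#   find_close_matches(r, 'appel')   # -> e.g. ['apple', 'appeal', 'lapel']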
if __name__ == '__main__':
r = Repository()
w = raw_input('Your word please: ')
if len(w.split()) != 1:
sys.exit('please enter a word only')
try:
count = int(raw_input('Number of matches: '))
except ValueError:
sys.exit('Enter a number please')
print find_close_matches(r, w, count)
| amitsaha/learning | python/strings/difflib_closest_match.py | Python | unlicense | 697 |
###############################################################################
# $Id$
#
# Project: Sub1 project of IRRI
# Purpose: Quality Assessment extraction from MODIS
# Author: Yann Chemin, <yann.chemin@gmail.com>
#
###############################################################################
# Copyright (c) 2008, Yann Chemin <yann.chemin@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
#!/usr/bin/python
import wx
import wx.lib.filebrowsebutton as filebrowse
import os
import sys
# For Image Processing
import numpy as N
from osgeo import gdalnumeric
from osgeo import gdal
from osgeo import gdal_array
from osgeo.gdalconst import *
# For icons, pngs, etc coming from images.py
from wx import ImageFromStream, BitmapFromImage, EmptyIcon
import cStringIO
import images
# Define satellite bands
# Based on Landsat channels
qc = ''
# Define output file name
output = ''
# Define list of MODIS types
NameMOD = ['250','500']
# Define list of QA types
NameQC = ['modland_qa_bits','cloud','data_quality','atcorr','adjcorr','diff_orbit_from_500m']
# Define band number
bandno = ['1','2','3','4','5','6','7']
# Define Info Message
overview = """MODIS Quality Assessment Extractor
Makes Human-readable images of Quality Assessment binary bits from MOD09 products.
500m does not have "cloud" and "diff_orbit_from_500m" options.
# MODLAND QA Bits 250m Unsigned Int bits[0-1]
#00 -> class 0: Corrected product produced at ideal quality -- all bands
#01 -> class 1: Corrected product produced at less than ideal quality -- some or all bands
#10 -> class 2: Corrected product NOT produced due to cloud effect -- all bands
#11 -> class 3: Corrected product NOT produced due to other reasons -- some or all bands maybe fill value (Note that a value of [11] overrides a value of [01])
# Cloud State 250m Unsigned Int bits[2-3]
#00 -> class 0: Clear -- No clouds
#01 -> class 1: Cloudy
#10 -> class 2: Mixed
#11 -> class 3: Not Set ; Assumed Clear
# Band-wise Data Quality 250m Unsigned Int bits[4-7][8-11]
# Band-wise Data Quality 500m long Int bits[2-5][6-9][10-13][14-17][18-21][22-25][26-29]
#0000 -> class 0: highest quality
#0111 -> class 1: noisy detector
#1000 -> class 2: dead detector; data interpolated in L1B
#1001 -> class 3: solar zenith >= 86 degrees
#1010 -> class 4: solar zenith >= 85 and < 86 degrees
#1011 -> class 5: missing input
#1100 -> class 6: internal constant used in place of climatological data for at least one atmospheric constant
#1101 -> class 7: correction out of bounds, pixel constrained to extreme allowable value
#1110 -> class 8: L1B data faulty
#1111 -> class 9: not processed due to deep ocean or cloud
#Class 10-15: Combination of bits unused
# Atmospheric correction 250m Unsigned Int bit[12]
#0 -> class 0: Not Corrected product
#1 -> class 1: Corrected product
# Adjacency correction 250m Unsigned Int bit[13]
#0 -> class 0: Not Corrected product
#1 -> class 1: Corrected product
# Different orbit from 500m product, 250m Unsigned Int bit[14]
#0 -> class 0: same orbit as 500m
#1 -> class 1: different orbit from 500m
"""
class MyFrame(wx.Frame):
def __init__(self,parent, id=-1, title='MODIS Quality Bits Extractor',
pos=(0,0),
size=(400,650),
style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, id, title, pos, size, style)
ico = images.getPngGipeIcon()
self.SetIcon(ico)
self.lognull = wx.LogNull()
# Input Filenames
self.qc = qc
self.qc_type = 'modland_qa_bits'
self.pixelres = '250'
self.band_no = '1'
self.NameMOD = NameMOD
self.NameQC = NameQC
self.bandno = bandno
self.output = output
# Construct Interface
self.make_text()
self.make_buttons()
self.make_radiobuttons1()
self.make_radiobuttons2()
self.make_radiobuttons3()
self.make_fb()
self.mbox = wx.BoxSizer(wx.VERTICAL)
self.mbox.Add((10,10))
self.mbox.Add(self.text, 1, wx.EXPAND|wx.CENTER, 10)
self.mbox.Add(self.cc2, 1, wx.EXPAND, 0)
self.mbox.Add(self.cc6, 1, wx.EXPAND, 0)
self.mbox.Add(self.rbox1, 1, wx.CENTER, 0)
self.mbox.Add(self.rbox2, 1, wx.CENTER, 0)
self.mbox.Add(self.rbox3, 1, wx.CENTER, 0)
self.mbox.Add((10,10))
self.mbox.Add((50,10))
self.mbox.Add(self.bbox, 1, wx.CENTER, 10)
self.mbox.Add((10,10))
self.SetSizer(self.mbox)
self.bindEvents()
# Process Equations, Handling and saving of output
def OnOK(self,event):
#print "qc: ", self.qc
#print "out:", self.output
if(self.qc==''):
self.OnFileInError()
else:
self.qcF = gdal.Open(self.qc)
self.bqc = self.qcF.GetRasterBand(1)
self.test = gdal.Open(self.qc)
self.CrAr( self.qc, self.output, 'GTiff' )
self.result = gdal.Open(self.output, GA_Update)
for self.y in range(self.bqc.YSize - 1, -1, -1):
print self.y
self.scanline1=self.bqc.ReadAsArray(0, self.y, self.bqc.XSize, 1, self.bqc.XSize, 1)
for self.x in range(0, self.bqc.XSize - 1, 1):
self.pix1 = self.scanline1[0][self.x]
self.scanline1[0][self.x]=self.qcbits(self.pix1,self.qc_type,int(self.pixelres),int(self.band_no))
self.result.GetRasterBand(1).WriteArray(N.reshape(self.scanline1,(1,self.bqc.XSize)), 0, self.y)
self.Destroy()
#def bin(self,i):
#"""
#Convert Binary to Integer Bit Field
#Manish Jethani (manish.j at gmx.net)
#http://bytes.com/forum/thread20381.html
#"""
#b = ''
#while i > 0:
#j = i & 1
#b = str(j) + b
#i >>= 1
#return b
def qcbits(self,qcbit,qcflag,pixres,bandno):
outclas = 0
#calculate modland QA bits extraction
if (qcflag=="modland_qa_bits"):
if (pixres==500):
# 500m product
outclas = self.qc500a(qcbit)
else:
# 250m product
outclas = self.qc250a(qcbit)
#calculate cloud state
elif (qcflag=="cloud"):
if (pixres==500):
# 500m product
                # The cloud-state flag exists only in the 250m product,
                # so reject the request for 500m input
print "flag name unavailable for 500m, please restart"
self.OnQCInError()
else:
# ONLY 250m product!
outclas = self.qc250b(qcbit)
#calculate modland QA bits extraction
elif (qcflag=="data_quality"):
if (pixres==500):
# 500m product
outclas = self.qc500c(qcbit, bandno)
else:
# 250m product
outclas = self.qc250c(qcbit, bandno)
#calculate atmospheric correction flag
elif (qcflag=="atcorr"):
if (pixres==500):
# 500m product
outclas = self.qc500d(qcbit)
else:
# 250m product
outclas = self.qc250d(qcbit)
#calculate adjacency correction flag
elif (qcflag=="adjcorr"):
if (pixres==500):
# 500m product
outclas = self.qc500e(qcbit)
else:
# 250m product
outclas = self.qc250e(qcbit)
#calculate different orbit from 500m flag
elif (qcflag=="diff_orbit_from_500m"):
if (pixres==500):
# 500m product
                # The different-orbit flag exists only in the 250m product,
                # so reject the request for 500m input
print "flag name unavailable for 500m, please restart"
self.OnQCInError()
else:
# ONLY 250m product!
outclas = self.qc250f(qcbit)
else:
# Signal user that the flag name is badly written
# therefore not understood by the application
print "Unknown flag name, please check spelling"
self.OnQCInError()
return outclas
def qc250a(self, pixel):
"""
# MODLAND QA Bits 250m Unsigned Int bits[0-1]
#00 -> class 0: Corrected product produced at ideal quality -- all bands
#01 -> class 1: Corrected product produced at less than idel quality -- some or all bands
#10 -> class 2: Corrected product NOT produced due to cloud effect -- all bands
        #11 -> class 3: Corrected product NOT produced due to other reasons -- some or all bands may be fill value (Note that a value of [11] overrides a value of [01])
"""
pixel = pixel & 3
return pixel
def qc250b(self, pixel):
"""
# Cloud State 250m Unsigned Int bits[2-3]
#00 -> class 0: Clear -- No clouds
#01 -> class 1: Cloudy
#10 -> class 2: Mixed
#11 -> class 3: Not Set ; Assumed Clear
"""
        pixel = pixel >> 2
pixel = pixel & 3
return pixel
def qc250c(self,pixel,bandno):
"""
        # Band-wise Data Quality 250m Unsigned Int bits[4-7][8-11]
#0000 -> class 0: highest quality
#0111 -> class 1: noisy detector
#1000 -> class 2: dead detector; data interpolated in L1B
#1001 -> class 3: solar zenith >= 86 degrees
#1010 -> class 4: solar zenith >= 85 and < 86 degrees
#1011 -> class 5: missing input
#1100 -> class 6: internal constant used in place of climatological data for at least one atmospheric constant
#1101 -> class 7: correction out of bounds, pixel constrained to extreme allowable value
#1110 -> class 8: L1B data faulty
#1111 -> class 9: not processed due to deep ocean or cloud
#Class 10-15: Combination of bits unused
"""
        # "+" binds tighter than ">>", so parenthesize to make the intended
        # shift explicit: 4 MODLAND/cloud bits, then 4 quality bits per
        # earlier band.
        pixel = pixel >> (4 + 4 * (bandno - 1))
pixel = pixel & 15
return pixel
def qc250d(self, pixel):
"""
# Atmospheric correction 250m Unsigned Int bit[12]
#0 -> class 0: Not Corrected product
#1 -> class 1: Corrected product
"""
        pixel = pixel >> 12
pixel = pixel & 1
return pixel
def qc250e(self,pixel):
"""
# Adjacency correction 250m Unsigned Int bit[13]
#0 -> class 0: Not Corrected product
#1 -> class 1: Corrected product
"""
        pixel = pixel >> 13
pixel = pixel & 1
return pixel
def qc250f(self,pixel):
"""
# Different orbit from 500m product, 250m Unsigned Int bit[14]
#0 -> class 0: same orbit as 500m
#1 -> class 1: different orbit from 500m
"""
        pixel = pixel >> 14
pixel = pixel & 1
return pixel
def qc500a(self,pixel):
"""
# MODLAND QA Bits 500m long int bits[0-1]
#00 -> class 0: Corrected product produced at ideal quality -- all bands
        #01 -> class 1: Corrected product produced at less than ideal quality -- some or all bands
#10 -> class 2: Corrected product NOT produced due to cloud effect -- all bands
        #11 -> class 3: Corrected product NOT produced due to other reasons -- some or all bands may be fill value (Note that a value of [11] overrides a value of [01])
"""
pixel = pixel & 3
return pixel
def qc500c(self,pixel,bandno):
"""
# Band-wise Data Quality 500m long Int
#bits[2-5][6-9][10-13][14-17][18-21][22-25][26-29]
#0000 -> class 0: highest quality
#0111 -> class 1: noisy detector
#1000 -> class 2: dead detector; data interpolated in L1B
#1001 -> class 3: solar zenith >= 86 degrees
#1010 -> class 4: solar zenith >= 85 and < 86 degrees
#1011 -> class 5: missing input
#1100 -> class 6: internal constant used in place of climatological data for at least one atmospheric constant
#1101 -> class 7: correction out of bounds, pixel constrained to extreme allowable value
#1110 -> class 8: L1B data faulty
#1111 -> class 9: not processed due to deep ocean or cloud
#Class 10-15: Combination of bits unused
"""
        pixel = pixel >> (2 + 4 * (bandno - 1))
pixel = pixel & 15
return pixel
def qc500d(self,pixel):
"""
# Atmospheric correction 500m long Int bit[30]
#0 -> class 0: Not Corrected product
#1 -> class 1: Corrected product
"""
        pixel = pixel >> 30
pixel = pixel & 1
return pixel
def qc500e(self,pixel):
"""
# Adjacency correction 500m long Int bit[31]
#0 -> class 0: Not Corrected product
#1 -> class 1: Corrected product
"""
        pixel = pixel >> 31
pixel = pixel & 1
return pixel
def CrAr(self, src_flnm, dst_flnm, format ):
"""
CrAr(): Create Array with Georeferencing from another file (src_flnm), save it in file (dst_flnm) with format (format)
CrAr( self, src_flnm, dst_flnm, format )
"""
cr_opts=[]
# Read information from source file.
src_ds = gdal.Open(str(src_flnm))
gt = src_ds.GetGeoTransform()
pj = src_ds.GetProjection()
src_ds = None
# Standard checking on the GDAL driver
Driver = gdal.GetDriverByName( str(format) )
if Driver is None:
raise ValueError, "CrAr: No DriverFound "+format
DriverMTD = Driver.GetMetadata()
if not DriverMTD.has_key('DCAP_CREATE'):
print 'Format Driver %s does not support creation and piecewise writing.\nPlease select a format that does, such as GTiff or HFA (Erdas/Imagine).' % format
sys.exit( 1 )
# Set up the band number
nbands = 1
#print "nbands =", nbands
# Collect information on source files
flinfos = self.names_to_fileinfos( str(src_flnm) )
ulx = flinfos[0].ulx
uly = flinfos[0].uly
lrx = flinfos[0].lrx
lry = flinfos[0].lry
# get largest extends
for fi in flinfos:
ulx = min(ulx, fi.ulx)
uly = max(uly, fi.uly)
lrx = max(lrx, fi.lrx)
lry = min(lry, fi.lry)
# Set other info
psize_x = flinfos[0].geotransform[1]
psize_y = flinfos[0].geotransform[5]
band_type = flinfos[0].band_type
# Try opening as an existing file
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
out_fh = gdal.Open( str(dst_flnm), gdal.GA_Update )
gdal.PopErrorHandler()
# Otherwise create a new file
if out_fh is None:
geot = [ulx, psize_x, 0, uly, 0, psize_y]
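            # GDAL geotransform layout for reference: (origin_x, pixel_width,
            # x_rotation, origin_y, y_rotation, pixel_height), where
            # pixel_height is negative for north-up images.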
            print geot[0], geot[1], geot[2], geot[3], geot[4], geot[5]
xsize = int((lrx-ulx)/geot[1]+0.5)
ysize = int((lry-uly)/geot[5]+0.5)
out_fh=Driver.Create(str(dst_flnm),xsize,ysize,nbands,band_type,cr_opts)
if out_fh is None:
raise ValueError, "CrAr: Failed to create new file "+dst_flnm
sys.exit( 1 )
out_fh.SetGeoTransform( gt )
out_fh.SetProjection( pj )
#out_fh.GetRasterBand(1).SetRasterColorTable(flinfos[0].ct)
nodata = None
iband = 1
for fi in flinfos:
fi.copy_into( out_fh, 1, iband, nodata )
iband=iband+1
iband = 0
def names_to_fileinfos( self, name ):
file_infos = []
fi = file_info()
if fi.init_from_name( name ) == 1:
file_infos.append( fi )
return file_infos
def OnFileInError(self):
dlg = wx.MessageDialog(self,
'Minimum files to add:\n\n Input files => NDVI and Modis Band7\n One Output file',
'Error',wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def OnQCInError(self):
dlg = wx.MessageDialog(self,
'QC type error\n\n Please check your input',
'Error',wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
# Path+filename seek and set
def make_fb(self):
# get current working directory
self.dirnm = os.getcwd()
self.cc2 = filebrowse.FileBrowseButton(
self, -1, size=(50, -1), labelText='QC File:',
startDirectory = self.dirnm,
fileMode=wx.OPEN,
changeCallback = self.fbbCallback2,
)
self.cc6 = filebrowse.FileBrowseButton(
self, -1, size=(50, -1), labelText='OUT File: ',
startDirectory = self.dirnm,
fileMask='*.tif',
fileMode=wx.SAVE,
changeCallback = self.fbbCallback6
)
# Collect path+filenames
def fbbCallback2(self, evt):
self.qc = str(evt.GetString())
def fbbCallback6(self, evt):
self.output = str(evt.GetString())
# Front text
def make_text(self):
self.text = wx.StaticText(self, -1, "This is processing MODIS Quality Assessment Bits through the use of gdal and numeric.")
# QC type radio buttons
def make_radiobuttons1(self):
self.rbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.rb1 = wx.RadioBox(self, -1, "Select MODIS Type",
wx.DefaultPosition, wx.DefaultSize,
self.NameMOD, 2, wx.RA_SPECIFY_COLS)
self.rb1.SetToolTip(wx.ToolTip("Select MODIS type"))
self.rb1.SetLabel("MODIS Type")
self.rbox1.Add(self.rb1,1,wx.ALL,10)
def EvtRadioBox1(self, evt):
self.nb = evt.GetInt()
self.pixelres = NameMOD[self.nb]
#print self.pixelres
def make_radiobuttons2(self):
self.rbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.rb2 = wx.RadioBox(self, -1, "Select Band number (data quality only)",
wx.DefaultPosition, wx.DefaultSize,
self.bandno, 7, wx.RA_SPECIFY_COLS)
self.rb2.SetToolTip(wx.ToolTip("Select Band number (for data_quality)"))
self.rb2.SetLabel("Band Number (for \"data quality\" only)")
self.rbox2.Add(self.rb2,1,wx.ALL,10)
def EvtRadioBox2(self, evt):
self.nb = evt.GetInt()
self.band_no = self.bandno[self.nb]
#print self.band_no
def make_radiobuttons3(self):
self.rbox3 = wx.BoxSizer(wx.HORIZONTAL)
self.rb3 = wx.RadioBox(self, -1, "Select QC Type",
wx.DefaultPosition, wx.DefaultSize,
self.NameQC, 2, wx.RA_SPECIFY_COLS)
self.rb3.SetToolTip(wx.ToolTip("Select QC type"))
self.rb3.SetLabel("QC Type")
self.rbox3.Add(self.rb3,1,wx.ALL,10)
def EvtRadioBox3(self, evt):
self.nb = evt.GetInt()
self.qc_type = NameQC[self.nb]
#print self.qc_type
# Bottom buttons
def make_buttons(self):
self.bbox = wx.BoxSizer(wx.HORIZONTAL)
# OnOK
bmp0 = images.getPngDialogOKBitmap()
self.b0 = wx.BitmapButton(self, 20, bmp0, (20, 20),
(bmp0.GetWidth()+50, bmp0.GetHeight()+10), style=wx.NO_BORDER)
self.b0.SetToolTipString("Process")
self.bbox.Add(self.b0,1,wx.CENTER,10)
# OnCancel
bmp1 = images.getPngDialogCancelBitmap()
self.b1 = wx.BitmapButton(self, 30, bmp1, (20, 20),
(bmp1.GetWidth()+50, bmp1.GetHeight()+10), style=wx.NO_BORDER)
self.b1.SetToolTipString("Abort")
self.bbox.Add(self.b1,1,wx.CENTER,10)
# OnInfo
bmp2 = images.getPngHelpAboutBitmap()
self.b2 = wx.BitmapButton(self, 40, bmp2, (20, 20),
(bmp2.GetWidth()+50, bmp2.GetHeight()+10), style=wx.NO_BORDER)
self.b2.SetToolTipString("Help/Info.")
self.bbox.Add(self.b2,1,wx.CENTER,10)
def bindEvents(self):
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Bind(wx.EVT_BUTTON, self.OnOK, self.b0)
self.Bind(wx.EVT_BUTTON, self.OnCancel, self.b1)
self.Bind(wx.EVT_BUTTON, self.OnInfo, self.b2)
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox1, self.rb1)
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox2, self.rb2)
self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox3, self.rb3)
def OnCloseWindow(self, event):
self.Destroy()
def OnCancel(self, event):
self.Destroy()
def OnInfo(self,event):
dlg = wx.MessageDialog(self, overview,
'Help', wx.OK | wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
class file_info:
"""A class holding information about a GDAL file."""
def init_from_name(self, filename):
"""
Initialize file_info from filename
filename -- Name of file to read.
Returns 1 on success or 0 if the file can't be opened.
"""
fh = gdal.Open( str(filename) )
if fh is None:
return 0
self.filename = filename
self.bands = fh.RasterCount
self.xsize = fh.RasterXSize
self.ysize = fh.RasterYSize
self.band_type = fh.GetRasterBand(1).DataType
self.projection = fh.GetProjection()
self.geotransform = fh.GetGeoTransform()
self.ulx = self.geotransform[0]
self.uly = self.geotransform[3]
self.lrx = self.ulx + self.geotransform[1] * self.xsize
self.lry = self.uly + self.geotransform[5] * self.ysize
ct = fh.GetRasterBand(1).GetRasterColorTable()
if ct is not None:
self.ct = ct.Clone()
else:
self.ct = None
return 1
def copy_into( self, t_fh, s_band = 1, t_band = 1, nodata_arg=None ):
"""
Copy this files image into target file.
"""
t_geotransform = t_fh.GetGeoTransform()
t_ulx = t_geotransform[0]
t_uly = t_geotransform[3]
t_lrx = t_geotransform[0] + t_fh.RasterXSize * t_geotransform[1]
t_lry = t_geotransform[3] + t_fh.RasterYSize * t_geotransform[5]
# figure out intersection region
tgw_ulx = max(t_ulx,self.ulx)
tgw_lrx = min(t_lrx,self.lrx)
if t_geotransform[5] < 0:
tgw_uly = min(t_uly,self.uly)
tgw_lry = max(t_lry,self.lry)
else:
tgw_uly = max(t_uly,self.uly)
tgw_lry = min(t_lry,self.lry)
# do they even intersect?
if tgw_ulx >= tgw_lrx:
return 1
if t_geotransform[5] < 0 and tgw_uly <= tgw_lry:
return 1
if t_geotransform[5] > 0 and tgw_uly >= tgw_lry:
return 1
# compute target window in pixel coordinates.
tw_xoff = int((tgw_ulx - t_geotransform[0]) / t_geotransform[1] + 0.1)
tw_yoff = int((tgw_uly - t_geotransform[3]) / t_geotransform[5] + 0.1)
tw_xsize = int((tgw_lrx-t_geotransform[0])/t_geotransform[1] + 0.5) - tw_xoff
tw_ysize = int((tgw_lry-t_geotransform[3])/t_geotransform[5] + 0.5) - tw_yoff
if tw_xsize < 1 or tw_ysize < 1:
return 1
# Compute source window in pixel coordinates.
sw_xoff = int((tgw_ulx - self.geotransform[0]) / self.geotransform[1])
sw_yoff = int((tgw_uly - self.geotransform[3]) / self.geotransform[5])
sw_xsize = int((tgw_lrx - self.geotransform[0]) / self.geotransform[1] + 0.5) - sw_xoff
sw_ysize = int((tgw_lry - self.geotransform[3]) / self.geotransform[5] + 0.5) - sw_yoff
if sw_xsize < 1 or sw_ysize < 1:
return 1
# Open the source file, and copy the selected region.
s_fh = gdal.Open( str(self.filename) )
return self.raster_copy( s_fh, sw_xoff, sw_yoff, sw_xsize, sw_ysize, s_band, t_fh, tw_xoff, tw_yoff, tw_xsize, tw_ysize, t_band, nodata_arg )
def raster_copy( self, s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n, t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n, nodata=None ):
if nodata is not None:
return self.raster_copy_with_nodata(
s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,
t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n,
nodata )
s_band = s_fh.GetRasterBand( s_band_n )
t_band = t_fh.GetRasterBand( t_band_n )
data = s_band.ReadRaster( s_xoff, s_yoff, s_xsize, s_ysize, t_xsize, t_ysize, t_band.DataType )
t_band.WriteRaster( t_xoff, t_yoff, t_xsize, t_ysize, data, t_xsize, t_ysize, t_band.DataType )
return 0
def raster_copy_with_nodata( self, s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n, nodata ):
        import numpy as Num
s_band = s_fh.GetRasterBand( s_band_n )
t_band = t_fh.GetRasterBand( t_band_n )
data_src = s_band.ReadAsArray( s_xoff, s_yoff, s_xsize, s_ysize, t_xsize, t_ysize )
data_dst = t_band.ReadAsArray( t_xoff, t_yoff, t_xsize, t_ysize )
        # Where the source equals nodata keep the existing destination
        # pixel; everywhere else take the source pixel.
        nodata_test = Num.equal(data_src,nodata)
        to_write = Num.choose(nodata_test, (data_src, data_dst))
t_band.WriteArray( to_write, t_xoff, t_yoff )
return 0
class MainApp(wx.App):
def OnInit(self):
        frame = MyFrame(None)
frame.Show(True)
self.SetTopWindow(frame)
return True
if __name__ == '__main__':
app = wx.App()
frame = MyFrame(None)
frame.Show()
app.MainLoop()
| YannChemin/wxGIPE | proc_modis_qc.py | Python | unlicense | 23,032 |
def estriangulo(a,b,c):
print("el primer argumento es:",a)
print("el segundo argumento es:",b)
print("el tercer argumento es:",c)
return a+b>c and a+c>b and c+b>a
def espitagorico(a,b,c):
return a**2+b**2==c**2 or a**2+c**2==b**2 or b**2+c**2==a**2
def esisosceles(a,b,c):
return a==b or a==c or b==c
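# Quick sanity check of the triangle inequality used above: for (3,4,5),
# 3+4>5, 3+5>4 and 4+5>3 all hold, so estriangulo(3,4,5) is True.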
print(estriangulo(int(input("numero? ")),4,5))
print(espitagorico(3,4,5))
print(esisosceles(3,4,5))
| jabaier/iic1103.20152.s4 | estriangulo_prueba.py | Python | unlicense | 428 |
import sys
import collections as c
from scipy import special, stats
import numpy as n, pylab as p, networkx as x
class NetworkDrawer:
drawer_count=0
def __init__(self,metric="strength"):
self.drawer_count+=1
metric_=self.standardizeName(metric)
self.metric_=metric_
self.draw_count=0
    def standardizeName(self,name):
        if name in (["s","strength","st"]+["f","força","forca","fo"]):
            name_="s"
        elif name in (["d","degree","dg"]+["g","grau","gr"]):
            name_="d"
        else:
            raise ValueError("unknown metric name: "+name)
        return name_
def makeLayout(self,network_measures,network_partitioning=None):
"""Delivers a sequence of user_ids and (x,y) pos.
"""
self.network_measures=network_measures
if self.metric_=="s":
measures_=network_measures.strengths
elif self.metric_=="d":
measures_=network_measures.degrees
else:
print("not known metric to make layout")
self.ordered_measures=ordered_measures = c.OrderedDict(sorted(measures_.items(), key=lambda x: x[1]))
self.measures=measures=list(ordered_measures.values())
self.authors=authors= list(ordered_measures.keys())
total=network_measures.N
if not network_partitioning:
self.k1=k1=round(total*.80)
self.k2=k2=round(total*.95)
self.periphery=authors[:k1]
self.intermediary=authors[k1:k2]
self.hubs=authors[k2:]
else:
sectors=network_partitioning.sectorialized_agents__
self.k1=k1=len(sectors[0])
self.k2=k2=k1+len(sectors[1])
self.periphery,self.intermediary,self.hubs=sectors
print("fractions ={:0.4f}, {:0.4f}, {:0.4f}".format(k1/total, (k2-k1)/total, 1-k2/total))
self.makeXY()
def drawNetwork(self, network,network_measures,filename="example.png",label="auto",network_partitioning=None):
p.clf()
if self.metric_=="s":
measures_=network_measures.strengths
elif self.metric_=="d":
            measures_=network_measures.degrees
        else:
            print("unknown metric for drawing")
ordered_measures = c.OrderedDict(sorted(measures_.items(), key=lambda x: x[1]))
measures=list(ordered_measures.values())
authors= list(ordered_measures.keys())
total=network_measures.N
if not network_partitioning:
k1=k1=round(total*.80)
k2=k2=round(total*.95)
periphery=authors[:k1]
intermediary=authors[k1:k2]
hubs=authors[k2:]
else:
sectors=network_partitioning.sectorialized_agents__
k1=k1=len(sectors[0])
k2=k2=k1+len(sectors[1])
periphery,intermediary,hubs=(set(iii) for iii in sectors)
in_measures=network_measures.in_strengths
min_in=max(in_measures.values())/3+0.1
out_measures=network_measures.out_strengths
min_out=max(out_measures.values())/3+.1
self.clustering=clustering=network_measures.weighted_clusterings
A=x.drawing.nx_agraph.to_agraph(network.g)
A.node_attr['style']='filled'
A.graph_attr["bgcolor"]="black"
A.graph_attr["pad"]=.1
#A.graph_attr["size"]="9.5,12"
A.graph_attr["fontsize"]="25"
if label=="auto":
label=self.makeLabel()
A.graph_attr["label"]=label
A.graph_attr["fontcolor"]="white"
cm=p.cm.Reds(range(2**10)) # color table
self.cm=cm
nodes=A.nodes()
self.colors=colors=[]
self.inds=inds=[]
self.poss=poss=[]
for node in nodes:
n_=A.get_node(node)
ind_author=self.authors.index(n_)
            inds.append(ind_author)
colors.append( '#%02x%02x%02x' % tuple([int(255*i) for i in cm[int(clustering[n_]*255)][:-1]]))
#n_.attr['fillcolor']= '#%02x%02x%02x' % tuple([255*i for i in cm[int(clustering[n_]*255)][:-1]])
n_.attr['fillcolor']= colors[-1]
n_.attr['fixedsize']=True
n_.attr['width']= abs(.6*(in_measures[n_]/min_in+ .05))
n_.attr['height']= abs(.6*(out_measures[n_]/min_out+.05))
if n_ in hubs:
n_.attr["shape"] = "hexagon"
elif n_ in intermediary:
pass
else:
n_.attr["shape"] = "diamond"
pos="%f,%f"%tuple(self.posXY[ind_author])
poss.append(pos)
n_.attr["pos"]=pos
n_.attr["pin"]=True
n_.attr["fontsize"]=25
n_.attr["fontcolor"]="white"
n_.attr["label"]=""
weights=[s[2]["weight"] for s in network_measures.edges]
self.weights=weights
max_weight=max(weights)
self.max_weight=max_weight
self.weights_=[]
edges=A.edges()
for e in edges:
factor=float(e.attr['weight'])
self.weights_.append(factor)
e.attr['penwidth']=.34*factor
e.attr["arrowsize"]=1.5
e.attr["arrowhead"]="lteeoldiamond"
w=factor/max_weight # factor em [0-1]
cor=p.cm.Spectral(int(w*255))
self.cor=cor
            # Convert the RGBA tuple (0-1 floats) to a "#rrggbb" hex string.
            cor256=255*n.array(cor[:-1])
            corRGB="#%02x%02x%02x" % tuple(int(c) for c in cor256)
e.attr["color"]=corRGB
A.draw(filename, prog="neato") # twopi ou circo
################
self.A=A
self.draw_count+=1
def makeLabel(self):
label=""
if "window_size" in dir(self):
label+="w: {}, ".format(self.window_size)
#m: %i, N = %i, E = %i"%(self.draw_count*self.step_size,self.network_measures.N,self.network_measures.E)
if "step_size" in dir(self):
label+="m: {} ,".format(self.draw_count*self.step_size+self.offset)
else:
label+="m: %i, ".format(self.draw_count)
#self.network_measures.N,self.network_measures.E)
label+="N = %i, E = %i"%(self.network_measures.N,self.network_measures.E)
return label
def updateNetwork(self,network,networkMeasures=None):
pass
def makeXY(self):
size_periphery=self.k1
size_intermediary=self.k2-self.k1
size_hubs=self.network_measures.N-self.k2
if size_hubs%2==1:
size_hubs+=1
size_intermediary-=1
xh=n.linspace(0,0.5,size_hubs,endpoint=False)[::-1]
thetah=2*n.pi*xh
yh=n.sin(thetah)
xi=n.linspace(1,0.5, size_intermediary, endpoint=True)
thetai=2*n.pi*xi
yi=n.sin(thetai)
xp=n.linspace(.95,0.4, size_periphery)[::-1]
yp=n.linspace(.1,1.25, size_periphery)[::-1]
self.pos=((xp,yp),(xi,yi),(xh,yh))
XFACT=7
YFACT=3
self.posX=posX=n.hstack((xp,xi,xh))*XFACT
self.posY=posY=n.hstack((yp,yi,yh))*YFACT
self.posXY=n.vstack((posX.T,posY.T)).T
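        # Layout summary: periphery nodes run along a diagonal band built
        # from the linear ramps (xp, yp); intermediary and hub nodes sit on
        # one period of the sine curve y = sin(2*pi*x); XFACT and YFACT only
        # stretch the canvas.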
| ttm/gmaneLegacy | gmaneLegacy/networkDrawer.py | Python | unlicense | 7,293 |
#!/usr/bin/env python
"""Test the collector flows."""
import os
from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import artifact_test
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.flows.general import collectors
from grr.lib.flows.general import transfer
from grr.test_data import client_fixture
# pylint: mode=test
class CollectorTest(artifact_test.ArtifactTest):
pass
class TestArtifactCollectors(CollectorTest):
"""Test the artifact collection mechanism with fake artifacts."""
def setUp(self):
"""Make sure things are initialized."""
super(TestArtifactCollectors, self).setUp()
self.original_artifact_reg = artifact_lib.ArtifactRegistry.artifacts
artifact_lib.ArtifactRegistry.ClearRegistry()
self.LoadTestArtifacts()
artifact_reg = artifact_lib.ArtifactRegistry.artifacts
self.fakeartifact = artifact_reg["FakeArtifact"]
self.fakeartifact2 = artifact_reg["FakeArtifact2"]
self.output_count = 0
with aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw") as fd:
fd.Set(fd.Schema.SYSTEM("Linux"))
kb = fd.Schema.KNOWLEDGE_BASE()
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
fd.Set(kb)
def tearDown(self):
super(TestArtifactCollectors, self).tearDown()
artifact_lib.ArtifactRegistry.artifacts = self.original_artifact_reg
self.fakeartifact.collectors = [] # Reset any Collectors
self.fakeartifact.conditions = [] # Reset any Conditions
self.fakeartifact2.collectors = [] # Reset any Collectors
self.fakeartifact2.conditions = [] # Reset any Conditions
def testInterpolateArgs(self):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
collect_flow.state.Register("knowledge_base", rdfvalue.KnowledgeBase())
collect_flow.current_artifact_name = "blah"
collect_flow.state.knowledge_base.MergeOrAddUser(
rdfvalue.KnowledgeBaseUser(username="test1"))
collect_flow.state.knowledge_base.MergeOrAddUser(
rdfvalue.KnowledgeBaseUser(username="test2"))
test_rdf = rdfvalue.KnowledgeBase()
action_args = {"usernames": ["%%users.username%%", "%%users.username%%"],
"nointerp": "asdfsdf", "notastring": test_rdf}
kwargs = collect_flow.InterpolateDict(action_args)
self.assertItemsEqual(kwargs["usernames"],
["test1", "test2", "test1", "test2"])
self.assertEqual(kwargs["nointerp"], "asdfsdf")
self.assertEqual(kwargs["notastring"], test_rdf)
# We should be using an array since users.username will expand to multiple
# values.
self.assertRaises(ValueError, collect_flow.InterpolateDict,
{"bad": "%%users.username%%"})
list_args = collect_flow.InterpolateList(["%%users.username%%",
"%%users.username%%aa"])
self.assertItemsEqual(list_args, ["test1", "test2", "test1aa", "test2aa"])
list_args = collect_flow.InterpolateList(["one"])
self.assertEqual(list_args, ["one"])
def testGrepRegexCombination(self):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
self.assertEqual(collect_flow._CombineRegex([r"simple"]),
"simple")
self.assertEqual(collect_flow._CombineRegex(["a", "b"]),
"(a)|(b)")
self.assertEqual(collect_flow._CombineRegex(["a", "b", "c"]),
"(a)|(b)|(c)")
self.assertEqual(collect_flow._CombineRegex(["a|b", "[^_]b", "c|d"]),
"(a|b)|([^_]b)|(c|d)")
def testGrep(self):
class MockCallFlow(object):
def CallFlow(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
mock_call_flow = MockCallFlow()
with utils.Stubber(collectors.ArtifactCollectorFlow, "CallFlow",
mock_call_flow.CallFlow):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
collect_flow.state.Register("knowledge_base", rdfvalue.KnowledgeBase())
collect_flow.current_artifact_name = "blah"
collect_flow.state.knowledge_base.MergeOrAddUser(
rdfvalue.KnowledgeBaseUser(username="test1"))
collect_flow.state.knowledge_base.MergeOrAddUser(
rdfvalue.KnowledgeBaseUser(username="test2"))
collector = rdfvalue.Collector(
collector_type=rdfvalue.Collector.CollectorType.GREP,
args={"path_list": ["/etc/passwd"],
"content_regex_list": [r"^a%%users.username%%b$"]})
collect_flow.Grep(collector, rdfvalue.PathSpec.PathType.TSK)
conditions = mock_call_flow.kwargs["conditions"]
self.assertEqual(len(conditions), 1)
regexes = conditions[0].contents_regex_match.regex.SerializeToString()
self.assertItemsEqual(regexes.split("|"), ["(^atest1b$)", "(^atest2b$)"])
self.assertEqual(mock_call_flow.kwargs["paths"], ["/etc/passwd"])
def testGetArtifact1(self):
"""Test we can get a basic artifact."""
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"FingerprintFile", "HashBuffer")
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
# Dynamically add a Collector specifying the base path.
file_path = os.path.join(self.base_path, "test_img.dd")
coll1 = rdfvalue.Collector(
collector_type=rdfvalue.Collector.CollectorType.FILE,
args={"path_list": [file_path]})
self.fakeartifact.collectors.append(coll1)
artifact_list = ["FakeArtifact"]
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list, use_tsk=False,
token=self.token, client_id=self.client_id
):
pass
# Test the AFF4 file that was created.
fd1 = aff4.FACTORY.Open("%s/fs/os/%s" % (self.client_id, file_path),
token=self.token)
fd2 = open(file_path)
fd2.seek(0, 2)
self.assertEqual(fd2.tell(), int(fd1.Get(fd1.Schema.SIZE)))
def testRunGrrClientActionArtifact(self):
"""Test we can get a GRR client artifact."""
client_mock = action_mocks.ActionMock("ListProcesses")
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
coll1 = rdfvalue.Collector(
collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
args={"client_action": r"ListProcesses"})
self.fakeartifact.collectors.append(coll1)
artifact_list = ["FakeArtifact"]
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token, client_id=self.client_id,
output="test_artifact"
):
pass
# Test the AFF4 file that was created.
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add("test_artifact"),
token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
self.assertTrue(len(fd) > 5)
def testRunGrrClientActionArtifactSplit(self):
"""Test that artifacts get split into separate collections."""
client_mock = action_mocks.ActionMock("ListProcesses", "StatFile")
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
coll1 = rdfvalue.Collector(
collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
args={"client_action": r"ListProcesses"})
self.fakeartifact.collectors.append(coll1)
self.fakeartifact2.collectors.append(coll1)
artifact_list = ["FakeArtifact", "FakeArtifact2"]
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token, client_id=self.client_id,
output="test_artifact",
split_output_by_artifact=True):
pass
# Check that we got two separate collections based on artifact name
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
self.client_id).Add("test_artifact_FakeArtifact"),
token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
self.assertTrue(len(fd) > 5)
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
self.client_id).Add("test_artifact_FakeArtifact2"),
token=self.token)
self.assertTrue(len(fd) > 5)
self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
def testConditions(self):
"""Test we can get a GRR client artifact with conditions."""
# Run with false condition.
client_mock = action_mocks.ActionMock("ListProcesses")
coll1 = rdfvalue.Collector(
collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
args={"client_action": "ListProcesses"},
conditions=["os == 'Windows'"])
self.fakeartifact.collectors.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "AFF4Volume")
# Now run with matching or condition.
coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
self.fakeartifact.collectors = []
self.fakeartifact.collectors.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "RDFValueCollection")
# Now run with impossible or condition.
coll1.conditions.append("os == 'NotTrue'")
self.fakeartifact.collectors = []
self.fakeartifact.collectors.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "AFF4Volume")
def testSupportedOS(self):
"""Test supported_os inside the collector object."""
# Run with false condition.
client_mock = action_mocks.ActionMock("ListProcesses")
coll1 = rdfvalue.Collector(
collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
args={"client_action": "ListProcesses"}, supported_os=["Windows"])
self.fakeartifact.collectors.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "AFF4Volume")
# Now run with matching or condition.
coll1.conditions = []
coll1.supported_os = ["Linux", "Windows"]
self.fakeartifact.collectors = []
self.fakeartifact.collectors.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "RDFValueCollection")
# Now run with impossible or condition.
coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
coll1.supported_os = ["NotTrue"]
self.fakeartifact.collectors = []
self.fakeartifact.collectors.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__.__name__, "AFF4Volume")
def _RunClientActionArtifact(self, client_mock, artifact_list):
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
self.output_count += 1
output = "test_artifact_%d" % self.output_count
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token, client_id=self.client_id,
output=output
):
pass
# Test the AFF4 file was not created, as flow should not have run due to
# conditions.
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add(output),
token=self.token)
return fd
class TestArtifactCollectorsInteractions(CollectorTest):
"""Test the collection of artifacts.
This class loads both real and test artifacts to test the interaction of badly
defined artifacts with real artifacts.
"""
def setUp(self):
"""Add test artifacts to existing registry."""
super(TestArtifactCollectorsInteractions, self).setUp()
self.original_artifact_reg = artifact_lib.ArtifactRegistry.artifacts
self.LoadTestArtifacts()
def tearDown(self):
super(TestArtifactCollectorsInteractions, self).tearDown()
artifact_lib.ArtifactRegistry.artifacts = self.original_artifact_reg
def testProcessCollectedArtifacts(self):
"""Test downloading files from artifacts."""
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "FingerprintFile",
"ListDirectory")
# Get KB initialized
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
artifact_list = ["WindowsPersistenceMechanismFiles"]
with test_lib.Instrument(
transfer.MultiGetFile, "Start") as getfile_instrument:
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id,
output="analysis/{p}/{u}-{t}",
split_output_by_artifact=True):
pass
# Check MultiGetFile got called for our runkey files
# TODO(user): RunKeys for S-1-5-20 are not found because users.sid only
# expands to users with profiles.
pathspecs = getfile_instrument.args[0][0].args.pathspecs
self.assertItemsEqual([x.path for x in pathspecs],
[u"C:\\Windows\\TEMP\\A.exe"])
artifact_list = ["BadPathspecArtifact"]
with test_lib.Instrument(
transfer.MultiGetFile, "Start") as getfile_instrument:
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id,
output="analysis/{p}/{u}-{t}",
split_output_by_artifact=True):
pass
self.assertFalse(getfile_instrument.args)
class TestArtifactCollectorsRealArtifacts(CollectorTest):
"""Test the collection of real artifacts."""
def _CheckDriveAndRoot(self):
client_mock = action_mocks.ActionMock("StatFile", "ListDirectory")
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=[
"SystemDriveEnvironmentVariable"],
token=self.token, client_id=self.client_id,
output="testsystemdrive"):
pass
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
self.client_id).Add("testsystemdrive"), token=self.token)
self.assertEqual(len(fd), 1)
self.assertEqual(str(fd[0]), "C:")
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
artifact_list=["SystemRoot"],
token=self.token, client_id=self.client_id,
output="testsystemroot"):
pass
fd = aff4.FACTORY.Open(
rdfvalue.RDFURN(self.client_id).Add("testsystemroot"), token=self.token)
self.assertEqual(len(fd), 1)
# Filesystem gives WINDOWS, registry gives Windows
self.assertTrue(str(fd[0]) in [r"C:\Windows", r"C:\WINDOWS"])
def testSystemDriveArtifact(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
class BrokenClientMock(action_mocks.ActionMock):
def StatFile(self, _):
raise IOError
def ListDirectory(self, _):
raise IOError
# No registry, broken filesystem, this should just raise.
with self.assertRaises(RuntimeError):
for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow",
BrokenClientMock(), artifact_list=[
"SystemDriveEnvironmentVariable"],
token=self.token,
client_id=self.client_id,
output="testsystemdrive"):
pass
# No registry, so this should use the fallback flow
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
self._CheckDriveAndRoot()
# Registry is present, so this should use the regular artifact collection
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
self._CheckDriveAndRoot()
def testRunWMIComputerSystemProductArtifact(self):
class WMIActionMock(action_mocks.ActionMock):
def WmiQuery(self, _):
return client_fixture.WMI_CMP_SYS_PRD
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
client_mock = WMIActionMock()
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock,
artifact_list=["WMIComputerSystemProduct"], token=self.token,
client_id=self.client_id,
dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,
store_results_in_aff4=True):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token,)
hardware = client.Get(client.Schema.HARDWARE_INFO)
self.assertTrue(isinstance(hardware, rdfvalue.HardwareInfo))
self.assertEqual(str(hardware.serial_number), "2RXYYZ1")
def testRunWMIArtifact(self):
class WMIActionMock(action_mocks.ActionMock):
def WmiQuery(self, _):
return client_fixture.WMI_SAMPLE
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
client_mock = WMIActionMock()
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock, artifact_list=["WMILogicalDisks"],
token=self.token, client_id=self.client_id,
dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,
store_results_in_aff4=True):
pass
# Test that we set the client VOLUMES attribute
client = aff4.FACTORY.Open(self.client_id, token=self.token)
volumes = client.Get(client.Schema.VOLUMES)
self.assertEqual(len(volumes), 2)
for result in volumes:
self.assertTrue(isinstance(result, rdfvalue.Volume))
self.assertTrue(result.windows.drive_letter in ["Z:", "C:"])
if result.windows.drive_letter == "C:":
self.assertAlmostEqual(result.FreeSpacePercent(), 76.142, delta=0.001)
self.assertEqual(result.Name(), "C:")
elif result.windows.drive_letter == "Z:":
self.assertEqual(result.Name(), "homefileshare$")
self.assertAlmostEqual(result.FreeSpacePercent(), 58.823, delta=0.001)
def testRetrieveDependencies(self):
"""Test getting an artifact without a KB using retrieve_depdendencies."""
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_VERSION("6.2"))
client.Flush()
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "FingerprintFile",
"ListDirectory")
artifact_list = ["WinDirEnvironmentVariable"]
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock, artifact_list=artifact_list,
token=self.token, client_id=self.client_id,
dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.FETCH_NOW,
output="testRetrieveDependencies"):
pass
output = aff4.FACTORY.Open(self.client_id.Add("testRetrieveDependencies"),
token=self.token)
self.assertEqual(len(output), 1)
self.assertEqual(output[0], r"C:\Windows")
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| defaultnamehere/grr | lib/flows/general/collectors_test.py | Python | apache-2.0 | 21,712 |
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class AppServiceConfig(Config):
def read_config(self, config):
self.app_service_config_files = config.get("app_service_config_files", [])
    @classmethod
    def default_config(cls, **kwargs):
return """\
# A list of application service config file to use
app_service_config_files: []
"""
| iot-factory/synapse | synapse/config/appservice.py | Python | apache-2.0 | 926 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RAFT."""
# pylint:skip-file
import tensorflow as tf
def create_update_Conv2d(c_in, c_out, k_size):
kernel_scale = 1.0 / 3.0
if isinstance(k_size, list) or isinstance(k_size, tuple):
bias_scale = c_out / (3.0 * c_in * k_size[0] * k_size[1])
else:
bias_scale = c_out / (3.0 * c_in * k_size * k_size)
return tf.keras.layers.Conv2D(
filters=c_out,
kernel_size=k_size,
kernel_initializer=tf.keras.initializers.VarianceScaling(
distribution='uniform', scale=kernel_scale, mode='fan_in'),
bias_initializer=tf.keras.initializers.VarianceScaling(
distribution='uniform', scale=bias_scale, mode='fan_in'))
class ConvGRU(tf.keras.layers.Layer):
def __init__(self, hidden_dim=128, input_dim=192 + 128, **kwargs):
super(ConvGRU, self).__init__(**kwargs)
self.convz = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
self.convr = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
self.convq = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
def call(self, input_tensor):
h, x = input_tensor
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz(pad_hx))
r = tf.math.sigmoid(self.convr(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq(pad_q))
h = (1 - z) * h + z * q
return h
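# For reference, ConvGRU implements the standard convolutional GRU update
# (sigma = sigmoid, * = elementwise product, [.,.] = channel concatenation):
#   z = sigma(Conv_z([h, x]))      # update gate
#   r = sigma(Conv_r([h, x]))      # reset gate
#   q = tanh(Conv_q([r * h, x]))   # candidate state
#   h' = (1 - z) * h + z * q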
class SepConvGRU(tf.keras.layers.Layer):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convr1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convq1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convz2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
self.convr2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
self.convq2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
def call(self, input_tensor):
h, x = input_tensor
# horizontal
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [0, 0], [2, 2], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz1(pad_hx))
r = tf.math.sigmoid(self.convr1(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq1(pad_q))
h = (1 - z) * h + z * q
# vertical
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [2, 2], [0, 0], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz2(pad_hx))
r = tf.math.sigmoid(self.convr2(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq2(pad_q))
h = (1 - z) * h + z * q
return h
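# SepConvGRU applies the same gated update twice, factorizing the spatial
# kernel into a horizontal (1x5) pass followed by a vertical (5x1) pass;
# this is cheaper than one 5x5 convolution with a similar receptive field.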
class FlowHead(tf.keras.layers.Layer):
def __init__(self, hidden_dim=256, input_dim=128, **kwargs):
super(FlowHead, self).__init__(**kwargs)
self.conv1 = create_update_Conv2d(
c_in=input_dim, c_out=hidden_dim, k_size=3)
self.conv2 = create_update_Conv2d(c_in=hidden_dim, c_out=2, k_size=3)
def call(self, x):
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
conv = tf.nn.relu(self.conv1(tf.pad(x, paddings)))
return self.conv2(tf.pad(conv, paddings))
class BasicMotionEncoder(tf.keras.layers.Layer):
def __init__(self, args, **kwargs):
super(BasicMotionEncoder, self).__init__(**kwargs)
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)**2
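    # For example, with the common RAFT settings corr_levels=4 and
    # corr_radius=4 this is 4 * (2*4 + 1)**2 = 324 correlation channels
    # (those defaults are an assumption; the actual values come from `args`).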
self.convc1 = create_update_Conv2d(c_in=cor_planes, c_out=256, k_size=1)
self.convc2 = create_update_Conv2d(c_in=256, c_out=192, k_size=3)
self.convf1 = create_update_Conv2d(c_in=2, c_out=128, k_size=7)
self.convf2 = create_update_Conv2d(c_in=128, c_out=64, k_size=3)
self.conv = create_update_Conv2d(c_in=64 + 192, c_out=128 - 2, k_size=3)
def call(self, input_tensor):
flow, corr = input_tensor
cor = tf.nn.relu(self.convc1(corr))
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
cor = tf.nn.relu(self.convc2(tf.pad(cor, paddings)))
paddings7 = [[0, 0], [3, 3], [3, 3], [0, 0]]
flo = tf.nn.relu(self.convf1(tf.pad(flow, paddings7)))
flo = tf.nn.relu(self.convf2(tf.pad(flo, paddings)))
cor_flo = tf.concat([cor, flo], axis=-1)
out = tf.nn.relu(self.conv(tf.pad(cor_flo, paddings)))
return tf.concat([out, flow], axis=-1)
class SmallMotionEncoder(tf.keras.layers.Layer):
def __init__(self, args, **kwargs):
super(SmallMotionEncoder, self).__init__(**kwargs)
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)**2
self.convc1 = create_update_Conv2d(c_in=cor_planes, c_out=96, k_size=1)
self.convf1 = create_update_Conv2d(c_in=96, c_out=64, k_size=7)
self.convf2 = create_update_Conv2d(c_in=64, c_out=32, k_size=3)
self.conv = create_update_Conv2d(c_in=32, c_out=80, k_size=3)
def call(self, input_tensor):
flow, corr = input_tensor
cor = tf.nn.relu(self.convc1(corr))
paddings7 = [[0, 0], [3, 3], [3, 3], [0, 0]]
flo = tf.nn.relu(self.convf1(tf.pad(flow, paddings7)))
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
flo = tf.nn.relu(self.convf2(tf.pad(flo, paddings)))
cor_flo = tf.concat([cor, flo], axis=-1)
out = tf.nn.relu(self.conv(tf.pad(cor_flo, paddings)))
return tf.concat([out, flow], axis=-1)
class BasicUpdateBlock(tf.keras.layers.Layer):
def __init__(self, args, hidden_dim=128, **kwargs):
super(BasicUpdateBlock, self).__init__(**kwargs)
self.args = args
self.encoder = BasicMotionEncoder(args)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim=256, input_dim=hidden_dim)
if args.convex_upsampling:
self.mask = tf.keras.Sequential(
[create_update_Conv2d(c_in=128, c_out=256, k_size=3),
tf.keras.layers.ReLU(),
create_update_Conv2d(c_in=256, c_out=64 * 9, k_size=1)
])
def call(self, input_tensor, training):
net, inp, corr, flow = input_tensor
motion_features = self.encoder([flow, corr])
inp = tf.concat([inp, motion_features], axis=-1)
net = self.gru([net, inp])
delta_flow = self.flow_head(net)
if self.args.convex_upsampling:
# Scale mask to balance gradients.
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
pad_net = tf.pad(net, paddings)
mask = .25 * self.mask(pad_net)
else:
mask = None
return net, mask, delta_flow
class SmallUpdateBlock(tf.keras.layers.Layer):
def __init__(self, args, hidden_dim=96, **kwargs):
super(SmallUpdateBlock, self).__init__(**kwargs)
self.encoder = SmallMotionEncoder(args)
self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82 + 64)
self.flow_head = FlowHead(hidden_dim=128, input_dim=hidden_dim)
def call(self, input_tensor, training):
net, inp, corr, flow = input_tensor
motion_features = self.encoder([flow, corr])
inp = tf.concat([inp, motion_features], axis=-1)
net = self.gru([net, inp])
delta_flow = self.flow_head(net)
return net, None, delta_flow
| google-research/google-research | smurf/smurf_models/raft_update.py | Python | apache-2.0 | 8,034 |
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import sys
import time
import unittest
from fabric.api import local
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import (
BGP_FSM_IDLE,
BGP_FSM_ACTIVE,
BGP_FSM_ESTABLISHED,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGP_ATTR_TYPE_LOCAL_PREF,
wait_for_completion,
assert_several_times,
)
from lib.gobgp import (
GoBGPContainer,
extract_path_attribute,
)
from lib.quagga import QuaggaBGPContainer
from lib.exabgp import ExaBGPContainer
class GoBGPTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
q1 = QuaggaBGPContainer(name='q1', asn=65001, router_id='192.168.0.2')
q2 = QuaggaBGPContainer(name='q2', asn=65002, router_id='192.168.0.3')
q3 = QuaggaBGPContainer(name='q3', asn=65003, router_id='192.168.0.4')
qs = [q1, q2, q3]
ctns = [g1, q1, q2, q3]
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
for q in qs:
g1.add_peer(q, passwd='passwd')
q.add_peer(g1, passwd='passwd', passive=True)
# advertise a route from q1, q2, q3
for idx, q in enumerate(qs):
route = '10.0.{0}.0/24'.format(idx + 1)
q.add_route(route)
cls.gobgp = g1
cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3}
# test each neighbor state is turned establish
def test_01_neighbor_established(self):
for q in self.quaggas.itervalues():
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)
def test_02_check_gobgp_global_rib(self):
for q in self.quaggas.itervalues():
# paths expected to exist in gobgp's global rib
routes = q.routes.keys()
timeout = 120
interval = 1
count = 0
while True:
# gobgp's global rib
state = self.gobgp.get_neighbor_state(q)
self.assertEqual(state, BGP_FSM_ESTABLISHED)
global_rib = [p['prefix'] for p in self.gobgp.get_global_rib()]
for p in global_rib:
if p in routes:
routes.remove(p)
if len(routes) == 0:
break
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
    # check that gobgp properly adds its own asn to the aspath
def test_03_check_gobgp_adj_out_rib(self):
for q in self.quaggas.itervalues():
for path in self.gobgp.get_adj_rib_out(q):
asns = path['aspath']
self.assertTrue(self.gobgp.asn in asns)
    # check that routes are properly advertised to all BGP speakers
def test_04_check_quagga_global_rib(self):
interval = 1
timeout = int(120 / interval)
for q in self.quaggas.itervalues():
done = False
for _ in range(timeout):
if done:
break
global_rib = q.get_global_rib()
global_rib = [p['prefix'] for p in global_rib]
if len(global_rib) < len(self.quaggas):
time.sleep(interval)
continue
self.assertTrue(len(global_rib) == len(self.quaggas))
for c in self.quaggas.itervalues():
for r in c.routes:
self.assertTrue(r in global_rib)
done = True
if done:
continue
# should not reach here
raise AssertionError
def test_05_add_quagga(self):
q4 = QuaggaBGPContainer(name='q4', asn=65004, router_id='192.168.0.5')
self.quaggas['q4'] = q4
initial_wait_time = q4.run()
time.sleep(initial_wait_time)
self.gobgp.add_peer(q4)
q4.add_peer(self.gobgp)
q4.add_route('10.0.4.0/24')
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q4)
def test_06_check_global_rib(self):
self.test_02_check_gobgp_global_rib()
self.test_04_check_quagga_global_rib()
def test_07_stop_one_quagga(self):
g1 = self.gobgp
q4 = self.quaggas['q4']
q4.stop()
self.gobgp.wait_for(expected_state=BGP_FSM_ACTIVE, peer=q4)
g1.del_peer(q4)
del self.quaggas['q4']
    # check that gobgp properly sends a withdrawal message for q4's route
def test_08_check_global_rib(self):
self.test_02_check_gobgp_global_rib()
self.test_04_check_quagga_global_rib()
def test_09_add_distant_relative(self):
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
q3 = self.quaggas['q3']
q5 = QuaggaBGPContainer(name='q5', asn=65005, router_id='192.168.0.6')
initial_wait_time = q5.run()
time.sleep(initial_wait_time)
for q in [q2, q3]:
q5.add_peer(q)
q.add_peer(q5)
med200 = {'name': 'med200',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 200}
q2.add_policy(med200, self.gobgp, 'out')
med100 = {'name': 'med100',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 100}
q3.add_policy(med100, self.gobgp, 'out')
q5.add_route('10.0.6.0/24')
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q2)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q3)
q2.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
q3.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
timeout = 120
interval = 1
count = 0
while True:
paths = self.gobgp.get_adj_rib_out(q1, '10.0.6.0/24')
if len(paths) > 0:
path = paths[0]
print "{0}'s nexthop is {1}".format(path['nlri']['prefix'],
path['nexthop'])
n_addrs = [i[1].split('/')[0] for i in self.gobgp.ip_addrs]
if path['nexthop'] in n_addrs:
break
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
def test_10_originate_path(self):
self.gobgp.add_route('10.10.0.0/24')
dst = self.gobgp.get_global_rib('10.10.0.0/24')
self.assertTrue(len(dst) == 1)
self.assertTrue(len(dst[0]['paths']) == 1)
path = dst[0]['paths'][0]
self.assertTrue(path['nexthop'] == '0.0.0.0')
self.assertTrue(len(path['aspath']) == 0)
def test_11_check_adj_rib_out(self):
for q in self.quaggas.itervalues():
paths = self.gobgp.get_adj_rib_out(q, '10.10.0.0/24')
self.assertTrue(len(paths) == 1)
path = paths[0]
peer_info = self.gobgp.peers[q]
local_addr = peer_info['local_addr'].split('/')[0]
self.assertTrue(path['nexthop'] == local_addr)
self.assertTrue(path['aspath'] == [self.gobgp.asn])
def test_12_disable_peer(self):
q1 = self.quaggas['q1']
self.gobgp.disable_peer(q1)
self.gobgp.wait_for(expected_state=BGP_FSM_IDLE, peer=q1)
time.sleep(3)
for route in q1.routes.iterkeys():
dst = self.gobgp.get_global_rib(route)
self.assertTrue(len(dst) == 0)
for q in self.quaggas.itervalues():
if q is q1:
continue
paths = self.gobgp.get_adj_rib_out(q, route)
self.assertTrue(len(paths) == 0)
def test_13_enable_peer(self):
q1 = self.quaggas['q1']
self.gobgp.enable_peer(q1)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q1)
def test_14_check_adj_rib_out(self):
self.test_11_check_adj_rib_out()
def test_15_check_active_connection(self):
g1 = self.gobgp
g2 = GoBGPContainer(name='g2', asn=65000, router_id='192.168.0.7',
ctn_image_name=self.gobgp.image,
log_level=parser_option.gobgp_log_level)
time.sleep(g2.run())
self.quaggas['g2'] = g2
g2.add_peer(g1, passive=True)
g1.add_peer(g2)
g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g2)
def test_16_check_local_pref_and_med_handling(self):
g1 = self.gobgp
g1.add_route('10.20.0.0/24', local_pref=1000, med=2000)
# iBGP peer
g2 = self.quaggas['g2']
paths = g2.get_global_rib('10.20.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(len(paths[0]['paths']) == 1)
path = paths[0]['paths'][0]
local_pref = extract_path_attribute(path, BGP_ATTR_TYPE_LOCAL_PREF)
self.assertTrue(local_pref['value'] == 1000)
med = extract_path_attribute(path, BGP_ATTR_TYPE_MULTI_EXIT_DISC)
self.assertTrue(med['metric'] == 2000)
# eBGP peer
q1 = self.quaggas['q1']
paths = q1.get_global_rib('10.20.0.0/24')
self.assertTrue(len(paths) == 1)
path = paths[0]
local_pref = extract_path_attribute(path, BGP_ATTR_TYPE_LOCAL_PREF)
# local_pref's default value is 100
self.assertTrue(local_pref['value'] == 100)
med = extract_path_attribute(path, BGP_ATTR_TYPE_MULTI_EXIT_DISC)
self.assertTrue(med['metric'] == 2000)
def test_17_check_shutdown(self):
g1 = self.gobgp
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
q3 = self.quaggas['q3']
q2.add_route('20.0.0.0/24')
q3.add_route('20.0.0.0/24')
self.test_01_neighbor_established()
self.test_02_check_gobgp_global_rib()
paths = q1.get_global_rib('20.0.0.0/24')
self.assertEqual(len(paths), 1)
n_addrs = [i[1].split('/')[0] for i in self.gobgp.ip_addrs]
self.assertIn(paths[0]['nexthop'], n_addrs)
q3.stop()
self.gobgp.wait_for(expected_state=BGP_FSM_ACTIVE, peer=q3)
paths = q1.get_global_rib('20.0.0.0/24')
self.assertEqual(len(paths), 1)
self.assertIn(paths[0]['nexthop'], n_addrs)
g1.del_peer(q3)
del self.quaggas['q3']
def test_18_check_withdrawal(self):
g1 = self.gobgp
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
g1.add_route('30.0.0.0/24')
q1.add_route('30.0.0.0/24')
self.test_01_neighbor_established()
self.test_02_check_gobgp_global_rib()
paths = g1.get_adj_rib_out(q1, '30.0.0.0/24')
self.assertEqual(len(paths), 1)
self.assertNotIn('source-id', paths[0])
paths = g1.get_adj_rib_out(q2, '30.0.0.0/24')
self.assertEqual(len(paths), 1)
self.assertNotIn('source-id', paths[0])
g1.local('gobgp global rib del 30.0.0.0/24')
def f():
paths = g1.get_adj_rib_out(q1, '30.0.0.0/24')
self.assertEqual(len(paths), 0)
paths = g1.get_adj_rib_out(q2, '30.0.0.0/24')
self.assertEqual(len(paths), 1)
self.assertEqual(paths[0]['source-id'], '192.168.0.2')
assert_several_times(f)
def test_19_check_grpc_add_neighbor(self):
g1 = self.gobgp
e1 = ExaBGPContainer(name='e1', asn=65000, router_id='192.168.0.7')
time.sleep(e1.run())
e1.add_peer(g1)
self.quaggas['e1'] = e1
n = e1.peers[g1]['local_addr'].split('/')[0]
g1.local('gobgp n add {0} as 65000'.format(n))
g1.add_peer(e1, reload_config=False)
g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=e1)
def test_20_check_grpc_del_neighbor(self):
g1 = self.gobgp
e1 = self.quaggas['e1']
n = e1.peers[g1]['local_addr'].split('/')[0]
g1.local('gobgp n del {0}'.format(n))
g1.del_peer(e1, reload_config=False)
def test_21_check_withdrawal_2(self):
g1 = self.gobgp
g2 = self.quaggas['g2']
prefix = '40.10.0.0/24'
g1.add_route(prefix)
wait_for_completion(lambda: len(g1.get_global_rib(prefix)) == 1)
wait_for_completion(lambda: len(g2.get_global_rib(prefix)) == 1)
r = g2.local('gobgp monitor global rib -j', stream=True, tty=False)
g1.local('gobgp global rib del 40.10.0.0/24')
del g1.routes[prefix]
wait_for_completion(lambda: len(g1.get_global_rib(prefix)) == 0)
wait_for_completion(lambda: len(g2.get_global_rib(prefix)) == 0)
ret = json.loads(r.next())
self.assertTrue(ret[0]['nlri']['prefix'] == prefix)
self.assertTrue('withdrawal' in ret[0])
def test_22_check_cli_sorted(self):
g1 = self.gobgp
cnt = 0
def next_prefix():
for i in range(100, 105):
for j in range(100, 105):
yield '{0}.{1}.0.0/24'.format(i, j)
for p in next_prefix():
g1.local('gobgp global rib add {0}'.format(p))
cnt += 1
cnt2 = 0
g = next_prefix()
n = g.next()
for path in g1.local("gobgp global rib", capture=True).split('\n')[1:]:
if [elem for elem in path.split(' ') if elem != ''][1] == n:
try:
cnt2 += 1
n = g.next()
except StopIteration:
break
self.assertTrue(cnt == cnt2)
def test_23_check_withdrawal3(self):
gobgp_ctn_image_name = parser_option.gobgp_image
g1 = self.gobgp
g3 = GoBGPContainer(name='g3', asn=65006, router_id='192.168.0.8',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
g4 = GoBGPContainer(name='g4', asn=65007, router_id='192.168.0.9',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
initial_wait_time = max(ctn.run() for ctn in [g3, g4])
time.sleep(initial_wait_time)
self.quaggas = {'g3': g3, 'g4': g4}
g3.local('gobgp global rib add 50.0.0.0/24')
g1.add_peer(g3, passive=True)
g3.add_peer(g1)
g1.add_peer(g4, passive=True)
g4.add_peer(g1)
self.test_01_neighbor_established()
self.test_02_check_gobgp_global_rib()
g4.local('gobgp global rib add 50.0.0.0/24 med 10')
paths = g1.get_adj_rib_out(g3, '50.0.0.0/24')
self.assertTrue(len(paths) == 0)
paths = g1.get_adj_rib_out(g4, '50.0.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(paths[0]['source-id'] == '192.168.0.8')
g3.local('gobgp global rib del 50.0.0.0/24')
paths = g1.get_adj_rib_out(g3, '50.0.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(paths[0]['source-id'] == '192.168.0.9')
paths = g1.get_adj_rib_out(g4, '50.0.0.0/24')
self.assertTrue(len(paths) == 0)
if __name__ == '__main__':
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
    if int(output) != 0:
print "docker not found"
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
| coreswitch/openconfigd | vendor/github.com/osrg/gobgp/test/scenario_test/bgp_router_test.py | Python | apache-2.0 | 16,381 |
from setuptools import find_packages
from setuptools import setup
setup(
name='svs',
version='1.0.0',
    description='The InAcademia Simple validation Service allows for the easy validation of affiliation (Student, '
'Faculty, Staff) of a user in Academia',
license='Apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
],
author='Rebecka Gulliksson',
author_email='tech@inacademia.org',
zip_safe=False,
url='http://www.inacademia.org',
packages=find_packages('src'),
package_dir={'': 'src'},
package_data={
'svs': [
'data/i18n/locale/*/LC_MESSAGES/*.mo',
'templates/*.mako',
'site/static/*',
],
},
message_extractors={
'src/svs': [
('**.py', 'python', None),
('templates/**.mako', 'mako', None),
('site/**', 'ignore', None)
]
},
install_requires=[
'satosa==3.3.1',
'Mako',
'gunicorn',
'Werkzeug'
]
)
| its-dirg/svs | setup.py | Python | apache-2.0 | 1,148 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateFolder
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-resourcemanager
# [START cloudresourcemanager_v3_generated_Folders_UpdateFolder_sync]
from google.cloud import resourcemanager_v3
def sample_update_folder():
# Create a client
client = resourcemanager_v3.FoldersClient()
# Initialize request argument(s)
folder = resourcemanager_v3.Folder()
folder.parent = "parent_value"
request = resourcemanager_v3.UpdateFolderRequest(
folder=folder,
)
# Make the request
operation = client.update_folder(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END cloudresourcemanager_v3_generated_Folders_UpdateFolder_sync]
| googleapis/python-resource-manager | samples/generated_samples/cloudresourcemanager_v3_generated_folders_update_folder_sync.py | Python | apache-2.0 | 1,634 |
"""This module implements the Lovins stemming algorithm. Use the ``stem()``
function::
stemmed_word = stem(word)
"""
from whoosh.util.collections2 import defaultdict
# Conditions
def A(base):
# A No restrictions on stem
return True
def B(base):
# B Minimum stem length = 3
return len(base) > 2
def C(base):
# C Minimum stem length = 4
return len(base) > 3
def D(base):
# D Minimum stem length = 5
return len(base) > 4
def E(base):
# E Do not remove ending after e
return base[-1] != "e"
def F(base):
# F Minimum stem length = 3 and do not remove ending after e
return len(base) > 2 and base[-1] != "e"
def G(base):
# G Minimum stem length = 3 and remove ending only after f
return len(base) > 2 and base[-1] == "f"
def H(base):
# H Remove ending only after t or ll
c1, c2 = base[-2:]
return c2 == "t" or (c2 == "l" and c1 == "l")
def I(base):
# I Do not remove ending after o or e
c = base[-1]
return c != "o" and c != "e"
def J(base):
# J Do not remove ending after a or e
c = base[-1]
return c != "a" and c != "e"
def K(base):
# K Minimum stem length = 3 and remove ending only after l, i or u*e
c = base[-1]
cc = base[-3]
return len(base) > 2 and (c == "l" or c == "i" or (c == "e" and cc == "u"))
def L(base):
# L Do not remove ending after u, x or s, unless s follows o
c1, c2 = base[-2:]
return c2 != "u" and c2 != "x" and (c2 != "s" or c1 == "o")
def M(base):
# M Do not remove ending after a, c, e or m
c = base[-1]
return c != "a" and c!= "c" and c != "e" and c != "m"
def N(base):
# N Minimum stem length = 4 after s**, elsewhere = 3
return len(base) > 3 or (len(base) == 3 and base[-1] != "s")
def O(base):
# O Remove ending only after l or i
c = base[-1]
return c == "l" or c == "i"
def P(base):
# P Do not remove ending after c
return base[-1] != "c"
def Q(base):
# Q Minimum stem length = 3 and do not remove ending after l or n
c = base[-1]
return len(base) > 2 and (c != "l" and c != "n")
def R(base):
# R Remove ending only after n or r
c = base[-1]
return c == "n" or c == "r"
def S(base):
# S Remove ending only after dr or t, unless t follows t
    l2 = base[-2:]
    return l2 == "dr" or (base[-1] == "t" and l2 != "tt")
def T(base):
# T Remove ending only after s or t, unless t follows o
c1, c2 = base[-2:]
return c2 == "s" or (c2 == "t" and c1 != "o")
def U(base):
# U Remove ending only after l, m, n or r
c = base[-1]
return c == "l" or c == "m" or c == "n" or c == "r"
def V(base):
# V Remove ending only after c
return base[-1] == "c"
def W(base):
# W Do not remove ending after s or u
c = base[-1]
return c != "s" and c != "u"
def X(base):
# X Remove ending only after l, i or u*e
c = base[-1]
cc = base[-3]
return c == "l" or c == "i" or (c == "e" and cc == "u")
def Y(base):
# Y Remove ending only after in
return base[-2:] == "in"
def Z(base):
# Z Do not remove ending after f
return base[-1] != "f"
def a(base):
# a Remove ending only after d, f, ph, th, l, er, or, es or t
c = base[-1]
l2 = base[-2:]
return (c == "d" or c == "f" or l2 == "ph" or l2 == "th" or c == "l"
or l2 == "er" or l2 == "or" or l2 == "es" or c == "t")
def b(base):
# b Minimum stem length = 3 and do not remove ending after met or ryst
return len(base) > 2 and not (base.endswith("met")
or base.endswith("ryst"))
def c(base):
# c Remove ending only after l
return base[-1] == "l"
# Endings
m = [None] * 12
m[11] = dict((
("alistically", B),
("arizability", A),
("izationally", B)))
m[10] = dict((
("antialness", A),
("arisations", A),
("arizations", A),
("entialness", A)))
m[9] = dict((
("allically", C),
("antaneous", A),
("antiality", A),
("arisation", A),
("arization", A),
("ationally", B),
("ativeness", A),
("eableness", E),
("entations", A),
("entiality", A),
("entialize", A),
("entiation", A),
("ionalness", A),
("istically", A),
("itousness", A),
("izability", A),
("izational", A)))
m[8] = dict((
("ableness", A),
("arizable", A),
("entation", A),
("entially", A),
("eousness", A),
("ibleness", A),
("icalness", A),
("ionalism", A),
("ionality", A),
("ionalize", A),
("iousness", A),
("izations", A),
("lessness", A)))
m[7] = dict((
("ability", A),
("aically", A),
("alistic", B),
("alities", A),
("ariness", E),
("aristic", A),
("arizing", A),
("ateness", A),
("atingly", A),
("ational", B),
("atively", A),
("ativism", A),
("elihood", E),
("encible", A),
("entally", A),
("entials", A),
("entiate", A),
("entness", A),
("fulness", A),
("ibility", A),
("icalism", A),
("icalist", A),
("icality", A),
("icalize", A),
("ication", G),
("icianry", A),
("ination", A),
("ingness", A),
("ionally", A),
("isation", A),
("ishness", A),
("istical", A),
("iteness", A),
("iveness", A),
("ivistic", A),
("ivities", A),
("ization", F),
("izement", A),
("oidally", A),
("ousness", A)))
m[6] = dict((
("aceous", A),
("acious", B),
("action", G),
("alness", A),
("ancial", A),
("ancies", A),
("ancing", B),
("ariser", A),
("arized", A),
("arizer", A),
("atable", A),
("ations", B),
("atives", A),
("eature", Z),
("efully", A),
("encies", A),
("encing", A),
("ential", A),
("enting", C),
("entist", A),
("eously", A),
("ialist", A),
("iality", A),
("ialize", A),
("ically", A),
("icance", A),
("icians", A),
("icists", A),
("ifully", A),
("ionals", A),
("ionate", D),
("ioning", A),
("ionist", A),
("iously", A),
("istics", A),
("izable", E),
("lessly", A),
("nesses", A),
("oidism", A)))
m[5] = dict((
("acies", A),
("acity", A),
("aging", B),
("aical", A),
("alist", A),
("alism", B),
("ality", A),
("alize", A),
("allic", b),
("anced", B),
("ances", B),
("antic", C),
("arial", A),
("aries", A),
("arily", A),
("arity", B),
("arize", A),
("aroid", A),
("ately", A),
("ating", I),
("ation", B),
("ative", A),
("ators", A),
("atory", A),
("ature", E),
("early", Y),
("ehood", A),
("eless", A),
("elily", A),
("ement", A),
("enced", A),
("ences", A),
("eness", E),
("ening", E),
("ental", A),
("ented", C),
("ently", A),
("fully", A),
("ially", A),
("icant", A),
("ician", A),
("icide", A),
("icism", A),
("icist", A),
("icity", A),
("idine", I),
("iedly", A),
("ihood", A),
("inate", A),
("iness", A),
("ingly", B),
("inism", J),
("inity", c),
("ional", A),
("ioned", A),
("ished", A),
("istic", A),
("ities", A),
("itous", A),
("ively", A),
("ivity", A),
("izers", F),
("izing", F),
("oidal", A),
("oides", A),
("otide", A),
("ously", A)))
m[4] = dict((
("able", A),
("ably", A),
("ages", B),
("ally", B),
("ance", B),
("ancy", B),
("ants", B),
("aric", A),
("arly", K),
("ated", I),
("ates", A),
("atic", B),
("ator", A),
("ealy", Y),
("edly", E),
("eful", A),
("eity", A),
("ence", A),
("ency", A),
("ened", E),
("enly", E),
("eous", A),
("hood", A),
("ials", A),
("ians", A),
("ible", A),
("ibly", A),
("ical", A),
("ides", L),
("iers", A),
("iful", A),
("ines", M),
("ings", N),
("ions", B),
("ious", A),
("isms", B),
("ists", A),
("itic", H),
("ized", F),
("izer", F),
("less", A),
("lily", A),
("ness", A),
("ogen", A),
("ward", A),
("wise", A),
("ying", B),
("yish", A)))
m[3] = dict((
("acy", A),
("age", B),
("aic", A),
("als", b),
("ant", B),
("ars", O),
("ary", F),
("ata", A),
("ate", A),
("eal", Y),
("ear", Y),
("ely", E),
("ene", E),
("ent", C),
("ery", E),
("ese", A),
("ful", A),
("ial", A),
("ian", A),
("ics", A),
("ide", L),
("ied", A),
("ier", A),
("ies", P),
("ily", A),
("ine", M),
("ing", N),
("ion", Q),
("ish", C),
("ism", B),
("ist", A),
("ite", a),
("ity", A),
("ium", A),
("ive", A),
("ize", F),
("oid", A),
("one", R),
("ous", A)))
m[2] = dict((
("ae", A),
("al", b),
("ar", X),
("as", B),
("ed", E),
("en", F),
("es", E),
("ia", A),
("ic", A),
("is", A),
("ly", B),
("on", S),
("or", T),
("um", U),
("us", V),
("yl", R),
("s'", A),
("'s", A)))
m[1] = dict((
("a", A),
("e", A),
("i", A),
("o", A),
("s", W),
("y", B)))
def remove_ending(word):
length = len(word)
el = 11
while el > 0:
if length - el > 1:
ending = word[length-el:]
cond = m[el].get(ending)
if cond:
base = word[:length-el]
if cond(base):
return base
el -= 1
return word
_endings = (("iev", "ief"),
("uct", "uc"),
("iev", "ief"),
("uct", "uc"),
("umpt", "um"),
("rpt", "rb"),
("urs", "ur"),
("istr", "ister"),
("metr", "meter"),
("olv", "olut"),
("ul", "l", "aoi"),
("bex", "bic"),
("dex", "dic"),
("pex", "pic"),
("tex", "tic"),
("ax", "ac"),
("ex", "ec"),
("ix", "ic"),
("lux", "luc"),
("uad", "uas"),
("vad", "vas"),
("cid", "cis"),
("lid", "lis"),
("erid", "eris"),
("pand", "pans"),
("end", "ens", "s"),
("ond", "ons"),
("lud", "lus"),
("rud", "rus"),
("her", "hes", "pt"),
("mit", "mis"),
("ent", "ens", "m"),
("ert", "ers"),
("et", "es", "n"),
("yt", "ys"),
("yz", "ys"))
# Hash the ending rules by the last letter of the target ending
_endingrules = defaultdict(list)
for rule in _endings:
_endingrules[rule[0][-1]].append(rule)
_doubles = frozenset(("dd", "gg", "ll", "mm", "nn", "pp", "rr", "ss", "tt"))
def fix_ending(word):
if word[-2:] in _doubles:
word = word[:-1]
for endingrule in _endingrules[word[-1]]:
target, newend = endingrule[:2]
if word.endswith(target):
if len(endingrule) > 2:
exceptafter = endingrule[2]
                c = word[-(len(target) + 1)]
                if c in exceptafter:
                    return word
            return word[:-len(target)] + newend
return word
def stem(word):
"""Returns the stemmed version of the argument string.
"""
return fix_ending(remove_ending(word))
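# Illustrative usage sketch (not part of the original module; the example
# words are arbitrary): running this file directly prints the stems produced
# by the rule tables above. stem() first strips the longest matching ending
# whose condition holds, then repairs the remaining base via fix_ending().
if __name__ == "__main__":
    for example in ("nationally", "sitting", "induction", "flying"):
        print("%s -> %s" % (example, stem(example)))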
| archatas/whoosh | whoosh/lang/lovins.py | Python | apache-2.0 | 12,657 |
from optparse import OptionParser
import re
import os
import sys
import numpy as np
from ..util import dirs
from ..util import file_handling as fh
from ..preprocessing import data_splitting as ds
from ..feature_extractors.vocabulary_with_counts import VocabWithCounts
def main():
usage = "%prog project"
parser = OptionParser(usage=usage)
parser.add_option('-v', dest='vocab_size', default=10000,
help='Vocabulary size (most frequent words): default=%default')
parser.add_option('--seed', dest='seed', default=42,
help='Random seed: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
project_name = args[0]
dirs.make_base_dir(project_name)
vocab_size = int(options.vocab_size)
suffixes = {"'s", "n't"}
pronouns = {"i", 'you', 'he', 'his', 'she', 'her', 'hers', 'it', 'its', 'we', 'you', 'your', 'they', 'them', 'their'}
determiners = {'a', 'an', 'the', 'this', 'that', 'these', 'those'}
prepositions = {'at', 'by', 'for', 'from', 'in', 'into', 'of', 'on', 'than', 'to', 'with'}
transitional = {'and', 'also', 'as', 'but', 'if', 'or', 'then'}
common_verbs = {'are', 'be', 'been', 'had', 'has', 'have', 'is', 'said', 'was', 'were'}
stopwords = suffixes.union(pronouns).union(determiners).union(prepositions).union(transitional).union(common_verbs)
print "Removing %d stopwords:" % len(stopwords)
for s in stopwords:
print s
# set random seed
np.random.seed(int(options.seed))
# read in data
dirs.make_base_dir(project_name)
sentences = fh.read_json(dirs.get_processed_text_file())
all_documents = sentences.keys()
documents = list(set(all_documents))
# create a vocabulary and fill it with the tokenized documents
tokenized, vocab = tokenize(sentences, documents, stopwords=stopwords)
print "Most common words in corpus:"
most_common = vocab.most_common(50)
most_common.sort()
for v in most_common:
print v
# set vocabulary size and prune tokens
print "Pruning vocabulary"
vocab.prune(n_words=vocab_size)
n_words = 0
for k in documents:
tokens = [t for t in tokenized[k] if t in vocab.token2index]
n_words += len(tokens)
tokenized[k] = tokens
n_documents = len(documents)
n_vocab = len(vocab)
print n_documents, "documents"
print n_vocab, "word types"
print n_words, "word tokens"
# create the count matrices
vocab_assignments = np.zeros(n_words, dtype=int) # vocab index of the ith word
#topic_assignments = np.zeros(n_words, dtype=int) # topic of the ith word
doc_assignments = np.zeros(n_words, dtype=int) # document of the ith word
count = 0
for d_i, d in enumerate(documents):
tokens = tokenized[d]
for t in tokens:
v_index = vocab.get_index(t)
assert v_index >= 0
#w_topic = np.random.randint(n_topics)
vocab_assignments[count] = v_index
#topic_assignments[count] = w_topic
doc_assignments[count] = d_i
#topic_counts[w_topic] += 1
#vocab_topics[v_index, w_topic] += 1
#doc_topics[d_i, w_topic] += 1
count += 1
assert count == n_words
output_filename = os.path.join(dirs.lda_dir, 'word_num.json')
fh.write_to_json(list(vocab_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'word_doc.json')
fh.write_to_json(list(doc_assignments), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'vocab.json')
fh.write_to_json(vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'documents.json')
fh.write_to_json(documents, output_filename, sort_keys=False)
# just exit after writing data
def tokenize(sentences, documents_to_tokenize, stopwords=set()):
print "Tokenizing"
vocab = VocabWithCounts('', add_oov=False)
tokenized = {}
for k in documents_to_tokenize:
text = sentences[k].lower()
        text = re.sub(r'\d', '#', text)
tokens = text.split()
tokens = [t for t in tokens if re.search('[a-zA-Z]', t)]
tokens = [t for t in tokens if t not in stopwords]
vocab.add_tokens(tokens)
tokenized[k] = tokens
return tokenized, vocab
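# Illustrative example (inputs assumed, not from the project data): with
# stopwords=set(), tokenize({'d1': 'Room 101 is open'}, ['d1']) lowercases the
# text, masks digits to give "room ### is open", splits on whitespace, and
# drops "###" because it contains no letters, so tokenized['d1'] ==
# ['room', 'is', 'open'].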
if __name__ == '__main__':
main()
| dallascard/guac | core/lda/lda_preprocessing.py | Python | apache-2.0 | 4,593 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Heat
"""
import logging as sys_logging
import os
from eventlet.green import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import wsgi
LOG = logging.getLogger(__name__)
paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [
cfg.StrOpt('flavor',
help=_("The flavor to use.")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use."))]
service_opts = [
cfg.IntOpt('periodic_interval',
default=60,
help=_('Seconds between running periodic tasks.')),
cfg.StrOpt('heat_metadata_server_url',
default="",
help=_('URL of the Heat metadata server.')),
cfg.StrOpt('heat_waitcondition_server_url',
help=_('URL of the Heat waitcondition server.')),
cfg.StrOpt('heat_watch_server_url',
default="",
help=_('URL of the Heat CloudWatch server.')),
cfg.StrOpt('instance_connection_is_secure',
default="0",
help=_('Instance connection to CFN/CW API via https.')),
cfg.StrOpt('instance_connection_https_validate_certificates',
default="1",
help=_('Instance connection to CFN/CW API validate certs if '
'SSL is used.')),
cfg.StrOpt('region_name_for_services',
help=_('Default region name used to get services endpoints.')),
cfg.StrOpt('heat_stack_user_role',
default="heat_stack_user",
help=_('Keystone role for heat template-defined users.')),
cfg.StrOpt('stack_user_domain_id',
deprecated_opts=[cfg.DeprecatedOpt('stack_user_domain',
group=None)],
help=_('Keystone domain ID which contains heat '
'template-defined users. If this option is set, '
'stack_user_domain_name option will be ignored.')),
cfg.StrOpt('stack_user_domain_name',
help=_('Keystone domain name which contains heat '
'template-defined users. If `stack_user_domain_id` '
'option is set, this option is ignored.')),
cfg.StrOpt('stack_domain_admin',
help=_('Keystone username, a user with roles sufficient to '
'manage users and projects in the stack_user_domain.')),
cfg.StrOpt('stack_domain_admin_password',
secret=True,
help=_('Keystone password for stack_domain_admin user.')),
cfg.IntOpt('max_template_size',
default=524288,
help=_('Maximum raw byte size of any template.')),
cfg.IntOpt('max_nested_stack_depth',
default=5,
help=_('Maximum depth allowed when using nested stacks.')),
cfg.IntOpt('num_engine_workers',
default=processutils.get_worker_count(),
help=_('Number of heat-engine processes to fork and run.'))]
engine_opts = [
cfg.StrOpt('instance_user',
default='',
help=_("The default user for new instances. This option "
"is deprecated and will be removed in the Juno release. "
"If it's empty, Heat will use the default user set up "
"with your cloud image (for OS::Nova::Server) or "
"'ec2-user' (for AWS::EC2::Instance).")),
cfg.ListOpt('plugin_dirs',
default=['/usr/lib64/heat', '/usr/lib/heat',
'/usr/local/lib/heat', '/usr/local/lib64/heat'],
help=_('List of directories to search for plug-ins.')),
cfg.StrOpt('environment_dir',
default='/etc/heat/environment.d',
help=_('The directory to search for environment files.')),
cfg.StrOpt('deferred_auth_method',
choices=['password', 'trusts'],
default='trusts',
help=_('Select deferred auth method, '
'stored password or trusts.')),
cfg.ListOpt('trusts_delegated_roles',
default=[],
help=_('Subset of trustor roles to be delegated to heat.'
' If left unset, all roles of a user will be'
' delegated to heat when creating a stack.')),
cfg.IntOpt('max_resources_per_stack',
default=1000,
help=_('Maximum resources allowed per top-level stack. '
'-1 stands for unlimited.')),
cfg.IntOpt('max_stacks_per_tenant',
default=100,
help=_('Maximum number of stacks any one tenant may have'
' active at one time.')),
cfg.IntOpt('action_retry_limit',
default=5,
help=_('Number of times to retry to bring a '
'resource to a non-error state. Set to 0 to disable '
'retries.')),
cfg.IntOpt('event_purge_batch_size',
default=10,
help=_("Controls how many events will be pruned whenever a "
"stack's events exceed max_events_per_stack. Set this "
"lower to keep more events at the expense of more "
"frequent purges.")),
cfg.IntOpt('max_events_per_stack',
default=1000,
help=_('Maximum events that will be available per stack. Older'
' events will be deleted when this is reached. Set to 0'
' for unlimited events per stack.')),
cfg.IntOpt('stack_action_timeout',
default=3600,
help=_('Timeout in seconds for stack action (ie. create or'
' update).')),
cfg.IntOpt('error_wait_time',
default=240,
help=_('Error wait time in seconds for stack action (ie. create'
' or update).')),
cfg.IntOpt('engine_life_check_timeout',
default=2,
help=_('RPC timeout for the engine liveness check that is used'
' for stack locking.')),
cfg.BoolOpt('enable_cloud_watch_lite',
default=False,
help=_('Enable the legacy OS::Heat::CWLiteAlarm resource.')),
cfg.BoolOpt('enable_stack_abandon',
default=False,
help=_('Enable the preview Stack Abandon feature.')),
cfg.BoolOpt('enable_stack_adopt',
default=False,
help=_('Enable the preview Stack Adopt feature.')),
cfg.BoolOpt('convergence_engine',
default=False,
help=_('Enables engine with convergence architecture. All '
'stacks with this option will be created using '
                       'convergence engine.')),
cfg.StrOpt('default_software_config_transport',
choices=['POLL_SERVER_CFN',
'POLL_SERVER_HEAT',
'POLL_TEMP_URL'],
default='POLL_SERVER_CFN',
help=_('Template default for how the server should receive the '
'metadata required for software configuration. '
'POLL_SERVER_CFN will allow calls to the cfn API action '
'DescribeStackResource authenticated with the provided '
'keypair (requires enabled heat-api-cfn). '
'POLL_SERVER_HEAT will allow calls to the '
'Heat API resource-show using the provided keystone '
'credentials (requires keystone v3 API, and configured '
'stack_user_* config options). '
'POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling (requires '
'object-store endpoint which supports TempURL).')),
cfg.StrOpt('default_deployment_signal_transport',
choices=['CFN_SIGNAL',
'TEMP_URL_SIGNAL',
'HEAT_SIGNAL'],
default='CFN_SIGNAL',
help=_('Template default for how the server should signal to '
'heat with the deployment output values. CFN_SIGNAL '
'will allow an HTTP POST to a CFN keypair signed URL '
'(requires enabled heat-api-cfn). '
'TEMP_URL_SIGNAL will create a Swift TempURL to be '
'signaled via HTTP PUT (requires object-store endpoint '
'which supports TempURL). '
'HEAT_SIGNAL will allow calls to the Heat API '
'resource-signal using the provided keystone '
'credentials')),
cfg.ListOpt('hidden_stack_tags',
default=[],
help=_('Stacks containing these tag names will be hidden. '
'Multiple tags should be given in a comma-delimited '
'list (eg. hidden_stack_tags=hide_me,me_too).')),
cfg.StrOpt('onready',
help=_('Deprecated.')),
cfg.BoolOpt('stack_scheduler_hints',
default=False,
help=_('When this feature is enabled, scheduler hints'
' identifying the heat stack context of a server'
' resource are passed to the configured schedulers in'
' nova, for server creates done using heat resource'
' types OS::Nova::Server and AWS::EC2::Instance.'
' heat_root_stack_id will be set to the id of the root'
' stack of the resource, heat_stack_id will be set to'
' the id of the resource\'s parent stack,'
' heat_stack_name will be set to the name of the'
' resource\'s parent stack, heat_path_in_stack will be'
' set to a list of tuples,'
' (stackresourcename, stackname) with list[0] being'
' (None, rootstackname), and heat_resource_name will'
' be set to the resource\'s name.')),
cfg.BoolOpt('encrypt_parameters_and_properties',
default=False,
help=_('Encrypt template parameters that were marked as'
' hidden and also all the resource properties before'
' storing them in database.'))]
rpc_opts = [
cfg.StrOpt('host',
default=socket.gethostname(),
help=_('Name of the engine node. '
'This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, '
'or IP address.'))]
profiler_group = cfg.OptGroup('profiler')
profiler_opts = [
cfg.BoolOpt("profiler_enabled", default=False,
help=_('If False fully disable profiling feature.')),
cfg.BoolOpt("trace_sqlalchemy", default=False,
help=_("If False do not trace SQL requests."))
]
auth_password_group = cfg.OptGroup('auth_password')
auth_password_opts = [
cfg.BoolOpt('multi_cloud',
default=False,
help=_('Allow orchestration of multiple clouds.')),
cfg.ListOpt('allowed_auth_uris',
default=[],
help=_('Allowed keystone endpoints for auth_uri when '
'multi_cloud is enabled. At least one endpoint needs '
'to be specified.'))]
# these options define baseline defaults that apply to all clients
default_clients_opts = [
cfg.StrOpt('endpoint_type',
default='publicURL',
help=_(
'Type of endpoint in Identity service catalog to use '
'for communication with the OpenStack service.')),
cfg.StrOpt('ca_file',
help=_('Optional CA cert file to use in SSL connections.')),
cfg.StrOpt('cert_file',
help=_('Optional PEM-formatted certificate chain file.')),
cfg.StrOpt('key_file',
help=_('Optional PEM-formatted file that contains the '
'private key.')),
cfg.BoolOpt('insecure',
default=False,
help=_("If set, then the server's certificate will not "
"be verified."))]
# these options can be defined for each client
# they must not specify defaults, since any option not defined in a client-
# specific group is looked up on the generic group above
clients_opts = [
cfg.StrOpt('endpoint_type',
help=_(
'Type of endpoint in Identity service catalog to use '
'for communication with the OpenStack service.')),
cfg.StrOpt('ca_file',
help=_('Optional CA cert file to use in SSL connections.')),
cfg.StrOpt('cert_file',
help=_('Optional PEM-formatted certificate chain file.')),
cfg.StrOpt('key_file',
help=_('Optional PEM-formatted file that contains the '
'private key.')),
cfg.BoolOpt('insecure',
help=_("If set, then the server's certificate will not "
"be verified."))]
heat_client_opts = [
cfg.StrOpt('url',
default='',
help=_('Optional heat url in format like'
' http://0.0.0.0:8004/v1/%(tenant_id)s.'))]
client_http_log_debug_opts = [
cfg.BoolOpt('http_log_debug',
default=False,
help=_("Allow client's debug log output."))]
revision_group = cfg.OptGroup('revision')
revision_opts = [
cfg.StrOpt('heat_revision',
default='unknown',
help=_('Heat build revision. '
'If you would prefer to manage your build revision '
'separately, you can move this section to a different '
'file and add it as another config option.'))]
def startup_sanity_check():
if (not cfg.CONF.stack_user_domain_id and
not cfg.CONF.stack_user_domain_name):
# FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration
LOG.warn(_LW('stack_user_domain_id or stack_user_domain_name not '
                     'set in heat.conf, falling back to using the default'))
else:
domain_admin_user = cfg.CONF.stack_domain_admin
domain_admin_password = cfg.CONF.stack_domain_admin_password
if not (domain_admin_user and domain_admin_password):
raise exception.Error(_('heat.conf misconfigured, cannot '
'specify "stack_user_domain_id" or '
'"stack_user_domain_name" without '
'"stack_domain_admin" and '
'"stack_domain_admin_password"'))
auth_key_len = len(cfg.CONF.auth_encryption_key)
if auth_key_len in (16, 24):
LOG.warn(
_LW('Please update auth_encryption_key to be 32 characters.'))
elif auth_key_len != 32:
raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '
'must be 32 characters'))
def list_opts():
yield None, rpc_opts
yield None, engine_opts
yield None, service_opts
yield paste_deploy_group.name, paste_deploy_opts
yield auth_password_group.name, auth_password_opts
yield revision_group.name, revision_opts
yield profiler_group.name, profiler_opts
yield 'clients', default_clients_opts
for client in ('nova', 'swift', 'neutron', 'cinder',
'ceilometer', 'keystone', 'heat', 'glance', 'trove',
'sahara'):
client_specific_group = 'clients_' + client
yield client_specific_group, clients_opts
yield 'clients_heat', heat_client_opts
yield 'clients_nova', client_http_log_debug_opts
yield 'clients_cinder', client_http_log_debug_opts
cfg.CONF.register_group(paste_deploy_group)
cfg.CONF.register_group(auth_password_group)
cfg.CONF.register_group(revision_group)
cfg.CONF.register_group(profiler_group)
for group, opts in list_opts():
cfg.CONF.register_opts(opts, group=group)
def _get_deployment_flavor():
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately
for appending to the application name.
"""
flavor = cfg.CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
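# For example, with "[paste_deploy] flavor = custom" in heat.conf this returns
# "-custom", so load_paste_app() below looks up "heat-api-custom" (for an
# app_name of "heat-api") in the paste config. The flavor value itself is
# deployment-specific; "custom" is just an assumed illustration.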
def _get_deployment_config_file():
"""
Retrieve the deployment_config_file config item, formatted as an
absolute pathname.
"""
config_path = cfg.CONF.find_file(
cfg.CONF.paste_deploy['api_paste_config'])
if config_path is None:
return None
return os.path.abspath(config_path)
def load_paste_app(app_name=None):
"""
Builds and returns a WSGI app from a paste config file.
We assume the last config file specified in the supplied ConfigOpts
object is the paste config file.
:param app_name: name of the application to load
:raises RuntimeError when config file cannot be located or application
cannot be loaded from config file
"""
if app_name is None:
app_name = cfg.CONF.prog
# append the deployment flavor to the application name,
# in order to identify the appropriate paste pipeline
app_name += _get_deployment_flavor()
conf_file = _get_deployment_config_file()
if conf_file is None:
raise RuntimeError(_("Unable to locate config file"))
try:
app = wsgi.paste_deploy_app(conf_file, app_name, cfg.CONF)
# Log the options used when starting if we're in debug mode...
if cfg.CONF.debug:
cfg.CONF.log_opt_values(logging.getLogger(app_name),
sys_logging.DEBUG)
return app
except (LookupError, ImportError) as e:
raise RuntimeError(_("Unable to load %(app_name)s from "
"configuration file %(conf_file)s."
"\nGot: %(e)r") % {'app_name': app_name,
'conf_file': conf_file,
'e': e})
| rh-s/heat | heat/common/config.py | Python | apache-2.0 | 19,187 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.compliance_objects import ComplianceBuilding
log = logging.getLogger(__name__)
class TestComplianceBuilding(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_compliancebuilding(self):
pyidf.validation_level = ValidationLevel.error
obj = ComplianceBuilding()
# real
var_building_rotation_for_appendix_g = 1.1
obj.building_rotation_for_appendix_g = var_building_rotation_for_appendix_g
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
        self.assertAlmostEqual(idf2.compliancebuildings[0].building_rotation_for_appendix_g, var_building_rotation_for_appendix_g)
| rbuffat/pyidf | tests/test_compliancebuilding.py | Python | apache-2.0 | 1,041 |
from .SlackIntegration import slackIntegration
import logging
from aiohttp import ClientSession
import asyncio
from time import perf_counter
class LoadApp():
def __init__(self):
self.logger = logging.getLogger('reliability')
self.app_visit_succeeded = 0
self.app_visit_failed = 0
self.tasks = []
async def get(self, url):
        # To simulate different users accessing the same app, do not reuse the session
async with ClientSession() as session:
async with session.get(url) as response:
code = response.status
result = await response.text()
self.logger.info(f"{str(code)} : load: {url}")
#print (f"{str(code)} : load: {url}")
if code == 200:
self.app_visit_succeeded += 1
else:
self.app_visit_failed += 1
# send slack message if response code is not 200
slackIntegration.post_message_in_slack(f"Access to {url} failed. Response code: {str(code)}")
def set_tasks(self, urls, num):
for url in urls:
for i in range(num):
task = asyncio.ensure_future(self.get(url))
self.tasks.append(task)
if __name__ == "__main__":
loadApp = LoadApp()
urls = ["https://www.google.com",]
concurrency = 10
loadApp.set_tasks(urls, concurrency)
loop = asyncio.get_event_loop()
start = perf_counter()
loop.run_until_complete(asyncio.wait(loadApp.tasks))
end = perf_counter()
print(f"Perf of {concurrency} visits is: {end - start} second.")
| mffiedler/svt | reliability/tasks/utils/LoadApp.py | Python | apache-2.0 | 1,646 |
#! /usr/bin/env python
import subprocess
import sys, threading, Queue
import os
import string
from time import gmtime, strftime
import urllib2
import urllib
import re, time
import urlparse
import os.path
import logging
#from google import search
import scan
import executemechanize
import extraction
import mechanize
from BeautifulSoup import BeautifulSoup
def domaindownload():  # this function downloads domain and website links from multiple blacklisted-website databases.
if os.path.isfile("list/list1.txt")==True:
print "Malicious website database from https://spyeyetracker.abuse.ch exists!\n"
print "Continuing with the next list."
else:
print "Fetching list from: https://spyeyetracker.abuse.ch"
command1="wget https://zeustracker.abuse.ch/blocklist.php?download=domainblocklist -O list/list1.txt"
os.system(command1)
#--proxy-user=username --proxy-password=password
if os.path.isfile("list/list2.txt")==True:
print "Malicious website database from https://zeustracker.abuse.ch/ exists!\n"
print "Continuing with the next list."
else:
print "Fetching list from: https://zeustracker.abuse.ch/"
command2="wget https://zeustracker.abuse.ch/blocklist.php?download=domainblocklist -O list/list2.txt"
os.system(command2)
if os.path.isfile("list/list3.txt")==True:
print "Malicious website database 3 exists!\n"
else:
print "Fetching list 3"
command3="wget http://hosts-file.net/hphosts-partial.asp -O list/list3.txt"
os.system(command3)
print "*****\nThis May Take a While\n"
mainfile=open("list/malwebsites.txt", 'w')
file1=open("list/list1.txt", 'r')
mainfile.write(file1.read())
file2=open("list/list2.txt", 'r')
mainfile.write(file2.read())
file3=open("list/list3.txt", 'r')
mainfile.write(file3.read())
mainfile.close()
file1.close()
file2.close()
file3.close()
def duplicateremover():
mylist=list()
fopen2=open("list/malwebsites.txt","r")
for line in fopen2:
line=line.strip()
if line.startswith("127.0.0.1"):
line=line[10:]
pass
if line.startswith("#"):
continue
if line.find('#') == 1:
continue
# if line=="invalid":
# continue
if not line:
continue
if line in mylist:
continue
if not (line.startswith("http://")) and not (line.startswith("https://")):
line="http://"+line
pass
# print line
mylist.append(line)
fopen2.close()
fopen3=open("list/malwebsites.txt","w")
for line in mylist:
fopen3.write(line+"\n")
fopen3.close()
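# Illustrative normalization (sample lines assumed): "127.0.0.1 bad.example"
# becomes "http://bad.example", lines starting with "#" are skipped, and
# duplicate entries are written to list/malwebsites.txt only once.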
print "List of Malicious websites were downloaded from three databases."
| Masood-M/yalih | malwebsites.py | Python | apache-2.0 | 2,519 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Load and save functions for zipped svg files."""
import faint.svg.parse_svg as parse_svg
import faint.svg.write_svg as write_svg
def load(filename, imageprops):
"""Load image from the zipped svg file."""
parse_svg.parse_svgz_file(filename, imageprops, "en")
def save(filename, canvas):
"""Save the image to the specified file as zipped svg."""
write_svg.write_svgz(filename, canvas)
| lukas-ke/faint-graphics-editor | py/faint/formatsvgz.py | Python | apache-2.0 | 1,026 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data processing utility functions."""
from typing import Tuple, List, Sequence
import attr
import six
from etcmodel.models import tokenization
_WHITESPACE_DELIMITER = u" \t\r\n\u202f"  # \u202f is the narrow no-break space
@attr.s(auto_attribs=True)
class TokenizedText:
"""Tokenized text with indices mappings."""
# The original text.
text: str = ""
# The Wordpiece tokenized text.
tokens: List[str] = attr.Factory(list)
# The Wordpiece token ids.
token_ids: List[int] = attr.Factory(list)
# The whitespace tokenized text.
unigrams: List[str] = attr.Factory(list)
# The indices mapping from chars to unigrams. The char at index `i` belongs to
# the unigram at index `chars_to_unigrams[i]`. A whitespace belongs to the
# previous unigram. Only used with WordPiece tokenizer.
chars_to_unigrams: List[int] = attr.Factory(list)
# The indices mapping from unigrams to tokens. The unigram at index `i` starts
# at the Wordpiece token at index `unigrams_to_tokens[i]`. Only used with
# WordPiece tokenizer.
unigrams_to_tokens: List[int] = attr.Factory(list)
# The indices mapping from tokens to unigrams. The token at index `i` belongs
# to the unigram at index `tokens_to_unigrams[i]`. Only used with WordPiece
# tokenizer.
tokens_to_unigrams: List[int] = attr.Factory(list)
# The indices mapping from chars to tokens. The char at index `i` belongs to
# the token at index `char_to_token_index[i]`. A whitespace belongs to the
# later token. Note that the `text` stored in this class is obtained from
# first SentencePiece tokenize the input text, then detokenize the tokens.
# Only used with SentencePiece tokenizer.
chars_to_tokens: List[int] = attr.Factory(list)
def whitespace_split_with_indices(
text: str) -> Tuple[List[str], List[int], List[int]]:
"""Whitespace splits a text into unigrams and returns indices mapping."""
if not isinstance(text, str):
raise ValueError("The input text is not of unicode format.")
unigrams = []
unigram_to_char_map = []
char_to_unigram_map = []
prev_is_separator = True
for i, c in enumerate(text):
if c in _WHITESPACE_DELIMITER:
prev_is_separator = True
else:
if prev_is_separator:
unigrams.append(c)
unigram_to_char_map.append(i)
else:
unigrams[-1] += c
prev_is_separator = False
char_to_unigram_map.append(len(unigrams) - 1)
return unigrams, unigram_to_char_map, char_to_unigram_map
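# Worked example (input assumed): whitespace_split_with_indices("ab cd")
# returns unigrams ["ab", "cd"], unigram_to_char_map [0, 3] (index of each
# unigram's first char), and char_to_unigram_map [0, 0, 0, 1, 1] -- the space
# at index 2 maps to the preceding unigram, matching the TokenizedText docs.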
def wordpiece_tokenize_with_indices(
doc_unigrams: Sequence[str], tokenizer: tokenization.FullTokenizer
) -> Tuple[List[str], List[int], List[int]]:
"""Wordpiece tokenizes unigrams to tokens and returns indices mapping."""
token_to_unigram_map = []
unigram_to_token_map = []
doc_tokens = []
for (i, token) in enumerate(doc_unigrams):
unigram_to_token_map.append(len(doc_tokens))
sub_tokens = tokenizer.tokenize(token)
token_to_unigram_map.extend([i] * len(sub_tokens))
doc_tokens.extend(sub_tokens)
return doc_tokens, unigram_to_token_map, token_to_unigram_map
def get_wordpiece_tokenized_text(
text: str, tokenizer: tokenization.FullTokenizer) -> TokenizedText:
"""Gets WordPiece TokenizedText for a text with indices mapping."""
unigrams, _, chars_to_unigrams = whitespace_split_with_indices(text)
tokens, unigrams_to_tokens, tokens_to_unigrams = (
wordpiece_tokenize_with_indices(unigrams, tokenizer))
token_ids = tokenizer.convert_tokens_to_ids(tokens)
tokenized_text = TokenizedText()
tokenized_text.text = text
tokenized_text.tokens = tokens
tokenized_text.token_ids = token_ids
tokenized_text.unigrams = unigrams
tokenized_text.chars_to_unigrams = chars_to_unigrams
tokenized_text.unigrams_to_tokens = unigrams_to_tokens
tokenized_text.tokens_to_unigrams = tokens_to_unigrams
return tokenized_text
def sentencepiece_detokenize(tokens: Sequence[str]) -> str:
"""Recovers SenencePiece token to original text, with whitespace removal."""
spiece_token = tokenization.SPIECE_UNDERLINE.decode("utf-8")
tokens = list(tokens)
if tokens and tokens[0].startswith(spiece_token):
tokens[0] = tokens[0][1:]
return "".join(tokens).replace(spiece_token, " ")
def get_sentencepiece_tokenized_text(
text: str, tokenizer: tokenization.FullTokenizer) -> TokenizedText:
"""Gets SentencePiece TokenizedText for a text with indices mapping."""
tokens = [six.ensure_text(tk, "utf-8") for tk in tokenizer.tokenize(text)]
token_ids = tokenizer.convert_tokens_to_ids(tokens)
chars_to_tokens = []
for i, token in enumerate(tokens):
num_chars = len(token)
if i == 0:
num_chars -= 1
chars_to_tokens.extend([i] * num_chars)
tokenized_text = TokenizedText()
tokenized_text.text = sentencepiece_detokenize(tokens)
tokenized_text.tokens = tokens
tokenized_text.token_ids = token_ids
tokenized_text.chars_to_tokens = chars_to_tokens
return tokenized_text
def find_char_spans(text: str, substring: str) -> List[Tuple[int, int]]:
"""Finds all substring occurrence char level spans (inclusive)."""
if not substring:
return []
char_spans = []
char_begin = text.find(substring)
while char_begin != -1:
char_end = char_begin + len(substring) - 1
char_spans.append((char_begin, char_end))
char_begin = text.find(substring, char_end + 1)
return char_spans
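# Worked example (input assumed): find_char_spans("abcabc", "abc") returns
# [(0, 2), (3, 5)]. Matches are non-overlapping because each search resumes
# after the previous match, so find_char_spans("aaa", "aa") returns [(0, 1)].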
def _improve_answer_span(
doc_tokens: Sequence[str],
unimproved_span: Tuple[int, int],
orig_answer_text: str,
tokenizer: tokenization.FullTokenizer,
):
"""Returns answer token spans that better match the annotated answer.
This function is branched from the original BERT `run_squad.py` code
Usually question answer span annotations are character based. We first project
them to whitespace-tokenized words (unigrams). But then after WordPiece
tokenization, we can often find a "better match". For example:
Question: What year was John Smith born?
Context: The leader was John Smith (1895-1943).
Answer: 1895
The original whitespace-tokenized answer will be "(1895-1943).". However
after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
the exact answer, 1895. The purpose of this function is to find such "better
match".
However, this is not always possible. Consider the following:
  Question: What country is the top exporter of electronics?
  Context: The Japanese electronics industry is the largest in the world.
Answer: Japan
In this case, the annotator chose "Japan" as a character sub-span of
the word "Japanese". Since our WordPiece tokenizer does not split
"Japanese", we just use "Japanese" as the annotation. This is expected to be
fairly rare.
Args:
doc_tokens: Sequence of Text, the wordpiece tokenized tokens of the doc.
unimproved_span: Tuple of two ints, the unimproved answer token span. In the
first example, it is the token span for "(" and ")".
orig_answer_text: Text, the original answer text. In the first example, it
is "1895".
tokenizer: FullTokenizer, wordpiece tokenizer to tokenize the original
answer text.
Returns:
Tuple of two ints, the improved answer token span. In the first example, it
corresponds to the answer token span for "1895".
"""
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_begin in range(unimproved_span[0], unimproved_span[1] + 1):
for new_end in range(unimproved_span[1], new_begin - 1, -1):
text_span = " ".join(doc_tokens[new_begin:(new_end + 1)])
if text_span == tok_answer_text:
return new_begin, new_end
return unimproved_span
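# Worked example mirroring the docstring (tokenizer behavior assumed): with
# doc_tokens = ["(", "1895", "-", "1943", ")", "."], unimproved_span = (0, 5),
# and orig_answer_text = "1895", a tokenizer that keeps "1895" whole makes
# tok_answer_text == "1895", and the nested scan returns the tighter span
# (1, 1).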
def _convert_answer_spans(answer_unigram_spans: Sequence[Tuple[int, int]],
unigram_to_token_map: Sequence[int],
num_tokens: int) -> List[Tuple[int, int]]:
"""Converts answer unigram spans to token spans."""
answer_token_spans = []
for unigram_begin, unigram_end in answer_unigram_spans:
token_begin = unigram_to_token_map[unigram_begin]
if unigram_end + 1 < len(unigram_to_token_map):
token_end = unigram_to_token_map[unigram_end + 1] - 1
else:
token_end = num_tokens - 1
answer_token_spans.append((token_begin, token_end))
return answer_token_spans
def find_answer_spans_wordpiece(
tokenized_context: TokenizedText, answer: str,
tokenizer: tokenization.FullTokenizer) -> List[Tuple[int, int]]:
"""Finds all answer occurrence WordPiece token spans (inclusive).
Args:
tokenized_context: WordPiece tokenized context with indices mapping.
answer: Answer string.
tokenizer: A WordPiece tokenizer.
Returns:
A list of (begin, end) WordPiece token level indices (inclusive) of all the
answer occurrences in the context. If the answer is empty or there is no
answer occurrence in the context, return empty list.
"""
# The answer occurrence always corresponds to char level occurrence.
# This is to avoid the following case,
# context: "..Italian composer who wrote 39 operas.."
# answer: "opera"
# Since both "operas" and "opera" are in the vocab, simply searching token
  # level spans will miss this kind of occurrence.
token_spans = []
for char_begin, char_end in find_char_spans(tokenized_context.text, answer):
unigram_span = (tokenized_context.chars_to_unigrams[char_begin],
tokenized_context.chars_to_unigrams[char_end])
unimproved_token_span = _convert_answer_spans(
[unigram_span], tokenized_context.unigrams_to_tokens,
len(tokenized_context.tokens))[0]
token_spans.append(
_improve_answer_span(tokenized_context.tokens, unimproved_token_span,
answer, tokenizer))
return token_spans
def find_answer_spans_sentencepiece(tokenized_context: TokenizedText,
answer: str) -> List[Tuple[int, int]]:
"""Finds all answer occurrence SentencePiece token spans (inclusive).
Args:
tokenized_context: SentencePiece tokenized context with indices mapping.
answer: Answer string.
Returns:
A list of (begin, end) WordPiece token level indices (inclusive) of all the
answer occurrences in the context. If the answer is empty or there is no
answer occurrence in the context, return empty list.
"""
# The answer occurrence always corresponds to char level occurrence.
# This is to avoid the following case,
# context: "..Italian composer who wrote 39 operas.."
# answer: "opera"
# Since both "operas" and "opera" are in the vocab, simply searching token
# level spans will miss such kind of occurrence.
token_spans = []
for char_begin, char_end in find_char_spans(tokenized_context.text, answer):
token_spans.append((tokenized_context.chars_to_tokens[char_begin],
tokenized_context.chars_to_tokens[char_end]))
return token_spans
def wordpiece_tokens_to_normalized_text(wordpiece_tokens: Sequence[str]) -> str:
"""Concatenates wordpiece tokens to a normalized text and cleans up.
  The wordpiece tokens are results from BERT tokenization. They may contain
  '##' or ' ##' markers and some extra whitespace. The function first
  concatenates the tokens and then removes those extra symbols and whitespaces.
Args:
wordpiece_tokens: A sequence of wordpiece tokens from BERT tokenization.
Returns:
The text by concatenating the wordpiece tokens and cleaning up.
"""
text = " ".join(wordpiece_tokens)
# De-tokenize WordPieces that have been split off.
text = text.replace(" ##", "")
text = text.replace("##", "")
# Clean whitespace
text = text.strip()
text = " ".join(text.split())
return text
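# Editor's addition: an illustrative round trip (tokens invented for this
# example; not part of the original module).
def _editor_demo_normalized_text():
  assert wordpiece_tokens_to_normalized_text(
      ["un", "##aff", "##able", "text"]) == "unaffable text"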
| google-research/google-research | etcmodel/models/hotpotqa/data_utils.py | Python | apache-2.0 | 12,364 |
# -*- coding: utf-8 -*-
import pairing_heap as pheap
from copy import deepcopy,copy
import threading
import Queue
import requests
from requests.auth import HTTPDigestAuth
import json
import sys
import communication
import config
import time
import L_sprit
# Global variable declarations
LIMIT_SELECTION = 0
SELECTON_RATE = 0
EXCHANGE_RATE = 0
MODE_CHANGE_THRESHOLD = 0.50
ALL_COST = 0
columns = 0
rows = 0
mode_flag = "N"
fwd_ahead = []
back_ahead = []
thresh = MODE_CHANGE_THRESHOLD
class Node :
def __init__ (self, board, selection,exchange,distance):
self.board = board
self.selection = selection
self.exchange = exchange
self.mydistance = distance
    def get_next_nodes(self): # Returns the nodes adjacent to the given node
nodes_dic = {}
board = self.board
        for i in range(len(board)): # Add a node for every choice of selected cell.
for j in range(len(board[0])):
x,y = (i,j)
                # Swap with the right neighbor
nodes_dic[((i,j),"R")] = Node(exchange(board,(x, y), (x + 1, y)) , (x + 1, y),(x,y),0)
                # Swap with the left neighbor
if x == 0:
                    # No move to the left exists
nodes_dic[((i,j),"L")] = Node(None, (x - 1, y), (x,y),0)
else:
                    # Reuse the "R" node of the cell one step to the left
#nodes_dic[((i,j),"L")] = Node(exchange(board,(x, y), (x - 1, y)) , (x - 1, y))
nodes_dic[((i,j),"L")] = Node(nodes_dic[((i - 1, j), "R")].board, (x - 1, y), (x, y),0)
                # Swap with the upper neighbor
if y == 0:
                    # No move upward exists
nodes_dic[((i,j),"U")] = Node(None, (x, y - 1), (x,y), 0)
else:
                    # Reuse the "D" node of the cell one step above
#nodes_dic[((i,j),"U")] = Node(exchange(board,(x, y), (x, y - 1)) , (x, y - 1))
nodes_dic[((i,j),"U")] = Node(nodes_dic[((i, j - 1), "D")].board, (x, y - 1), (x,y), 0)
                # Swap with the lower neighbor
nodes_dic[((i,j),"D")] = Node(exchange(board,(x, y), (x, y + 1)) , (x, y + 1),(x,y),0)
return nodes_dic
def make_problem(w, h):
arr = []
for i in range(w):
column = []
for j in range(h):
column.append((i, j))
arr.append(column)
return arr
def transpose(arr2d): # Returns the transposed 2D array
result = []
for i in range(len(arr2d[0])):
arr = []
for j in range(len(arr2d)):
arr.append(arr2d[j][i])
result.append(arr)
return result
def operations_to_list(operations): # Converts the nested operations pairs back into a plain list
pair = operations
lst = []
while pair != ():
lst.append(pair[0])
pair = pair[1]
return lst
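# Editor's addition: small self-contained sketches of the helpers above
# (example values invented for illustration).
def _editor_demo_board_helpers():
    # make_problem builds a solved board whose cell (i, j) holds (i, j).
    assert make_problem(2, 2) == [[(0, 0), (0, 1)], [(1, 0), (1, 1)]]
    # transpose swaps rows and columns.
    assert transpose([[1, 2], [3, 4], [5, 6]]) == [[1, 3, 5], [2, 4, 6]]
    # operations are nested (head, tail) pairs terminated by ().
    assert operations_to_list(("D", ("S00", ()))) == ["D", "S00"]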
def exchange (then_board, start, destination): # Returns a board equal to then_board with start and destination swapped
    # Deep-copy only the modified columns
x, y = start
new_x, new_y = destination
if not(0 <= new_x < len(then_board) and 0 <= new_y < len(then_board[0])):
return None
startImg = then_board[x][y]
destImg = then_board[new_x][new_y]
return [
then_board[x] if x != start[0] and x != destination[0]
else [destImg if (x, y) == start
else (startImg if (x, y) == destination else then_board[x][y])
for y in range(len(then_board[0]))]
for x in range(len(then_board))]
    # NOTE: the return above makes the rest of this function unreachable.
    board = copy(then_board)
board[x] = deepcopy(then_board[x])
if x != new_x:
board[new_x] = deepcopy(then_board[new_x])
destination_element = board[new_x][new_y]
board[new_x][new_y] = board[x][y]
board[x][y] = destination_element
return board
def create_distance_table(goal): # Returns a lookup table used for distance computation
table = []
for i in range(len(goal)):
col = []
for j in range(len(goal[0])):
col.append(None)
table.append(col)
for i in range(len(goal)):
for j in range(len(goal[0])):
(goal_x, goal_y) = goal[i][j]
table[goal_x][goal_y] = (i, j)
return table
def distance_to_goal(table, board): # Returns the estimated distance from board to the goal. Arguments: (distance table, board)
ans = 0
for i in range(len(board)):
for j in range(len(board[0])):
(board_x, board_y) = board[i][j]
a = table[board_x][board_y]
b = (i, j)
x = abs(a[0] - b[0])
y = abs(a[1] - b[1])
ans += x + y
return ans * EXCHANGE_RATE
def point_md(point,board, table):
table_x, table_y = board[point[0]][point[1]]
a = table[table_x][table_y]
x = abs(a[0] - point[0])
y = abs(a[1] - point[1])
ans = x + y
return ans
def fast_distance_to_goal(looking_node,node, table):
parent_distance = looking_node.mydistance
parent_board = looking_node.board
selection = node.selection
exchange = node.exchange
child_board = node.board
exchange_distance = point_md(selection,parent_board, table) - point_md(exchange ,child_board, table)
selection_distance = point_md(exchange ,parent_board, table) - point_md(selection,child_board, table)
child_distance = parent_distance - (exchange_distance + selection_distance)
node.mydistance = child_distance
return child_distance * EXCHANGE_RATE
def tuplenode (node) : # Returns the node converted to tuple form
return (tuple([tuple(a) for a in node.board]) , node.selection)
def caliculate_cost (operations): # Returns the cost of the current operations
pair = operations
cost = 0
lst = []
while pair != ():
if pair[0][0] == "S":
cost += SELECTON_RATE
else:
cost += EXCHANGE_RATE
pair = pair[1]
return cost
def count_missmatch_image(board1, board2): # Returns the number of mismatched images between board1 and board2
counts = 0
for i in range(len(board1)):
for j in range(len(board1[0])):
try:
if board1[i][j] != board2[i][j]:
counts += 1
except:
print "----"
print board1
print board2
sys.exit()
return counts
def count_selection(operations): # Counts the selection operations
count = 0
for op in operations:
if op[0] == "S":
count += 1
return count
def encode_answer_format(operations_list,L_answer_text):
selectcount = 1
changecount = 0
ans = ""
word = ""
for i in range(len(operations_list)):
if((operations_list[i] == "L")or(operations_list[i] == "R")or(operations_list[i] == "U")or(operations_list[i] == "D")):
word += operations_list[i]
changecount +=1
else:
ans = "\r\n" + word[::-1] + ans
ans = "\r\n" + str(changecount) +ans
ans = "\r\n" + operations_list[i][1:] + ans
word = ""
changecount = 0
selectcount += 1
ans = str(selectcount) + "\r\n" +L_answer_text+ ans
return ans
# Execute the moves in order from the head of the list
def move_position(move_list, pos):
pos = list(pos)
for move in move_list:
if move == "L":
pos[0] -= 1
elif move == "R":
pos[0] += 1
elif move == "U":
pos[1] -= 1
elif move == "D":
pos[1] += 1
return tuple(pos)
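# Editor's addition: an illustrative check of the coordinate bookkeeping above
# (moves invented for the example).
def _editor_demo_move_position():
    # Two moves right and one move down shift (0, 0) to (2, 1).
    assert move_position(["R", "R", "D"], (0, 0)) == (2, 1)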
def reverse_operations(operations):
reverse_table = {
"L": "R",
"R": "L",
"U": "D",
"D": "U"
}
result = []
moves = []
for op in operations:
if op[0] == "S":
pos = (int(op[1], 16), int(op[2], 16))
rev_moves = [reverse_table[a] for a in moves]
new_pos = move_position(reversed(moves), pos)
new_op = "S%X%X" % new_pos
result.append(new_op)
result += rev_moves
moves = []
else:
moves.append(op)
rev_moves = [reverse_table[a] for a in moves]
result += rev_moves
return result
def astar_step(queue, checked_nodes, table, min_distance, tag, fwd_ahead, back_ahead):
    dummy, looking_node, operations, selection_count = queue.pop() # Pop the head of the queue
g_star = caliculate_cost(operations)
    checked_nodes[(tuplenode(looking_node),tag)] = operations # Record the node as visited in the checked_nodes set
    next_nodes = looking_node.get_next_nodes() # Gather the nodes adjacent to looking_node (up/down/left/right) into the next_nodes dict
    for key, node in next_nodes.items() : # Iterate over every adjacent node
cost = 0
select = False
if key[0] != looking_node.selection :
select = True
cost += SELECTON_RATE
added_operation = (key[1],("S%X%X"%key[0],operations))
else:
added_operation = (key[1],operations)
        if node.board != None and not((tuplenode(node),tag) in checked_nodes): # Push each adjacent node onto the queue unless it is already in checked_nodes.
h_star = fast_distance_to_goal(looking_node,node, table)
f_star = g_star + h_star
if select:
new_selection_count = selection_count + 1
else:
new_selection_count = selection_count
if new_selection_count <= LIMIT_SELECTION:
queue.push((f_star + cost + EXCHANGE_RATE, node, added_operation, new_selection_count))
if h_star <= min_distance:
min_distance = h_star
print "%s distance=%d tag=%s" % (operations_to_list(added_operation), h_star, tag)
#if int(h_star) == 0:
#cost = -1000000000
#print "stop!"
return min_distance
def forward(problem, answer, checked_nodes,L_answer_text, result_queue):
global mode_flag ,fwd_ahead, back_ahead, thresh
ans_status = 0
distance_table = create_distance_table(answer)
static_h_star = distance_to_goal(distance_table,problem)/EXCHANGE_RATE
print static_h_star
    queue = pheap.Empty(key=lambda a: a[0]) # Create an empty priority queue
forward_min = 999999999999
my_tag = "f"
back_tag = "b"
true_ans = answer
    next_nodes = Node(problem,(0,0),(0,0),static_h_star).get_next_nodes() # Gather the nodes adjacent to problem (up/down/left/right) into the next_nodes dict
    for key, node in next_nodes.items(): # Iterate over every adjacent node
added_operation = (key[1],("S%X%X"%key[0],()))
if node.board != None :
h_star = distance_to_goal(distance_table,node.board)
h_star = fast_distance_to_goal(Node(problem,(0,0),(0,0),static_h_star),node, distance_table)
queue.push((h_star+SELECTON_RATE+EXCHANGE_RATE, node, added_operation, 1))
while not queue.is_empty:
operations = queue.element[2]
        if queue.element[1].board == true_ans: # Stop once the popped board matches the answer
print "forward goal"
print operations_to_list(operations)
print "cost=%d" % caliculate_cost(operations)
ALL_COST = caliculate_cost(operations)
            result_queue.put(encode_answer_format(operations_to_list(operations), L_answer_text))
return
if (tuplenode(queue.element[1]),back_tag) in checked_nodes:
print "ぶつかったforward"
fwd_op = list(reversed(operations_to_list(operations)))
fwd_cost = caliculate_cost(operations)
back_op = checked_nodes[(tuplenode(queue.element[1]),back_tag)]
back_cost = caliculate_cost(back_op) - SELECTON_RATE
back_op = reverse_operations(operations_to_list(back_op))[1:]
full_op = fwd_op + back_op
full_cost = fwd_cost + back_cost
ALL_COST = full_cost
result_queue.put(encode_answer_format(list(reversed(full_op)), L_answer_text))
return
fwd_ahead = queue.element[1].board
if count_missmatch_image(fwd_ahead, back_ahead) <= int(rows * columns * thresh):# and mode_flag == "N":
print "mode change!"
mode_flag = "A"
thresh *= MODE_CHANGE_THRESHOLD
ans_status = 0
if mode_flag == "A" and ans_status == 0:
print "change answer!"
answer = back_ahead
distance_table = create_distance_table(answer)
print distance_table
ans_status = 1
forward_min = min(forward_min, astar_step(queue, checked_nodes, distance_table, forward_min, my_tag, fwd_ahead, back_ahead))
def back(problem, answer, checked_nodes, L_answer_text, result_queue):
global mode_flag, fwd_ahead, back_ahead, thresh
ans_status = 0
distance_table = create_distance_table(problem)
static_h_star = distance_to_goal(distance_table,answer)/EXCHANGE_RATE
print static_h_star
    queue = pheap.Empty(key=lambda a: a[0]) # Create an empty priority queue
back_min = 999999999999
my_tag = "b"
fwd_tag = "f"
true_prob = problem
    next_nodes = Node(answer,(0,0), (0,0),static_h_star).get_next_nodes() # Gather the nodes adjacent to answer (up/down/left/right) into the next_nodes dict
    for key, node in next_nodes.items() : # Iterate over every adjacent node
added_operation = (key[1],("S%X%X"%key[0],()))
if node.board != None :
h_star = fast_distance_to_goal(Node(answer,(0,0),(0,0),static_h_star),node, distance_table)
queue.push((h_star+SELECTON_RATE+EXCHANGE_RATE, node, added_operation, 1))
while not queue.is_empty:
operations = queue.element[2]
        if queue.element[1].board == true_prob: # Stop once the popped board matches the original problem
print "back goal"
print operations_to_list(operations)
print "cost=%d" % caliculate_cost(operations)
ALL_COST = caliculate_cost(operations)
            result_queue.put(encode_answer_format(list(reversed(reverse_operations(operations_to_list(operations)))), L_answer_text))
return
if (tuplenode(queue.element[1]),fwd_tag) in checked_nodes:
print "ぶつかったback"
fwd_op = checked_nodes[(tuplenode(queue.element[1]),fwd_tag)]
fwd_op = list(reversed(operations_to_list(fwd_op)))
fwd_cost = caliculate_cost(operations)
back_op = operations
back_cost = caliculate_cost(back_op) - SELECTON_RATE
back_op = reverse_operations(operations_to_list(back_op))[1:]
full_op = fwd_op + back_op
full_cost = fwd_cost + back_cost
ALL_COST = full_cost
result_queue.put(encode_answer_format(list(reversed(full_op)), L_answer_text))
return
back_ahead = queue.element[1].board
if count_missmatch_image(fwd_ahead, back_ahead) <= int(rows * columns * thresh):# and mode_flag == "N":
print "mode change!"
mode_flag = "A"
thresh *= MODE_CHANGE_THRESHOLD
ans_status = 0
if mode_flag == "A" and ans_status == 0:
print "change answer!"
problem = fwd_ahead
distance_table = create_distance_table(problem)
print distance_table
ans_status = 1
back_min = min(back_min, astar_step(queue, checked_nodes, distance_table, back_min, my_tag, fwd_ahead, back_ahead))
def solve(sortedImages, splitColumns, splitRows, limit, sel_rate, exc_rate, target_columns, target_rows):
global LIMIT_SELECTION, SELECTON_RATE, EXCHANGE_RATE, rows, columns, fwd_ahead, back_ahead
LIMIT_SELECTION = limit
SELECTON_RATE = sel_rate
EXCHANGE_RATE = exc_rate
problem = make_problem(splitColumns, splitRows)
answer = sortedImages
columns = splitColumns
rows = splitRows
    checked_nodes = {} #set() # Set of already-checked nodes
problem,L_answer_text = L_sprit.corner_L_sprit(target_columns, target_rows, problem,answer)
LIMIT_SELECTION -= 1
fwd_ahead = problem
back_ahead = answer
result_queue = Queue.Queue()
fwd_thr = threading.Thread(target=forward, name="fwd", args=(problem, answer, checked_nodes, L_answer_text, result_queue))
back_thr = threading.Thread(target=back, name="back", args=(problem, answer, checked_nodes, L_answer_text, result_queue))
fwd_thr.daemon = True
back_thr.daemon = True
fwd_thr.start()
back_thr.start()
while True:
try:
            # Time out every second.
            # Queue.Empty is raised when nothing is in the queue at the timeout.
return result_queue.get(True, 1)
except Queue.Empty:
            # Ignore the timeout exception and keep waiting
pass
except KeyboardInterrupt:
print "aborting"
            # Set the kill flag so the threads terminate
kill_flag = True
sys.exit(0)
#main
master = ""
target_columns = 4
target_rows = 4
if len(sys.argv) == 3:
master = sys.argv[1]
target_columns,target_rows = sys.argv[2].split("-")
elif len(sys.argv) == 2:
if '.' in sys.argv[1]:
master = sys.argv[1]
elif '-' in sys.argv[1]:
target_columns,target_rows = sys.argv[1].split("-")
master = config.master
else:
master = config.master
para = communication.get_problem(master)
ans_str = solve(para['answer'], para['columns'], para['rows'], para['lim_select'], para['selection_rate'], para['exchange_rate'],int(target_columns),int(target_rows))
communication.post(master, ans_str)
| SP2LC/procon25-main | A-star/L-dynamic.py | Python | apache-2.0 | 17,085 |
from xml.etree import ElementTree
from os.path import dirname, realpath
directory_of_sources = dirname(realpath(__file__)) + "/sources/"
d = {}
d['AR'] = "Arabic"
d['EN'] = "English"
d['ES'] = "Spanish"
d['FR'] = "French"
d['RU'] = "Russian"
d['ZH'] = "Mandarin"
filepath = '/tmp/uncorpora_plain_20090831.tmx'
count = 0
for event, elem in ElementTree.iterparse(filepath, events=('start', 'end', 'start-ns', 'end-ns')):
if event == "start":
print event, elem
if elem.tag == "tu":
uid = elem.attrib['tuid']
if elem.tag == "tuv":
language = elem.attrib['{http://www.w3.org/XML/1998/namespace}lang']
if elem.tag == "seg":
text = elem.text
print language, "text is", text
if text and len(text) > 200:
with open(directory_of_sources + d[language] + "/" + uid, "wb") as f:
f.write(text.encode("utf-8"))
count += 1
if count == 50000:
break
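# Editor's note: an invented sketch of the TMX structure the loop above walks;
# each <tu> holds per-language <tuv> elements whose <seg> text is written to
# sources/<Language>/<tuid> once it exceeds 200 characters.
_EDITOR_EXAMPLE_TMX_FRAGMENT = """
<tu tuid="1">
  <tuv xml:lang="EN"><seg>...a long English segment...</seg></tuv>
  <tuv xml:lang="FR"><seg>...un long segment en francais...</seg></tuv>
</tu>
"""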
| DanielJDufour/language-detector | language_detector/prep/parse_un.py | Python | apache-2.0 | 986 |
# -*- coding: utf-8 -*-
"""
module that specified how we choose paramaters based on current search database
properties
"""
from __future__ import absolute_import, division, print_function
#import six
import utool as ut
#import numpy as np
#import vtool as vt
#from ibeis.algo.hots import hstypes
#from ibeis.algo.hots import match_chips4 as mc4
#from ibeis.algo.hots import distinctiveness_normalizer
#from six.moves import filter
print, print_, printDBG, rrr, profile = ut.inject(__name__, '[autoparams]')
@profile
def choose_vsmany_K(num_names, qaids, daids):
"""
TODO: Should also scale up the number of checks as well
method for choosing K in the initial vsmany queries
Ignore:
>>> # DISABLE_DOCTEST
>>> # Shows plot for K vs number of names
>>> from ibeis.algo.hots.automated_params import * # NOQA
        >>> import ibeis
        >>> import numpy as np
        >>> import plottool as pt  # NOQA (assumed plotting helper; doctest is disabled)
>>> from ibeis import constants as const
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> valid_aids = ibs.get_valid_aids(species=const.TEST_SPECIES.ZEB_PLAIN)
>>> num_names = np.arange(0, 1000)
>>> num_names_slope = .1
>>> K_max = 10
>>> K_min = 1
>>> K_list = np.floor(num_names_slope * num_names)
>>> K_list[K_list > K_max] = K_max
>>> K_list[K_list < K_min] = K_min
>>> clip_index_list = np.where(K_list >= K_max)[0]
>>> clip_index = clip_index_list[min(len(clip_index_list) - 1, 10)]
>>> K_list = K_list[0:clip_index]
>>> num_names = num_names[0:clip_index]
>>> pt.plot2(num_names, K_list, x_label='num_names', y_label='K',
... equal_aspect=False, marker='g-', pad=1, dark=True)
>>> pt.update()
"""
#K = ibs.cfg.query_cfg.nn_cfg.K
# TODO: paramaterize in config
    num_names_slope = .1  # increase K by one every ten names
K_max = 10
K_min = 1
num_names_lower = K_min / num_names_slope
num_names_upper = K_max / num_names_slope
if num_names < num_names_lower:
K = K_min
elif num_names < num_names_upper:
K = num_names_slope * num_names
else:
K = K_max
with ut.embed_on_exception_context:
if len(ut.intersect_ordered(qaids, daids)) > 0:
# if self is in query bump k
K += 1
return K
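# Editor's addition: a hypothetical sanity check of the schedule above
# (aid lists invented; requires utool to be importable at call time).
def _editor_demo_choose_vsmany_K():
    assert choose_vsmany_K(5, [1], [2]) == 1               # below the lower knee
    assert abs(choose_vsmany_K(50, [1], [2]) - 5) < 1e-9   # on the linear ramp
    assert choose_vsmany_K(5000, [1], [1]) == 11           # clamped at K_max, +1 for self-query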
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.algo.hots.automated_params
python -m ibeis.algo.hots.automated_params --allexamples
python -m ibeis.algo.hots.automated_params --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| SU-ECE-17-7/ibeis | _broken/_old_qt_hs_matcher/automated_params.py | Python | apache-2.0 | 2,697 |
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552.
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime
import time
import tempfile
from stratuslab.CloudConnectorFactory import CloudConnectorFactory
from stratuslab.Util import sshCmd
from stratuslab.Util import sshCmdWithOutput
from stratuslab.Util import waitUntilPingOrTimeout
from stratuslab.Util import getHostnameFromUri
import stratuslab.Util as Util
from Exceptions import ValidationException
from Exceptions import ExecutionException
from Authn import AuthnFactory
from stratuslab.system.ubuntu import installCmd as aptInstallCmd
from stratuslab.system.ubuntu import updateCmd as aptUpdateCmd
from stratuslab.system.ubuntu import cleanPackageCacheCmd as aptCleanPackageCacheCmd
from stratuslab.system.centos import installCmd as yumInstallCmd
from stratuslab.system.centos import updateCmd as yumUpdateCmd
from stratuslab.system.centos import cleanPackageCacheCmd as yumCleanPackageCacheCmd
from stratuslab.image.Image import Image
from stratuslab.system import Systems
from stratuslab import Defaults
from stratuslab.marketplace.ManifestDownloader import ManifestDownloader
from stratuslab.Monitor import Monitor
from stratuslab.vm_manager.vm_manager import VmManager
from stratuslab.vm_manager.vm_manager_factory import VmManagerFactory
class Creator(object):
VM_START_TIMEOUT = 60 * 10
VM_PING_TIMEOUT = 60 * 5
excludeFromCreatedImageDefault = ['/tmp/*',
'/etc/ssh/ssh_host_*',
'/root/.ssh/{authorized_keys,known_hosts}']
def __init__(self, image, configHolder):
self.image = image
self.configHolder = configHolder
self.newImageGroupName = ''
self.newInstalledSoftwareName = ''
self.newInstalledSoftwareVersion = ''
self.newImageGroupVersion = ''
self.newImageGroupVersionWithManifestId = False
self.author = ''
self.title = ''
self.comment = ''
self.os = ''
self.authorEmail = ''
self.marketplaceEndpointNewimage = ''
self.endpoint = ''
self.extraOsReposUrls = ''
self.packages = ''
self.scripts = ''
self.prerecipe = ''
self.recipe = ''
self.verboseLevel = ''
self.shutdownVm = True
self.signManifest = True
self.vmStartTimeout = self.VM_START_TIMEOUT
self.vmPingTimeout = self.VM_PING_TIMEOUT
self.options = VmManager.defaultRunOptions()
self.options.update(configHolder.options)
self.configHolder.options.update(self.options)
configHolder.assign(self)
self._set_stdouterr()
credentials = AuthnFactory.getCredentials(self)
self.cloud = CloudConnectorFactory.getCloud(credentials)
self.cloud.setEndpoint(self.endpoint)
self.runner = None
self.vmAddress = None
self.vmId = None
self.vmIp = None
self.vmName = 'creator'
self.userPublicKeyFile = self.options.get('userPublicKeyFile',
Defaults.sshPublicKeyLocation)
        # Slice off the '.pub' suffix; str.strip('.pub') would strip characters.
        self.userPrivateKeyFile = self.userPublicKeyFile[:-4] if self.userPublicKeyFile.endswith('.pub') else self.userPublicKeyFile
self.mainDisk = ''
self.extraDisk = ''
self.mountPointExtraDisk = '/media'
self.imageFile = ''
self.imageFileBundled = ''
self.excludeFromCreatedImage = \
self.excludeFromCreatedImageDefault + \
self.options.get('excludeFromCreatedImage', '').split(',')
self.installer = self.options.get('installer')
self.targetImageUri = ''
self.targetManifestUri = ''
self.manifest = ''
self.manifestObject = None
self.newManifestFileName = None
self.manifestLocalFileName = ''
self.__listener = CreatorBaseListener()
def _set_stdouterr(self):
Util.set_stdouterr(self)
def printDetail(self, msg):
return Util.printDetail(msg, self.verboseLevel, Util.VERBOSE_LEVEL_NORMAL)
def create(self):
self._printAction('Starting image creation')
self.startNode()
try:
self.buildNodeIncrement()
self._printAction('Finished building image increment.')
self._printAction('Please check %s for new image ID and instruction.' %
self.authorEmail)
finally:
self._shutdownNode()
self._localCleanUp()
def startNode(self):
self._imageExists()
self._retrieveManifest()
self.__setAttributesFromManifest()
self.__createRunner()
self._startMachine()
self._waitMachineNetworkUpOrAbort()
self._checkIfCanConnectToMachine()
def buildNodeIncrement(self):
self._executePrerecipe()
self._installPackages()
self._executeRecipe()
self._executeScripts()
def _printAction(self, msg):
Util.printAction(msg)
self._notifyOnAction(msg)
def _printStep(self, msg):
Util.printStep(msg)
self._notifyOnStep(msg)
def _printError(self, msg):
self._notifyOnError(msg)
Util.printError(msg)
def setListener(self, listener):
if listener:
self.__listener = listener
def _notifyOnAction(self, note):
self._notify('Action', note)
def _notifyOnStep(self, note):
self._notify('Step', note)
def _notifyOnError(self, note):
self._notify('Error', note)
def _notify(self, operation, note):
def callListener():
notifyFunction = getattr(self.__listener, onOperation)
notifyFunction(note)
onOperation = 'on%s' % operation
if hasattr(self.__listener, onOperation):
pass
elif hasattr(self.__listener, 'onAny'):
onOperation = 'onAny'
callListener()
def _checkIfCanConnectToMachine(self):
self._printStep('Check if we can connect to the machine')
cmd = 'true'
try:
self._sshCmdWithOutputVerb(cmd)
except ExecutionException:
sleepTime = 6
maxCount = 40
counter = 0
while True:
try:
self.printDetail('Sleeping %i sec. Retry %i out of %i.' % (sleepTime, counter + 1, maxCount))
time.sleep(sleepTime)
self._sshCmdWithOutputVerb(cmd)
break
except ExecutionException, e:
if counter >= maxCount:
raise ExecutionException(e)
counter += 1
def _imageExists(self):
self._printStep('Checking that base image exists')
self._checkImageExists()
def _checkImageExists(self):
image = Image(self.configHolder)
image.checkImageExists(self.image)
def _getCreateImageTemplateDict(self):
return {VmManager.CREATE_IMAGE_KEY_CREATOR_EMAIL: self.authorEmail,
VmManager.CREATE_IMAGE_KEY_CREATOR_NAME: self.author,
VmManager.CREATE_IMAGE_KEY_NEWIMAGE_TITLE: self.title,
VmManager.CREATE_IMAGE_KEY_NEWIMAGE_COMMENT: self.comment,
VmManager.CREATE_IMAGE_KEY_NEWIMAGE_VERSION: self.newImageGroupVersion,
VmManager.CREATE_IMAGE_KEY_NEWIMAGE_MARKETPLACE: self.marketplaceEndpointNewimage}
def createRunner(self):
self.__createRunner()
def __createRunner(self):
self.configHolder.set('vmName',
'%s: %s' % (self.vmName, Util.getTimeInIso8601()))
self.configHolder.set('noCheckImageUrl', True)
self.configHolder.set('saveDisk', True)
self.runner = VmManagerFactory.create(self.image, self.configHolder)
self.runner.updateCreateImageTemplateData(
self._getCreateImageTemplateDict())
def _startMachine(self):
self._printStep('Starting base image')
try:
self.vmId = self.runner.runInstance()[0]
except Exception, msg:
self._printError('An error occurred while starting machine: \n\t%s' % msg)
try:
_, self.vmIp = self.runner.getNetworkDetail(self.vmId)
self.vmAddress = self.vmIp
except Exception, e:
self._printError('An error occurred while getting machine network details: \n\t%s' % str(e))
self._printStep('Waiting for machine to boot')
vmStarted = self.runner.waitUntilVmRunningOrTimeout(self.vmId,
self.vmStartTimeout,
failOn='Failed')
if not vmStarted:
if self.runner.getVmState(self.vmId) == 'Failed':
msg = 'Failed to start VM (id=%s, ip=%s): %s' % \
(self.vmId, self.vmAddress,
self._getVmFailureMessage(self.vmId))
else:
msg = 'Failed to start VM within %i seconds (id=%s, ip=%s)' % \
(self.vmStartTimeout, self.vmId, self.vmAddress)
self.printDetail(msg)
self._killMachine()
self._printError(msg)
def _stopMachine(self):
self._printStep('Shutting down machine')
if self.getVmState() != 'Failed':
self.cloud.vmStop(self.vmId)
def _killMachine(self):
self._printStep('Killing machine')
if self.vmId:
self.cloud.vmKill(self.vmId)
else:
            Util.printWarning('Undefined VM ID when trying to kill machine.')
def _getVmFailureMessage(self, vmId):
return getattr(Monitor(self.configHolder)._vmDetail(vmId),
'template_error_message', '')
def _shutdownNode(self):
if self.shutdownVm:
self._stopMachine()
else:
self._printStep('Machine ready for use')
msg = '\n\tMachine IP: %s\tRemember to stop the machine when finished' % self.vmIp
Util.printInfo(msg)
def _waitMachineNetworkUpOrAbort(self):
self._printStep('Waiting for machine network to start')
if not waitUntilPingOrTimeout(self.vmAddress, self.vmPingTimeout):
msg = 'Unable to ping VM in %i seconds (id=%s, ip=%s)' % \
(self.vmPingTimeout, self.vmId, self.vmAddress)
self._printError(msg)
self._stopMachine()
def _getPublicAddress(self):
return self.vmIp
def _retrieveManifest(self):
self._printStep('Retrieving image manifest')
configHolder = self.configHolder.copy()
downloader = ManifestDownloader(configHolder)
self.manifestObject = downloader.getManifestInfo(self.image)
self.manifest = self.manifestObject.tostring()
def __setAttributesFromManifest(self):
self._setOsFromManifest()
self._setInstallerBasedOnOs()
def _setOsFromManifest(self):
if not self.os:
self.os = self._getAttrFromManifest('os').lower()
def _setInstallerBasedOnOs(self):
if not self.installer:
self.installer = Systems.getInstallerBasedOnOs(self.os)
def _getAttrFromManifest(self, attr):
return getattr(self.manifestObject, attr)
def _installPackages(self):
self._printStep('Installing user packages')
if len(self.packages) == 0:
self.printDetail('No packages to install')
return
self._setUpExtraRepositories()
self.printDetail('Updating installer')
ret = self._doInstallerUpdate()
self.printDetail('Installing packages: %s' % self.packages)
ret = self._doInstallPackagesRemotly(self.packages)
if ret != 0:
self._printError('An error occurred while installing packages')
def _setUpExtraRepositories(self):
if not self.extraOsReposUrls:
return
self.printDetail('Adding extra repositories')
if self.installer not in Systems.INSTALLERS:
ValidationException('Unknown installer %s. Bailing out.' %
self.installer)
extraReposList = self.extraOsReposUrls.split(',')
if self.installer == 'yum':
for i, repoUrl in enumerate(extraReposList):
repoName = getHostnameFromUri(repoUrl)
cmd = """cat >> /etc/yum.repos.d/%(name)s.repo << EOF
[%(name)s]
name=%(name)s
baseurl=%(url)s
gpgcheck=0
enabled=1
EOF
""" % {'name': '%s-%i' % (repoName, i), 'id': i, 'url': repoUrl}
elif self.installer == 'apt':
for repoUrl in extraReposList:
repoName = getHostnameFromUri(repoUrl)
cmd = """cat >> /etc/apt/sources.list.d/%(reponame)s.list << EOF
deb %(repourl)s
EOF
""" % {'reponame': repoName, 'repourl': repoUrl}
self._sshCmdWithOutput(cmd)
def _doInstallPackagesRemotly(self, packages):
cmd = self._buildInstallerCommand() + ' '
cmd += ' '.join(packages.split(','))
return self._sshCmd(cmd, stderr=self.stderr, stdout=self.stdout)
def _doInstallerUpdate(self):
cmd = self._buildUpdaterCommand()
return self._sshCmd(cmd, stderr=self.stderr, stdout=self.stdout)
def _buildInstallerCommand(self):
if self.installer == 'yum':
return yumInstallCmd
elif self.installer == 'apt':
return aptInstallCmd
def _buildUpdaterCommand(self):
if self.installer == 'yum':
return yumUpdateCmd
elif self.installer == 'apt':
return aptUpdateCmd
def _buildPackageCacheCleanerCommand(self):
if self.installer == 'yum':
return yumCleanPackageCacheCmd
elif self.installer == 'apt':
return aptCleanPackageCacheCmd
def _executeScripts(self):
self._printStep('Executing user scripts')
if len(self.scripts) == 0:
self.printDetail('No scripts to execute')
return
self.printDetail('Executing scripts: %s' % self.scripts)
for script in self.scripts.split(','):
self._uploadAndExecuteRemoteScript(script)
def _uploadAndExecuteRemoteScript(self, script):
def __tellScriptNameAndArgs(script):
scriptNameAndArgs = os.path.basename(script)
scriptNameAndArgsList = scriptNameAndArgs.split(' ', 1)
if len(scriptNameAndArgsList) == 1: # no arguments given
scriptNameAndArgsList = scriptNameAndArgsList + ['']
return scriptNameAndArgsList
def _uploadScript(script):
scriptName, args = __tellScriptNameAndArgs(script)
scriptDirectory = Util.sanitizePath(os.path.dirname(script))
scriptPathLocal = os.path.abspath(os.path.join(scriptDirectory, scriptName))
scriptPathRemote = '/tmp/%s' % scriptName
rc, output = self._scpWithOutput(scriptPathLocal, 'root@%s:%s' % (self.vmAddress, scriptPathRemote))
if rc != 0:
self._printError('An error occurred while uploading script %s\n%s' % (script, output))
self._sshCmdWithOutput('chmod 0755 %s' % scriptPathRemote)
return scriptPathRemote, args
def _executeRemoteScript(scriptPathRemote, args=''):
rc = self._sshCmd('%s %s' % (scriptPathRemote, args), throwOnError=False,
pseudoTTY=True)
if rc != 0:
self._printError('An error occurred while executing script %s' % script)
scriptPathRemote, args = _uploadScript(script)
_executeRemoteScript(scriptPathRemote, args)
def _executePrerecipe(self):
self._printStep('Executing user prerecipe')
if len(self.prerecipe) == 0:
self.printDetail('No prerecipe to execute')
return
self._uploadAndExecuteRemoteRecipe(self.prerecipe)
def _executeRecipe(self):
self._printStep('Executing user recipe')
if len(self.recipe) == 0:
self.printDetail('No recipe to execute')
return
self._uploadAndExecuteRemoteRecipe(self.recipe)
def _uploadAndExecuteRemoteRecipe(self, script):
fd, recipeFile = tempfile.mkstemp()
try:
os.write(fd, script)
os.close(fd)
os.chmod(recipeFile, 0755)
scriptPath = '/tmp/%s' % os.path.basename(recipeFile)
rc = self._scp(recipeFile, 'root@%s:%s' % (self.vmAddress, scriptPath))
if rc != 0:
self._printError('An error occurred while uploading recipe')
self._sshCmdWithOutput('chmod 0755 %s' % scriptPath)
rc = self._sshCmd(scriptPath, throwOnError=False, pseudoTTY=True)
if rc != 0:
self._printError('An error occurred while executing user recipe.')
finally:
try:
os.unlink(recipeFile)
except:
pass
def _localCleanUp(self):
Util.execute(['rm', '-rf', self.manifestLocalFileName])
def _scp(self, src, dst, **kwargs):
return Util.scp(src, dst, self.userPrivateKeyFile,
verboseLevel=self.verboseLevel, verboseThreshold=Util.VERBOSE_LEVEL_DETAILED,
stderr=self.stderr, stdout=self.stdout, **kwargs)
def _scpWithOutput(self, src, dst):
return self._scp(src, dst, withOutput=True)
def _sshCmd(self, cmd, throwOnError=True, **kwargs):
ret = sshCmd(cmd, self.vmAddress,
sshKey=self.userPrivateKeyFile,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED,
**kwargs)
if ret and throwOnError:
raise ExecutionException('Error executing command: %s' % cmd)
return ret
def _sshCmdWithOutput(self, cmd, throwOnError=True, **kwargs):
rc, output = sshCmdWithOutput(cmd, self.vmAddress,
sshKey=self.userPrivateKeyFile,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED,
**kwargs)
if rc and throwOnError:
raise ExecutionException('Error executing command: %s\n%s' % (cmd, output))
return rc, output
def _sshCmdWithOutputVerb(self, cmd, **kwargs):
return self._sshCmdWithOutput(cmd, sshVerb=True, **kwargs)
def _sshCmdWithOutputQuiet(self, cmd, **kwargs):
return self._sshCmdWithOutput(cmd, sshQuiet=True, **kwargs)
def getNewImageId(self):
return self.manifestObject.identifier
def getVmId(self):
return self.vmId
def getVmState(self):
return self.runner.getVmState(self.vmId)
# FIXME: This should be treated as a log handler rather than an ad hoc class.
class CreatorBaseListener(object):
def __init__(self, verbose=False):
if verbose:
self.write = self.__beVerbose
def write(self, msg):
pass
def __beVerbose(self, msg):
print msg
def onAction(self, msg):
self.write('action: %s' % msg)
def onStep(self, msg):
self.write('step: %s' % msg)
def onError(self, msg):
self.write('error: %s' % msg)
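# Editor's addition: an illustrative (hypothetical) use of the listener hook.
def _editor_demo_listener():
    listener = CreatorBaseListener(verbose=True)
    listener.onStep('building image') # prints "step: building image"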
| StratusLab/client | api/code/src/main/python/stratuslab/Creator.py | Python | apache-2.0 | 20,097 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implicit-explicit time stepping routines for ODEs."""
import dataclasses
from typing import Callable, Sequence, TypeVar
import tree_math
PyTreeState = TypeVar("PyTreeState")
TimeStepFn = Callable[[PyTreeState], PyTreeState]
class ImplicitExplicitODE:
"""Describes a set of ODEs with implicit & explicit terms.
The equation is given by:
∂x/∂t = explicit_terms(x) + implicit_terms(x)
`explicit_terms(x)` includes terms that should use explicit time-stepping and
`implicit_terms(x)` includes terms that should be modeled implicitly.
Typically the explicit terms are non-linear and the implicit terms are linear.
This simplifies solves but isn't strictly necessary.
"""
def explicit_terms(self, state: PyTreeState) -> PyTreeState:
"""Evaluates explicit terms in the ODE."""
raise NotImplementedError
def implicit_terms(self, state: PyTreeState) -> PyTreeState:
"""Evaluates implicit terms in the ODE."""
raise NotImplementedError
def implicit_solve(
self, state: PyTreeState, step_size: float,
) -> PyTreeState:
"""Solves `y - step_size * implicit_terms(y) = x` for y."""
raise NotImplementedError
def backward_forward_euler(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping via forward and backward Euler methods.
This method is first order accurate.
Args:
equation: equation to solve.
time_step: time step.
Returns:
Function that performs a time step.
"""
# pylint: disable=invalid-name
dt = time_step
F = tree_math.unwrap(equation.explicit_terms)
G_inv = tree_math.unwrap(equation.implicit_solve, vector_argnums=0)
@tree_math.wrap
def step_fn(u0):
g = u0 + dt * F(u0)
u1 = G_inv(g, dt)
return u1
return step_fn
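# Editor's addition: a toy scalar decay equation dx/dt = -x, split evenly
# (and arbitrarily) into explicit and implicit halves purely for
# illustration. The class and values below are invented for this sketch.
class _EditorDecayODE(ImplicitExplicitODE):
  """Toy ODE for illustration: dx/dt = -0.5*x (explicit) - 0.5*x (implicit)."""

  def explicit_terms(self, state):
    return -0.5 * state

  def implicit_terms(self, state):
    return -0.5 * state

  def implicit_solve(self, state, step_size):
    # Closed form of `y - step_size * implicit_terms(y) = state`.
    return state / (1 + 0.5 * step_size)


def _editor_demo_backward_forward_euler():
  step_fn = backward_forward_euler(_EditorDecayODE(), time_step=0.1)
  return step_fn(1.0)  # one first-order step from x0 = 1.0; close to exp(-0.1)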
def crank_nicolson_rk2(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping via Crank-Nicolson and 2nd order Runge-Kutta (Heun).
This method is second order accurate.
Args:
equation: equation to solve.
time_step: time step.
Returns:
Function that performs a time step.
Reference:
Chandler, G. J. & Kerswell, R. R. Invariant recurrent solutions embedded in
a turbulent two-dimensional Kolmogorov flow. J. Fluid Mech. 722, 554–595
(2013). https://doi.org/10.1017/jfm.2013.122 (Section 3)
"""
# pylint: disable=invalid-name
dt = time_step
F = tree_math.unwrap(equation.explicit_terms)
G = tree_math.unwrap(equation.implicit_terms)
G_inv = tree_math.unwrap(equation.implicit_solve, vector_argnums=0)
@tree_math.wrap
def step_fn(u0):
g = u0 + 0.5 * dt * G(u0)
h1 = F(u0)
u1 = G_inv(g + dt * h1, 0.5 * dt)
h2 = 0.5 * (F(u1) + h1)
u2 = G_inv(g + dt * h2, 0.5 * dt)
return u2
return step_fn
def low_storage_runge_kutta_crank_nicolson(
alphas: Sequence[float],
betas: Sequence[float],
gammas: Sequence[float],
equation: ImplicitExplicitODE,
time_step: float,
) -> TimeStepFn:
"""Time stepping via "low-storage" Runge-Kutta and Crank-Nicolson steps.
  These schemes are second-order accurate for the implicit terms, but potentially
higher order accurate for the explicit terms. This seems to be a favorable
tradeoff when the explicit terms dominate, e.g., for modeling turbulent
fluids.
Per Canuto: "[these methods] have been widely used for the time-discretization
in applications of spectral methods."
Args:
alphas: alpha coefficients.
betas: beta coefficients.
gammas: gamma coefficients.
equation: equation to solve.
time_step: time step.
Returns:
Function that performs a time step.
Reference:
Canuto, C., Yousuff Hussaini, M., Quarteroni, A. & Zang, T. A.
Spectral Methods: Evolution to Complex Geometries and Applications to
Fluid Dynamics. (Springer Berlin Heidelberg, 2007).
https://doi.org/10.1007/978-3-540-30728-0 (Appendix D.3)
"""
# pylint: disable=invalid-name,non-ascii-name
α = alphas
β = betas
γ = gammas
dt = time_step
F = tree_math.unwrap(equation.explicit_terms)
G = tree_math.unwrap(equation.implicit_terms)
G_inv = tree_math.unwrap(equation.implicit_solve, vector_argnums=0)
  if not (len(alphas) - 1 == len(betas) == len(gammas)):
raise ValueError("number of RK coefficients does not match")
@tree_math.wrap
def step_fn(u):
h = 0
for k in range(len(β)):
h = F(u) + β[k] * h
µ = 0.5 * dt * (α[k + 1] - α[k])
u = G_inv(u + γ[k] * dt * h + µ * G(u), µ)
return u
return step_fn
def crank_nicolson_rk3(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping via Crank-Nicolson and RK3 ("Williamson")."""
return low_storage_runge_kutta_crank_nicolson(
alphas=[0, 1/3, 3/4, 1],
betas=[0, -5/9, -153/128],
gammas=[1/3, 15/16, 8/15],
equation=equation,
time_step=time_step,
)
def crank_nicolson_rk4(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping via Crank-Nicolson and RK4 ("Carpenter-Kennedy")."""
# pylint: disable=line-too-long
return low_storage_runge_kutta_crank_nicolson(
alphas=[0, 0.1496590219993, 0.3704009573644, 0.6222557631345, 0.9582821306748, 1],
betas=[0, -0.4178904745, -1.192151694643, -1.697784692471, -1.514183444257],
gammas=[0.1496590219993, 0.3792103129999, 0.8229550293869, 0.6994504559488, 0.1530572479681],
equation=equation,
time_step=time_step,
)
@dataclasses.dataclass
class ImExButcherTableau:
"""Butcher Tableau for implicit-explicit Runge-Kutta methods."""
a_ex: Sequence[Sequence[float]]
a_im: Sequence[Sequence[float]]
b_ex: Sequence[float]
b_im: Sequence[float]
def __post_init__(self):
if len({len(self.a_ex) + 1,
len(self.a_im) + 1,
len(self.b_ex),
len(self.b_im)}) > 1:
raise ValueError("inconsistent Butcher tableau")
def imex_runge_kutta(
tableau: ImExButcherTableau,
equation: ImplicitExplicitODE,
time_step: float,
) -> TimeStepFn:
"""Time stepping with Implicit-Explicit Runge-Kutta."""
# pylint: disable=invalid-name
dt = time_step
F = tree_math.unwrap(equation.explicit_terms)
G = tree_math.unwrap(equation.implicit_terms)
G_inv = tree_math.unwrap(equation.implicit_solve, vector_argnums=0)
a_ex = tableau.a_ex
a_im = tableau.a_im
b_ex = tableau.b_ex
b_im = tableau.b_im
num_steps = len(b_ex)
@tree_math.wrap
def step_fn(y0):
f = [None] * num_steps
g = [None] * num_steps
f[0] = F(y0)
g[0] = G(y0)
for i in range(1, num_steps):
ex_terms = dt * sum(a_ex[i-1][j] * f[j] for j in range(i) if a_ex[i-1][j])
im_terms = dt * sum(a_im[i-1][j] * g[j] for j in range(i) if a_im[i-1][j])
Y_star = y0 + ex_terms + im_terms
Y = G_inv(Y_star, dt * a_im[i-1][i])
if any(a_ex[j][i] for j in range(i, num_steps - 1)) or b_ex[i]:
f[i] = F(Y)
if any(a_im[j][i] for j in range(i, num_steps - 1)) or b_im[i]:
g[i] = G(Y)
ex_terms = dt * sum(b_ex[j] * f[j] for j in range(num_steps) if b_ex[j])
im_terms = dt * sum(b_im[j] * g[j] for j in range(num_steps) if b_im[j])
y_next = y0 + ex_terms + im_terms
return y_next
return step_fn
def imex_rk_sil3(
equation: ImplicitExplicitODE, time_step: float,
) -> TimeStepFn:
"""Time stepping with the SIL3 implicit-explicit RK scheme.
This method is second-order accurate for the implicit terms and third-order
accurate for the explicit terms.
Args:
equation: equation to solve.
time_step: time step.
Returns:
Function that performs a time step.
Reference:
Whitaker, J. S. & Kar, S. K. Implicit-Explicit Runge-Kutta Methods for
Fast-Slow Wave Problems. Monthly Weather Review vol. 141 3426-3434 (2013)
http://dx.doi.org/10.1175/mwr-d-13-00132.1
"""
return imex_runge_kutta(
tableau=ImExButcherTableau(
a_ex=[[1/3], [1/6, 1/2], [1/2, -1/2, 1]],
a_im=[[1/6, 1/6], [1/3, 0, 1/3], [3/8, 0, 3/8, 1/4]],
b_ex=[1/2, -1/2, 1, 0],
b_im=[3/8, 0, 3/8, 1/4],
),
equation=equation,
time_step=time_step,
)
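# Editor's addition: stepping the same invented toy equation with the SIL3
# tableau (reuses the illustrative _EditorDecayODE sketch defined earlier).
def _editor_demo_imex_rk_sil3():
  step_fn = imex_rk_sil3(_EditorDecayODE(), time_step=0.1)
  return step_fn(1.0)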
| google/jax-cfd | jax_cfd/spectral/time_stepping.py | Python | apache-2.0 | 8,747 |
import logging
import pickle
from typing import Dict, Optional, Union, List, Sequence
from ray.tune.result import DEFAULT_METRIC
from ray.tune.sample import Categorical, Domain, Float, Integer, LogUniform, \
Quantized
from ray.tune.suggest.suggestion import UNRESOLVED_SEARCH_SPACE, \
UNDEFINED_METRIC_MODE, UNDEFINED_SEARCH_SPACE
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils import flatten_dict
from ray.tune.utils.util import unflatten_dict
try:
import nevergrad as ng
from nevergrad.optimization import Optimizer
from nevergrad.optimization.base import ConfiguredOptimizer
Parameter = ng.p.Parameter
except ImportError:
ng = None
Optimizer = None
ConfiguredOptimizer = None
Parameter = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
class NevergradSearch(Searcher):
"""Uses Nevergrad to optimize hyperparameters.
Nevergrad is an open source tool from Facebook for derivative free
optimization. More info can be found at:
https://github.com/facebookresearch/nevergrad.
You will need to install Nevergrad via the following command:
.. code-block:: bash
$ pip install nevergrad
Parameters:
        optimizer (nevergrad.optimization.Optimizer|class): Optimizer provided
            from Nevergrad, either an `Optimizer` instance or a
            `ConfiguredOptimizer`.
space (list|nevergrad.parameter.Parameter): Nevergrad parametrization
to be passed to optimizer on instantiation, or list of parameter
names if you passed an optimizer object.
metric (str): The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
points_to_evaluate (list): Initial parameter suggestions to be run
first. This is for when you already have some good parameters
            you want to run first to help the algorithm
            make better suggestions for future parameters. Needs to be
            a list of dicts of the configuration variables.
use_early_stopped_trials: Deprecated.
max_concurrent: Deprecated.
Tune automatically converts search spaces to Nevergrad's format:
.. code-block:: python
import nevergrad as ng
config = {
"width": tune.uniform(0, 20),
"height": tune.uniform(-100, 100),
"activation": tune.choice(["relu", "tanh"])
}
current_best_params = [{
"width": 10,
"height": 0,
"activation": relu",
}]
ng_search = NevergradSearch(
optimizer=ng.optimizers.OnePlusOne,
metric="mean_loss",
mode="min",
points_to_evaluate=current_best_params)
run(my_trainable, config=config, search_alg=ng_search)
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
import nevergrad as ng
space = ng.p.Dict(
width=ng.p.Scalar(lower=0, upper=20),
height=ng.p.Scalar(lower=-100, upper=100),
activation=ng.p.Choice(choices=["relu", "tanh"])
)
ng_search = NevergradSearch(
optimizer=ng.optimizers.OnePlusOne,
space=space,
metric="mean_loss",
mode="min")
run(my_trainable, search_alg=ng_search)
"""
def __init__(self,
optimizer: Union[None, Optimizer, ConfiguredOptimizer] = None,
space: Optional[Union[Dict, Parameter]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
max_concurrent: Optional[int] = None,
points_to_evaluate: Optional[List[Dict]] = None,
**kwargs):
assert ng is not None, """Nevergrad must be installed!
You can install Nevergrad with the command:
`pip install nevergrad`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(NevergradSearch, self).__init__(
metric=metric, mode=mode, max_concurrent=max_concurrent, **kwargs)
self._space = None
self._opt_factory = None
self._nevergrad_opt = None
if points_to_evaluate is None:
self._points_to_evaluate = None
elif not isinstance(points_to_evaluate, Sequence):
raise ValueError(
f"Invalid object type passed for `points_to_evaluate`: "
"{type(points_to_evaluate)}. "
f"Please pass a list of points (dictionaries) instead.")
else:
self._points_to_evaluate = list(points_to_evaluate)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space)
if isinstance(optimizer, Optimizer):
            if space is not None and not isinstance(space, list):
raise ValueError(
"If you pass a configured optimizer to Nevergrad, either "
"pass a list of parameter names or None as the `space` "
"parameter.")
self._parameters = space
self._nevergrad_opt = optimizer
elif isinstance(optimizer, ConfiguredOptimizer):
self._opt_factory = optimizer
self._parameters = None
self._space = space
else:
raise ValueError(
"The `optimizer` argument passed to NevergradSearch must be "
"either an `Optimizer` or a `ConfiguredOptimizer`.")
self._live_trial_mapping = {}
self.max_concurrent = max_concurrent
if self._nevergrad_opt or self._space:
self._setup_nevergrad()
def _setup_nevergrad(self):
if self._opt_factory:
self._nevergrad_opt = self._opt_factory(self._space)
# nevergrad.tell internally minimizes, so "max" => -1
if self._mode == "max":
self._metric_op = -1.
elif self._mode == "min":
self._metric_op = 1.
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
if hasattr(self._nevergrad_opt, "instrumentation"): # added in v0.2.0
if self._nevergrad_opt.instrumentation.kwargs:
if self._nevergrad_opt.instrumentation.args:
raise ValueError(
"Instrumented optimizers should use kwargs only")
if self._parameters is not None:
raise ValueError("Instrumented optimizers should provide "
"None as parameter_names")
else:
if self._parameters is None:
raise ValueError("Non-instrumented optimizers should have "
"a list of parameter_names")
if len(self._nevergrad_opt.instrumentation.args) != 1:
raise ValueError(
"Instrumented optimizers should use kwargs only")
if self._parameters is not None and \
self._nevergrad_opt.dimension != len(self._parameters):
raise ValueError("len(parameters_names) must match optimizer "
"dimension for non-instrumented optimizers")
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict) -> bool:
if self._nevergrad_opt or self._space:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_nevergrad()
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._nevergrad_opt:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"))
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__,
metric=self._metric,
mode=self._mode))
if self.max_concurrent:
if len(self._live_trial_mapping) >= self.max_concurrent:
return None
if self._points_to_evaluate is not None:
if len(self._points_to_evaluate) > 0:
point_to_evaluate = self._points_to_evaluate.pop(0)
self._nevergrad_opt.suggest(point_to_evaluate)
suggested_config = self._nevergrad_opt.ask()
self._live_trial_mapping[trial_id] = suggested_config
# in v0.2.0+, output of ask() is a Candidate,
# with fields args and kwargs
if not suggested_config.kwargs:
if self._parameters:
return unflatten_dict(
dict(zip(self._parameters, suggested_config.args[0])))
return unflatten_dict(suggested_config.value)
else:
return unflatten_dict(suggested_config.kwargs)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
The result is internally negated when interacting with Nevergrad
so that Nevergrad Optimizers can "maximize" this value,
as it minimizes on default.
"""
if result:
self._process_result(trial_id, result)
self._live_trial_mapping.pop(trial_id)
def _process_result(self, trial_id: str, result: Dict):
ng_trial_info = self._live_trial_mapping[trial_id]
self._nevergrad_opt.tell(ng_trial_info,
self._metric_op * result[self._metric])
def save(self, checkpoint_path: str):
trials_object = (self._nevergrad_opt, self._parameters)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(trials_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
trials_object = pickle.load(inputFile)
self._nevergrad_opt = trials_object[0]
self._parameters = trials_object[1]
@staticmethod
def convert_search_space(spec: Dict) -> Parameter:
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a Nevergrad search space.")
def resolve_value(domain: Domain) -> Parameter:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning("Nevergrad does not support quantization. "
"Dropped quantization.")
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if isinstance(sampler, LogUniform):
return ng.p.Log(
lower=domain.lower,
upper=domain.upper,
exponent=sampler.base)
return ng.p.Scalar(lower=domain.lower, upper=domain.upper)
if isinstance(domain, Integer):
return ng.p.Scalar(
lower=domain.lower,
upper=domain.upper).set_integer_casting()
if isinstance(domain, Categorical):
return ng.p.Choice(choices=domain.categories)
raise ValueError("SkOpt does not support parameters of type "
"`{}`".format(type(domain).__name__))
# Parameter name is e.g. "a/b/c" for nested dicts
space = {
"/".join(path): resolve_value(domain)
for path, domain in domain_vars
}
return ng.p.Dict(**space)
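# Editor's addition: a hypothetical conversion sketch (requires ray.tune at
# call time; parameter names and bounds are invented for illustration).
def _editor_demo_convert_search_space():
    from ray import tune
    space = NevergradSearch.convert_search_space({
        "width": tune.uniform(0, 20),
        "activation": tune.choice(["relu", "tanh"]),
    })
    # `space` is an ng.p.Dict with a Scalar "width" and a Choice "activation".
    return space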
| richardliaw/ray | python/ray/tune/suggest/nevergrad.py | Python | apache-2.0 | 12,589 |
"""
You are given two integer arrays, A and B, each containing N integers. The size of the array is less than or equal to
1000. You are free to permute the order of the elements in the arrays.
Now here's the real question: Is there a permutation A', B' of A and B such that A'i + B'i >= K for all i,
where A'i denotes the ith element in the array A' and B'i denotes the ith element in the array B'.
Input Format
The first line contains an integer, T, the number of test-cases. T test cases follow. Each test case has the following
format:
The first line contains two integers, N and K. The second line contains N space separated integers, denoting array A.
The third line describes array B in a same format.
"""
__author__ = 'Danyang'
class Solution(object):
def solve(self, cipher):
"""
main solution function
:param cipher: the cipher
"""
N, K, A, B = cipher
A.sort()
        B.sort(reverse=True)  # sort descending (dynamic typing keeps the IDE from inferring the list type)
for i in xrange(N):
if not A[i] + B[i] >= K:
return "NO"
return "YES"
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
N, K = map(int, f.readline().strip().split(" "))
A = map(int, f.readline().strip().split(' '))
B = map(int, f.readline().strip().split(' '))
cipher = N, K, A, B
# solve
s = "%s\n" % (Solution().solve(cipher))
print s,
| algorhythms/HackerRankAlgorithms | Two arrays.py | Python | apache-2.0 | 1,587 |
#!/usr/bin/python
import boto3
import shutil
import zipfile
from zipfile import ZipFile, ZipInfo
import os
import tempfile
import time
import threading
from os.path import join
from botocore.client import Config
class JobPoller:
def __init__(self, action_type_id, builder):
self._action_type_id = action_type_id
self._codepipeline = boto3.client('codepipeline')
self._builder = builder
def poll(self):
jobs = []
print("Polling for jobs %s" % self._action_type_id)
while not jobs:
time.sleep(2)
response = self._codepipeline.poll_for_jobs(actionTypeId=self._action_type_id, maxBatchSize=1)
jobs = response['jobs']
job = jobs[0]
job_id = job['id']
print("Job with id %s found" % job_id)
nonce = job['nonce']
self._codepipeline.acknowledge_job(jobId=job_id, nonce=nonce)
threading.Thread(target=self._build, args=(job,)).start()
self.poll()
def _build(self, job):
job_id = job['id']
try:
artifactCredentials = job['data']['artifactCredentials']
s3session = boto3.Session(
aws_access_key_id=artifactCredentials['accessKeyId'],
aws_secret_access_key=artifactCredentials['secretAccessKey'],
aws_session_token=artifactCredentials['sessionToken'])
s3 = s3session.client('s3', config=Config(signature_version='s3v4'))
bucketName = job['data']['inputArtifacts'][0]['location']['s3Location']['bucketName']
objectKey = job['data']['inputArtifacts'][0]['location']['s3Location']['objectKey']
tempdir = tempfile.mkdtemp()
print('tempdir for job %s is %s' % (job_id, tempdir))
input_src = join(tempdir, 'input')
os.mkdir(input_src)
target = join(tempdir, 'output')
os.mkdir(target)
print('Downloading artifact %s from bucket %s' % (objectKey, bucketName))
s3.download_file(bucketName, objectKey, join(tempdir, 'input.zip'))
with ZipFileWithPermissions(join(tempdir, 'input.zip'), 'r') as zip:
zip.extractall(input_src)
configuration = job['data']['actionConfiguration']['configuration']
print('Using configuration %s' % configuration)
print("Building job %s" % job_id)
            # Run the build
rc = self._builder.run(configuration=configuration, input_src=input_src, target_dir=target)
shutil.make_archive(join(tempdir, 'output'), 'zip', target)
uploadBucket = job['data']['outputArtifacts'][0]['location']['s3Location']['bucketName']
uploadKey = job['data']['outputArtifacts'][0]['location']['s3Location']['objectKey']
print('Uploading artifact %s to bucket %s' % (uploadKey, uploadBucket))
s3.upload_file(join(tempdir, 'output.zip'), uploadBucket, uploadKey)
if not rc == 0:
print('job %s failed with return code %d' % (job_id, rc))
self._codepipeline.put_job_failure_result(jobId=job_id, failureDetails={'type': 'JobFailed', 'message': 'Failed'})
else:
self._codepipeline.put_job_success_result(jobId=job_id, executionDetails={'summary': 'It worked'})
print('job %s succeeded' % job_id)
shutil.rmtree(tempdir)
print("Done with " + job_id)
except:
self._codepipeline.put_job_failure_result(jobId=job_id, failureDetails={'type': 'JobFailed', 'message': 'Failed'})
raise
# ZipFile should keep permissions
class ZipFileWithPermissions(ZipFile):
def extract(self, member, path=None, pwd=None):
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
ret_val = self._extract_member(member, path, pwd)
attr = member.external_attr >> 16
os.chmod(ret_val, attr)
return ret_val
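# Editor's addition: an illustrative (hypothetical paths) use of the
# permission-preserving extractor above; extractall() funnels each member
# through extract(), so unix mode bits survive.
def _editor_demo_zip_permissions():
    with ZipFileWithPermissions('artifact.zip', 'r') as zf:
        zf.extractall('/tmp/artifact')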
| horiam/codebuild-emulator | codebuild_emulator/jobpoller.py | Python | apache-2.0 | 4,019 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from edb.common import ordered
from .. import common
from . import base
class Record(type):
def __new__(mcls, name, fields, default=None):
dct = {'_fields___': fields, '_default___': default}
bases = (RecordBase, )
return super(Record, mcls).__new__(mcls, name, bases, dct)
    def __init__(cls, name, fields, default=None):  # mirror __new__'s signature
        pass
def has_field(cls, name):
return name in cls._fields___
class RecordBase:
def __init__(self, **kwargs):
for k, v in kwargs.items():
if k not in self.__class__._fields___:
msg = '__init__() got an unexpected keyword argument %s' % k
raise TypeError(msg)
setattr(self, k, v)
for k in set(self.__class__._fields___) - set(kwargs.keys()):
setattr(self, k, self.__class__._default___)
def __setattr__(self, name, value):
if name not in self.__class__._fields___:
msg = '%s has no attribute %s' % (self.__class__.__name__, name)
raise AttributeError(msg)
super().__setattr__(name, value)
def __eq__(self, tup):
if not isinstance(tup, tuple):
return NotImplemented
return tuple(self) == tup
def __getitem__(self, index):
return getattr(self, self.__class__._fields___[index])
def __iter__(self):
for name in self.__class__._fields___:
yield getattr(self, name)
def __len__(self):
return len(self.__class__._fields___)
def items(self):
for name in self.__class__._fields___:
yield name, getattr(self, name)
def keys(self):
return iter(self.__class__._fields___)
def __str__(self):
f = ', '.join(str(v) for v in self)
if len(self) == 1:
f += ','
return '(%s)' % f
__repr__ = __str__
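# Illustrative usage (not part of the module): Record is a metaclass that
# builds a lightweight mutable record type with tuple-like behaviour.
#
#   Point = Record('Point', ['x', 'y'], default=0)
#   p = Point(x=1)                    # y falls back to the class default
#   assert p == (1, 0)                # __eq__ compares against tuples
#   assert list(p.items()) == [('x', 1), ('y', 0)]
#   p.z = 3                           # would raise AttributeError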
class CompositeDBObject(base.DBObject):
def __init__(self, name, columns=None):
super().__init__()
self.name = name
self._columns = ordered.OrderedSet()
self.add_columns(columns or [])
def add_columns(self, iterable):
self._columns.update(iterable)
@property
def record(self):
return Record(
self.__class__.__name__ + '_record',
[c.name for c in self._columns], default=base.Default)
class CompositeAttributeCommand:
def __init__(self, attribute):
self.attribute = attribute
def __repr__(self):
return '<%s.%s %r>' % (
self.__class__.__module__, self.__class__.__name__, self.attribute)
class AlterCompositeAddAttribute(CompositeAttributeCommand):
def code(self, block: base.PLBlock) -> str:
return (f'ADD {self.get_attribute_term()} ' # type: ignore
f'{self.attribute.code(block)}')
def generate_extra(self, block: base.PLBlock,
alter: base.CompositeCommandGroup):
self.attribute.generate_extra(block, alter)
class AlterCompositeDropAttribute(CompositeAttributeCommand):
def code(self, block: base.PLBlock) -> str:
attrname = common.qname(self.attribute.name)
return f'DROP {self.get_attribute_term()} {attrname}' # type: ignore
class AlterCompositeAlterAttributeType:
def __init__(self, attribute_name, new_type, *, using_expr=None):
self.attribute_name = attribute_name
self.new_type = new_type
self.using_expr = using_expr
def code(self, block: base.PLBlock) -> str:
attrterm = self.get_attribute_term() # type: ignore
attrname = common.quote_ident(str(self.attribute_name))
code = f'ALTER {attrterm} {attrname} SET DATA TYPE {self.new_type}'
if self.using_expr is not None:
code += f' USING ({self.using_expr})'
return code
def __repr__(self):
cls = self.__class__
return f'<{cls.__name__} {self.attribute_name!r} to {self.new_type}>'
| edgedb/edgedb | edb/pgsql/dbops/composites.py | Python | apache-2.0 | 4,645 |
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Reacher domain."""
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
import numpy as np
SUITE = containers.TaggedTasks()
_DEFAULT_TIME_LIMIT = 20
_BIG_TARGET = .05
_SMALL_TARGET = .015
def get_model_and_assets():
"""Returns a tuple containing the model XML string and a dict of assets."""
return common.read_model('reacher.xml'), common.ASSETS
@SUITE.add('benchmarking', 'easy')
def easy(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns reacher with sparse reward with 5e-2 tol and randomized target."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Reacher(target_size=_BIG_TARGET, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, time_limit=time_limit, **environment_kwargs)
@SUITE.add('benchmarking')
def hard(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns reacher with sparse reward with 1e-2 tol and randomized target."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Reacher(target_size=_SMALL_TARGET, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, time_limit=time_limit, **environment_kwargs)
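# Minimal usage sketch (illustrative; assumes dm_control and its MuJoCo
# dependency are installed):
#
#   env = easy(random=0)
#   time_step = env.reset()
#   while not time_step.last():
#       action = np.zeros(env.action_spec().shape)   # null controller
#       time_step = env.step(action)
#       print(time_step.reward, time_step.observation['to_target'])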
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Reacher domain."""
def finger_to_target(self):
"""Returns the vector from target to finger in global coordinates."""
return (self.named.data.geom_xpos['target', :2] -
self.named.data.geom_xpos['finger', :2])
def finger_to_target_dist(self):
"""Returns the signed distance between the finger and target surface."""
return np.linalg.norm(self.finger_to_target())
class Reacher(base.Task):
"""A reacher `Task` to reach the target."""
def __init__(self, target_size, random=None):
"""Initialize an instance of `Reacher`.
Args:
target_size: A `float`, tolerance to determine whether finger reached the
target.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._target_size = target_size
super().__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode."""
physics.named.model.geom_size['target', 0] = self._target_size
randomizers.randomize_limited_and_rotational_joints(physics, self.random)
# Randomize target position
angle = self.random.uniform(0, 2 * np.pi)
radius = self.random.uniform(.05, .20)
physics.named.model.geom_pos['target', 'x'] = radius * np.sin(angle)
physics.named.model.geom_pos['target', 'y'] = radius * np.cos(angle)
super().initialize_episode(physics)
def get_observation(self, physics):
"""Returns an observation of the state and the target position."""
obs = collections.OrderedDict()
obs['position'] = physics.position()
obs['to_target'] = physics.finger_to_target()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics):
radii = physics.named.model.geom_size[['target', 'finger'], 0].sum()
return rewards.tolerance(physics.finger_to_target_dist(), (0, radii))
| deepmind/dm_control | dm_control/suite/reacher.py | Python | apache-2.0 | 4,233 |
# 378. Kth Smallest Element in a Sorted Matrix
#
# Given a n x n matrix where each of the rows and columns are sorted in ascending order, find the kth smallest element in the matrix.
#
# Note that it is the kth smallest element in the sorted order, not the kth distinct element.
#
# Example:
#
# matrix = [
# [ 1, 5, 9],
# [10, 11, 13],
# [12, 13, 15]
# ],
# k = 8,
#
# return 13.
#
# Note:
# You may assume k is always valid, 1 <= k <= n^2.
# https://nb4799.neu.edu/wordpress/?p=2017
from heapq import *
class Solution:
def kthSmallest(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtype: int
"""
# heapq.merge: Merge multiple sorted inputs into a single sorted output
# (for example, merge timestamped entries from multiple log files).
# Returns an iterator over the sorted values.
return list(merge(*matrix))[k-1]
# Maintain a min-heap with k element, initialized by the elements of the first row.
# Since it is a min-heap, and note the property that rows and columns are already sorted in ascending order,
# the heap root after popping k-1 times is the k-th smallest element of the whole matrix.
# When popping the heap, we also need to push necessary matrix elements into the heap.
# Time complexity is O(KlogK) (every heap operation takes O(logK))
def kthSmallest(self, matrix, k):
# element in the heap: (val, x coord, y coord)
h = []
for i in range(min(len(matrix[0]), k)):
heappush(h, (matrix[0][i], 0, i))
# pop k-1 times
for i in range(k-1):
val, x, y = heappop(h)
if x < len(matrix) - 1:
heappush(h, (matrix[x+1][y], x+1, y))
return h[0][0] # smallest element in heap. 0th index in tuple
# binary search
# We can eventually find the k-th smallest element by shrinking the search range in binary search.
    # Binary search is feasible for this problem since left, right, and mid in binary search are integers
    # and we know that matrix elements are integers.
    # The algorithm takes O(n log N) time (N is the range of matrix[0][0] ~ matrix[n-1][n-1]) and O(1) space.
    # Time complexity analysis: the outer loop executes at most O(log N) times.
# The inner for loop executes at most O(n) times.
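    # Worked example of the counting step on the docstring matrix above
    # with mid = 11: the staircase walk counts 3 elements <= 11 in row
    # [1, 5, 9], 2 in [10, 11, 13] and 0 in [12, 13, 15]; count = 5 < k = 8,
    # so the answer must be larger and L becomes mid + 1 = 12.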
def kthSmallest(self, matrix, k):
n = len(matrix)
L = matrix[0][0]
R = matrix[n-1][n-1]
while L < R:
mid = L + ((R - L) >> 1)
count = 0
j = n - 1
for i in range(n):
while j >= 0 and matrix[i][j] > mid:
j -= 1
count += j+1
if count >= k:
R = mid
else:
L = mid + 1
return L
sol = Solution()
matrix = [
[ 1, 5, 9],
[10, 11, 13],
[12, 13, 15]
]
k = 8
print(sol.kthSmallest(matrix, k))
| gengwg/leetcode | 378_kth_smallest_element_in_a_sorted_matrix.py | Python | apache-2.0 | 2,958 |
# File has been renamed.
raise DeprecationWarning("This file has been renamed to `fractional_gpus.py` "
"in the same folder!")
| pcmoritz/ray-1 | rllib/examples/partial_gpus.py | Python | apache-2.0 | 152 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=relative-beyond-top-level
from os.path import join, exists
from itertools import product
import re
import numpy as np
from PIL import Image
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import xiuminglib as xm
from util import logging as logutil, io as ioutil
from .base import Dataset as BaseDataset
logger = logutil.Logger(loggee="datasets/nlt")
class Dataset(BaseDataset):
def __init__(self, config, mode, **kwargs):
self.data_root = config.get('DEFAULT', 'data_root')
data_status_path = self.data_root.rstrip('/') + '.json'
if not exists(data_status_path):
raise FileNotFoundError((
"Data status JSON not found at \n\t%s\nRun "
"$REPO/data_gen/postproc.py to generate it") % data_status_path)
self.data_paths = ioutil.read_json(data_status_path)
# Because paths in JSON are relative, prepend data root directory
for _, paths in self.data_paths.items():
for k, v in paths.items():
if k != 'complete':
paths[k] = join(self.data_root, v)
super().__init__(config, mode, **kwargs)
# Trigger init. in a main thread before starting multi-threaded work.
# See http://yaqs/eng/q/6292200559345664 for details
Image.init()
def _glob(self):
# Handle holdouts
holdout_cam = self.config.get('DEFAULT', 'holdout_cam').split(',')
holdout_light = self.config.get('DEFAULT', 'holdout_light').split(',')
holdout = [
'%s_%s' % x for x in product(holdout_cam, holdout_light)]
# Add only if data are complete for this camera
ids = []
for id_, paths in self.data_paths.items():
if id_.startswith('test' if self.mode == 'test' else 'trainvali'):
if paths['complete']:
ids.append(id_)
else:
logger.warn(
"Skipping '%s' because its data are incomplete", id_)
# Shortcircuit if testing
if self.mode == 'test':
logger.info(
"Number of '%s' camera-light combinations: %d", self.mode,
len(ids))
return ids
# Training-validation split
ids_split = []
for id_ in ids:
# ID is {bin_mode}_{i:09d}_{cam}_{light}
cam_light = '_'.join(id_.split('_')[-2:])
if (self.mode == 'vali' and cam_light in holdout) or \
(self.mode != 'vali' and cam_light not in holdout):
ids_split.append(id_)
logger.info(
"Number of '%s' camera-light combinations: %d", self.mode,
len(ids_split))
return ids_split
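    # Example (hypothetical IDs): with holdout_cam='c00' and
    # holdout_light='l05', a 'vali' dataset keeps only IDs whose last two
    # fields are 'c00_l05', e.g. 'trainvali_000000003_c00_l05'; every
    # other camera-light combination stays in the training split.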
def _get_nn_id(self, nn):
id_regex = re.compile(
r'trainvali_\d\d\d\d\d\d\d\d\d_{cam}_{light}'.format(**nn))
matched = [
x for x in self.data_paths.keys() if id_regex.search(x) is not None]
n_matches = len(matched)
if not matched:
return None
if n_matches == 1:
return matched[0]
raise ValueError(
"Found {n} matches:\n\t{matches}".format(
n=n_matches, matches=matched))
def _process_example_precache(self, id_): # pylint: disable=arguments-differ
"""Loads data from paths.
"""
id_, base, cvis, lvis, warp, rgb, rgb_camspc, nn_id, nn_base, nn_rgb, \
nn_rgb_camspc = tf.py_function(
self._load_data, [id_], (
tf.string, tf.float32, tf.float32, tf.float32, tf.float32,
tf.float32, tf.float32, tf.string, tf.float32, tf.float32,
tf.float32))
return \
id_, base, cvis, lvis, warp, rgb, rgb_camspc, nn_id, nn_base, \
nn_rgb, nn_rgb_camspc
def _load_data(self, id_):
if isinstance(id_, tf.Tensor):
id_ = id_.numpy().decode()
paths = self.data_paths[id_]
imh = self.config.getint('DEFAULT', 'imh')
imw = self.config.getint('DEFAULT', 'imw')
# Load images
base = xm.io.img.load(paths['diffuse'], as_array=True)[:, :, :3]
cvis = xm.io.img.load(paths['cvis'], as_array=True)
lvis = xm.io.img.load(paths['lvis'], as_array=True)
warp = ioutil.read_npy(paths['uv2cam'])
if self.mode == 'test':
rgb = np.zeros_like(base) # placeholders
rgb_camspc = np.zeros((imh, imw, 3))
else:
rgb = xm.io.img.load(paths['rgb'], as_array=True)[:, :, :3]
rgb_camspc = xm.io.img.load(
paths['rgb_camspc'], as_array=True)[:, :, :3]
# Normalize to [0, 1]
base = xm.img.normalize_uint(base)
cvis = xm.img.normalize_uint(cvis)
lvis = xm.img.normalize_uint(lvis)
if self.mode != 'test':
rgb = xm.img.normalize_uint(rgb)
rgb_camspc = xm.img.normalize_uint(rgb_camspc)
# Resize images
uvh = self.config.getint('DEFAULT', 'uvh')
base = xm.img.resize(base, new_h=uvh)
cvis = xm.img.resize(cvis, new_h=uvh)
lvis = xm.img.resize(lvis, new_h=uvh)
rgb = xm.img.resize(rgb, new_h=uvh)
rgb_camspc = xm.img.resize(rgb_camspc, new_h=imh, new_w=imw)
# NOTE: We didn't resize warp because this introduces artifacts --
# always warp first and then resize
# Neighbor diffuse base and full
nn = ioutil.read_json(paths['nn'])
nn_id = self._get_nn_id(nn)
if nn_id is None:
nn_id = 'incomplete-data_{cam}_{light}'.format(**nn)
# NOTE: When neighbor is missing, simply return black placeholders
nn_base = np.zeros_like(base)
nn_rgb = np.zeros_like(rgb)
nn_rgb_camspc = np.zeros_like(rgb_camspc)
else:
nn_base = xm.io.img.load(
self.data_paths[nn_id]['diffuse'], as_array=True)[:, :, :3]
nn_rgb = xm.io.img.load(
self.data_paths[nn_id]['rgb'], as_array=True)[:, :, :3]
nn_rgb_camspc = xm.io.img.load(
self.data_paths[nn_id]['rgb_camspc'], as_array=True)[:, :, :3]
nn_rgb_camspc = nn_rgb_camspc[:, :, :3] # discards alpha
nn_base = xm.img.normalize_uint(nn_base)
nn_rgb = xm.img.normalize_uint(nn_rgb)
nn_rgb_camspc = xm.img.normalize_uint(nn_rgb_camspc)
nn_base = xm.img.resize(nn_base, new_h=uvh)
nn_rgb = xm.img.resize(nn_rgb, new_h=uvh)
nn_rgb_camspc = xm.img.resize(nn_rgb_camspc, new_h=imh, new_w=imw)
# Return
base = base.astype(np.float32)
cvis = cvis.astype(np.float32)[:, :, None] # HxWx1
lvis = lvis.astype(np.float32)[:, :, None]
warp = warp.astype(np.float32)
rgb = rgb.astype(np.float32)
rgb_camspc = rgb_camspc.astype(np.float32)
nn_base = nn_base.astype(np.float32)
nn_rgb = nn_rgb.astype(np.float32)
nn_rgb_camspc = nn_rgb_camspc.astype(np.float32)
return \
id_, base, cvis, lvis, warp, rgb, rgb_camspc, nn_id, nn_base, \
nn_rgb, nn_rgb_camspc
| google/neural-light-transport | nlt/datasets/nlt.py | Python | apache-2.0 | 7,804 |
import numpy as np
from ._utils import move_axis_to_end, suppress_warnings
def brier_score(observations, forecasts):
"""
Calculate the Brier score (BS)
The Brier score (BS) scores binary forecasts $k \in \{0, 1\}$,
..math:
BS(p, k) = (p_1 - k)^2,
where $p_1$ is the forecast probability of $k=1$.
Parameters
----------
observations, forecasts : array_like
Broadcast compatible arrays of forecasts (probabilities between 0 and
1) and observations (0, 1 or NaN).
Returns
-------
out : np.ndarray
Brier score for each forecast/observation.
References
----------
Jochen Broecker. Chapter 7 in Forecast Verification: A Practitioner's Guide
in Atmospheric Science. John Wiley & Sons, Ltd, Chichester, UK, 2nd
edition, 2012.
https://drive.google.com/a/climate.com/file/d/0B8AfRcot4nsIYmc3alpTeTZpLWc
Tilmann Gneiting and Adrian E. Raftery. Strictly proper scoring rules,
prediction, and estimation, 2005. University of Washington Department of
Statistics Technical Report no. 463R.
https://www.stat.washington.edu/research/reports/2004/tr463R.pdf
"""
machine_eps = np.finfo(float).eps
forecasts = np.asarray(forecasts)
if (forecasts < 0.0).any() or (forecasts > (1.0 + machine_eps)).any():
raise ValueError('forecasts must not be outside of the unit interval '
'[0, 1]')
observations = np.asarray(observations)
if observations.ndim > 0:
valid_obs = observations[~np.isnan(observations)]
else:
valid_obs = observations if not np.isnan(observations) else []
if not set(np.unique(valid_obs)) <= {0, 1}:
raise ValueError('observations can only contain 0, 1, or NaN')
return (forecasts - observations) ** 2
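# Worked example (illustrative values): forecasting 0.9 for an event that
# occurred scores (0.9 - 1)^2 = 0.01, and forecasting 0.2 for a non-event
# scores (0.2 - 0)^2 = 0.04:
#
#   brier_score([1, 0], [0.9, 0.2])   # -> approximately [0.01, 0.04]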
def _threshold_brier_score_vectorized(observations, forecasts, thresholds):
observations = np.asarray(observations)
thresholds = np.asarray(thresholds)
forecasts = np.asarray(forecasts)
def exceedances(x):
# NaN safe calculation of threshold exceedances
# add an extra dimension to `x` and broadcast `thresholds` so that it
# varies along that new dimension
with suppress_warnings('invalid value encountered in greater'):
exceeds = (x[..., np.newaxis] >
thresholds.reshape((1,) * x.ndim + (-1,))
).astype(float)
if x.ndim == 0 and np.isnan(x):
exceeds[:] = np.nan
else:
exceeds[np.where(np.isnan(x))] = np.nan
return exceeds
binary_obs = exceedances(observations)
if observations.shape == forecasts.shape:
prob_forecast = exceedances(forecasts)
elif observations.shape == forecasts.shape[:-1]:
# axis=-2 should be the 'realization' axis, after swapping that axes
# to the end of forecasts and inserting one extra axis
with suppress_warnings('Mean of empty slice'):
prob_forecast = np.nanmean(exceedances(forecasts), axis=-2)
else:
raise AssertionError
return brier_score(binary_obs, prob_forecast)
try:
from ._gufuncs import _threshold_brier_score_gufunc as \
_threshold_brier_score_core
except ImportError:
_threshold_brier_score_core = _threshold_brier_score_vectorized
def threshold_brier_score(observations, forecasts, threshold, issorted=False,
axis=-1):
"""
Calculate the Brier scores of an ensemble for exceeding given thresholds.
According to the threshold decomposition of CRPS, the resulting Brier
scores can thus be summed along the last axis to calculate CRPS, as
.. math::
CRPS(F, x) = \int_z BS(F(z), H(z - x)) dz
where $F(x) = \int_{z \leq x} p(z) dz$ is the cumulative distribution
function (CDF) of the forecast distribution $F$, $x$ is a point estimate of
the true observation (observational error is neglected), $BS$ denotes the
Brier score and $H(x)$ denotes the Heaviside step function, which we define
here as equal to 1 for x >= 0 and 0 otherwise.
It is more efficient to calculate CRPS directly, but this threshold
decomposition itself provides a useful summary of model quality as a
function of measurement values.
The Numba accelerated version of this function is much faster for
calculating many thresholds simultaneously: it runs in time
O(N * (E * log(E) + T)), where N is the number of observations, E is the
ensemble size and T is the number of thresholds.
The non-Numba accelerated version requires time and space O(N * E * T).
Parameters
----------
observations : float or array_like
Observations float or array. Missing values (NaN) are given scores of
NaN.
forecasts : float or array_like
Array of forecasts ensemble members, of the same shape as observations
except for the extra axis corresponding to the ensemble. If forecasts
has the same shape as observations, the forecasts are treated as
deterministic. Missing values (NaN) are ignored.
threshold : scalar or 1d array_like
        Threshold value(s) at which to calculate exceedance Brier scores.
issorted : bool, optional
Optimization flag to indicate that the elements of `ensemble` are
already sorted along `axis`.
axis : int, optional
Axis in forecasts which corresponds to different ensemble members,
along which to calculate the threshold decomposition.
Returns
-------
out : np.ndarray
        Brier scores at each threshold for each ensemble forecast against the
observations. If ``threshold`` is a scalar, the result will have the
same shape as observations. Otherwise, it will have an additional final
dimension corresponding to the threshold levels.
References
----------
Gneiting, T. and Ranjan, R. Comparing density forecasts using threshold-
and quantile-weighted scoring rules. J. Bus. Econ. Stat. 29, 411-422
(2011). http://www.stat.washington.edu/research/reports/2008/tr533.pdf
See also
--------
crps_ensemble, brier_score
"""
observations = np.asarray(observations)
threshold = np.asarray(threshold)
forecasts = np.asarray(forecasts)
if axis != -1:
forecasts = move_axis_to_end(forecasts, axis)
if forecasts.shape == observations.shape:
forecasts = forecasts[..., np.newaxis]
if observations.shape != forecasts.shape[:-1]:
raise ValueError('observations and forecasts must have matching '
'shapes or matching shapes except along `axis=%s`'
% axis)
scalar_threshold = threshold.ndim == 0
if threshold.ndim > 1:
raise ValueError('threshold must be scalar or 1-dimensional')
if threshold.ndim == 1 and not (np.sort(threshold) == threshold).all():
raise ValueError('1D thresholds must be sorted')
threshold = threshold.reshape((1,) * observations.ndim + (-1,))
if not issorted:
forecasts = np.sort(forecasts, axis=-1)
result = _threshold_brier_score_core(observations, forecasts, threshold)
if scalar_threshold:
result = result.squeeze(axis=-1)
return result
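# Worked example (illustrative values): for observation 2.5 and the
# three-member ensemble [1, 2, 3], the exceedance probability is 1/3 at
# both thresholds below; the event occurred at threshold 2.0, giving
# BS = (1/3 - 1)^2 = 4/9, and did not occur at 2.75, giving (1/3)^2 = 1/9:
#
#   threshold_brier_score(2.5, [1.0, 2.0, 3.0], threshold=[2.0, 2.75])
#   # -> approximately [0.444, 0.111]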
| TheClimateCorporation/properscoring | properscoring/_brier.py | Python | apache-2.0 | 7,326 |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nailgun import consts
from nailgun import objects
from nailgun.db.sqlalchemy.models import Node
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.utils import reverse
class TestHandlers(BaseIntegrationTest):
def test_node_get(self):
node = self.env.create_node(api=False)
resp = self.app.get(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.assertEqual(node.id, resp.json_body['id'])
self.assertEqual(node.name, resp.json_body['name'])
self.assertEqual(node.mac, resp.json_body['mac'])
self.assertEqual(
node.pending_addition, resp.json_body['pending_addition'])
self.assertEqual(
node.pending_deletion, resp.json_body['pending_deletion'])
self.assertEqual(node.status, resp.json_body['status'])
self.assertEqual(
node.meta['cpu']['total'],
resp.json_body['meta']['cpu']['total']
)
self.assertEqual(node.meta['disks'], resp.json_body['meta']['disks'])
self.assertEqual(node.meta['memory'], resp.json_body['meta']['memory'])
def test_node_creation_fails_with_wrong_id(self):
node_id = '080000000003'
resp = self.app.post(
reverse('NodeCollectionHandler'),
jsonutils.dumps({'id': node_id,
'mac': self.env.generate_random_mac(),
'status': 'discover'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(400, resp.status_code)
def test_node_deletion(self):
node = self.env.create_node(api=False)
resp = self.app.delete(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
"",
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 200)
def test_node_valid_metadata_gets_updated(self):
new_metadata = self.env.default_metadata()
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'meta': new_metadata}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.db.refresh(node)
nodes = self.db.query(Node).filter(
Node.id == node.id
).all()
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].meta, new_metadata)
def test_node_hostname_gets_updated(self):
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.db.refresh(node)
# lets put the same hostname again
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.db.refresh(node)
nodes = self.db.query(Node).filter(
Node.id == node.id
).all()
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].hostname, 'new-name')
def test_node_hostname_gets_updated_invalid(self):
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': '!#invalid_%&name'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(400, resp.status_code)
def test_node_hostname_gets_updated_ssl_conflict(self):
cluster = self.env.create_cluster(api=False)
node = self.env.create_node(cluster_id=cluster.id)
cluster_attrs = objects.Cluster.get_attributes(cluster).editable
test_hostname = 'test-hostname'
cluster_attrs['public_ssl']['hostname']['value'] = test_hostname
objects.Cluster.update_attributes(
cluster, {'editable': cluster_attrs})
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': test_hostname}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(400, resp.status_code)
self.assertEqual(
"New hostname '{0}' conflicts with public TLS endpoint"
.format(test_hostname), resp.json_body['message'])
def test_node_hostname_gets_updated_after_provisioning_starts(self):
node = self.env.create_node(api=False,
status=consts.NODE_STATUSES.provisioning)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(403, resp.status_code)
self.assertEqual(
'Node hostname may be changed only before provisioning.',
resp.json_body['message'])
def test_node_hostname_gets_updated_duplicate(self):
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.db.refresh(node)
node_2 = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node_2.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(409, resp.status_code)
def test_node_valid_status_gets_updated(self):
node = self.env.create_node(api=False)
params = {'status': 'error'}
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps(params),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
def test_node_action_flags_are_set(self):
flags = ['pending_addition', 'pending_deletion']
node = self.env.create_node(api=False)
for flag in flags:
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({flag: True}),
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
self.db.refresh(node)
node_from_db = self.db.query(Node).filter(
Node.id == node.id
).first()
for flag in flags:
self.assertEqual(getattr(node_from_db, flag), True)
def test_put_returns_400_if_no_body(self):
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
"",
headers=self.default_headers,
expect_errors=True)
self.assertEqual(resp.status_code, 400)
def test_put_returns_400_if_wrong_status(self):
node = self.env.create_node(api=False)
params = {'status': 'invalid_status'}
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps(params),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(resp.status_code, 400)
def test_do_not_create_notification_if_disks_meta_is_empty(self):
def get_notifications_count(**kwargs):
return objects.NotificationCollection.count(
objects.NotificationCollection.filter_by(None, **kwargs)
)
self.env.create(
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
]
)
node = self.env.nodes[0]
node.meta['disks'] = []
node = {
'id': node.id,
'meta': node.meta,
'mac': node.mac,
'status': node.status
}
before_count = get_notifications_count(node_id=node['id'])
for i in range(5):
response = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node),
headers=self.default_headers
)
self.assertEqual(response.status_code, 200)
# check there's no notification created
after_count = get_notifications_count(node_id=node['id'])
self.assertEqual(before_count, after_count)
def test_no_volumes_changes_if_node_is_locked(self):
self.env.create(
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True,
'status': consts.NODE_STATUSES.ready},
]
)
node = self.env.nodes[0]
node_data = {
'id': node.id,
'meta': node.meta,
'mac': node.mac,
'status': node.status
}
node_data['meta']['disks'] = []
response = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers
)
self.assertEqual(response.status_code, 200)
# check volumes data wasn't reset
self.assertGreater(len(node.meta['disks']), 0)
@fake_tasks()
def test_interface_changes_for_new_node(self):
# Creating cluster with node
self.env.create(
cluster_kwargs={
'name': 'test_name'
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True}
]
)
cluster = self.env.clusters[0]
        def filter_changes(chg_type, chg_list):
            # return a list (not a lazy filter object) so len() works below
            return [x for x in chg_list if x.get('name') == chg_type]
changes = filter_changes(
consts.CLUSTER_CHANGES.interfaces,
cluster['changes']
)
# Checking interfaces change added after node creation
        self.assertEqual(1, len(changes))
deployment_task = self.env.launch_deployment()
self.env.wait_ready(deployment_task)
changes = filter_changes(
consts.CLUSTER_CHANGES.interfaces,
cluster['changes']
)
# Checking no interfaces change after deployment
        self.assertEqual(0, len(changes))
def test_update_node_with_wrong_ip(self):
node = self.env.create_node(
api=False, ip='10.20.0.2',
status=consts.NODE_STATUSES.deploying)
ipaddress = '192.168.0.10'
self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps({'id': node.id,
'ip': ipaddress}),
headers=self.default_headers)
self.assertNotEqual(node.ip, ipaddress)
ipaddress = '10.20.0.25'
self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps({'id': node.id,
'ip': ipaddress}),
headers=self.default_headers)
self.assertEqual(node.ip, ipaddress)
def test_update_node_with_none_ip(self):
node = self.env.create_node(api=False, ip='10.20.0.2')
ipaddress = None
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps({'id': node.id,
'ip': ipaddress}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(resp.status_code, 400)
ipaddress = '10.20.0.4'
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps({'id': node.id,
'ip': ipaddress}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
| SmartInfrastructures/fuel-web-dev | nailgun/nailgun/test/integration/test_node_handler.py | Python | apache-2.0 | 12,873 |
try:
import exceptions
except ImportError: # Python 3
import builtins as exceptions
class ObjectToReturn:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def exception(self, name, msg=""):
exception = getattr(exceptions, name)
raise exception(msg)
| userzimmermann/robotframework-python3 | atest/testresources/testlibs/objecttoreturn.py | Python | apache-2.0 | 343 |
'''
Profile Formula Validation is an example of a plug-in to GUI menu that will profile formula execution.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
import os
from tkinter import simpledialog, messagebox
def profileFormulaMenuEntender(cntlr, menu):
# Extend menu with an item for the profile formula plugin
menu.add_command(label="Profile formula validation",
underline=0,
command=lambda: profileFormulaMenuCommand(cntlr) )
def profileFormulaMenuCommand(cntlr):
# save DTS menu item has been invoked
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog("No taxonomy loaded.")
return
# get file name into which to save log file while in foreground thread
profileReportFile = cntlr.uiFileDialog("save",
title=_("arelle - Save Formula Profile Report"),
initialdir=cntlr.config.setdefault("formulaProfileReportDir","."),
filetypes=[(_("Profile report file .log"), "*.log")],
defaultextension=".log")
if not profileReportFile:
return False
errMsg = ""
maxRunTime = 0
    while True:
        timeout = simpledialog.askstring(_("arelle - Set formula run time limit"),
                                         _("{0}You may enter the maximum number of minutes to run formulas.\n"
                                           "(Leave empty for no run time limitation.)".format(errMsg)),
                                         parent=cntlr.parent)
        if not timeout:
            break  # empty or cancelled: no run time limit
        try:
            maxRunTime = float(timeout)
            break
        except ValueError as err:
            errMsg = str(err) + "\n\n"
    excludeCompileTime = messagebox.askyesno(_("arelle - Exclude formula compile statistics"),
                                             _("Should formula compiling be excluded from the statistics?\n"
                                               "(Yes will make a separate compiling \"pass\" so that statistics include execution only.)"),
                                             parent=cntlr.parent)
cntlr.config["formulaProfileReportDir"] = os.path.dirname(profileReportFile)
cntlr.saveConfig()
# perform validation and profiling on background thread
import threading
thread = threading.Thread(target=lambda c=cntlr, f=profileReportFile, t=maxRunTime, e=excludeCompileTime: backgroundProfileFormula(c,f,t,e))
thread.daemon = True
thread.start()
def backgroundProfileFormula(cntlr, profileReportFile, maxRunTime, excludeCompileTime):
from arelle import Locale, XPathParser, ValidateXbrlDimensions, ValidateFormula
# build grammar before profiling (if this is the first pass, so it doesn't count in profile statistics)
XPathParser.initializeParser(cntlr.modelManager)
# load dimension defaults
ValidateXbrlDimensions.loadDimensionDefaults(cntlr.modelManager)
import cProfile, pstats, sys, time
# a minimal validation class for formula validator parameters that are needed
class Validate:
def __init__(self, modelXbrl, maxRunTime):
self.modelXbrl = modelXbrl
self.parameters = None
self.validateSBRNL = False
self.maxFormulaRunTime = maxRunTime
def close(self):
self.__dict__.clear()
val = Validate(cntlr.modelManager.modelXbrl, maxRunTime)
formulaOptions = val.modelXbrl.modelManager.formulaOptions
if excludeCompileTime:
startedAt = time.time()
cntlr.addToLog(_("pre-compiling formulas before profiling"))
val.validateFormulaCompileOnly = True
ValidateFormula.validate(val)
del val.validateFormulaCompileOnly
cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale,
_("formula pre-compiling completed in %.2f secs"),
time.time() - startedAt))
cntlr.addToLog(_("executing formulas for profiling"))
else:
cntlr.addToLog(_("compiling and executing formulas for profiling"))
startedAt = time.time()
statsFile = profileReportFile + ".bin"
cProfile.runctx("ValidateFormula.validate(val)", globals(), locals(), statsFile)
cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale,
_("formula profiling completed in %.2f secs"),
time.time() - startedAt))
# dereference val
val.close()
# specify a file for log
priorStdOut = sys.stdout
sys.stdout = open(profileReportFile, "w")
statObj = pstats.Stats(statsFile)
statObj.strip_dirs()
statObj.sort_stats("time")
statObj.print_stats()
statObj.print_callees()
statObj.print_callers()
sys.stdout.flush()
sys.stdout.close()
del statObj
sys.stdout = priorStdOut
os.remove(statsFile)
__pluginInfo__ = {
'name': 'Profile Formula Validation',
'version': '1.0',
'description': "This plug-in adds a profiled formula validation. "
"Includes XPath compilation in the profile if it is the first validation of instance; "
"to exclude XPath compile statistics, validate first the normal way (e.g., toolbar button) "
"and then validate again using this profile formula validation plug-in. ",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'CntlrWinMain.Menu.Validation': profileFormulaMenuEntender,
}
| sternshus/arelle2.7 | svr-2.7/arelle/plugin/profileFormula.py | Python | apache-2.0 | 5,608 |
__source__ = 'https://leetcode.com/problems/verify-preorder-sequence-in-binary-search-tree/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/verify-preorder-sequence-in-binary-search-tree.py
# Time: O(n)
# Space: O(1)
# Stack
#
# Description: Leetcode # 255. Verify Preorder Sequence in Binary Search Tree
#
# Given an array of numbers, verify whether it is the correct preorder traversal sequence of a binary search tree.
#
# You may assume each number in the sequence is unique.
#
# Follow up:
# Could you do it using only constant space complexity?
#
# Hide Company Tags Zenefits
# Companies
# Zenefits
# Related Topics
# Tree Stack
# Similar Questions
# Binary Tree Preorder Traversal
#
import unittest
class Solution:
# @param {integer[]} preorder
# @return {boolean}
def verifyPreorder(self, preorder):
low = float("-inf")
i = -1
for p in preorder:
            if p < low:
return False
while i >= 0 and p > preorder[i]:
low = preorder[i]
i -= 1
i += 1
preorder[i] = p
return True
# Time: O(n)
# Space: O(h)
# 60ms 41.14%
class Solution2:
# @param {integer[]} preorder
# @return {boolean}
def verifyPreorder(self, preorder):
low = float("-inf")
path = []
for p in preorder:
if p < low:
return False
while path and p > path[-1]:
low = path[-1]
path.pop()
path.append(p)
return True
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought:
[10,7,4,8,6,40,23] should be false
# 31ms 56.09%
class Solution {
public boolean verifyPreorder(int[] preorder) {
int low = Integer.MIN_VALUE;
Stack<Integer> path = new Stack();
for (int p : preorder) {
if (p < low)
return false;
while (!path.empty() && p > path.peek())
low = path.pop();
path.push(p);
}
return true;
}
}
# Assume no duplicates (a BST does not allow them); the input array is
# reused in place as the stack, so no extra space is needed.
# index = top of the virtual stack kept in preorder[0..index];
# i = position that traverses the array.
# While the current value is smaller than the stack top we are still
# descending into a left subtree.
# When the current value is larger than the stack top we have entered a
# right subtree: pop every stack element smaller than the current value
# and take the last popped value as the new min, because once inside a
# right subtree we must never see a value below that subtree's root again.
# 2ms 100%
class Solution {
public boolean verifyPreorder(int[] preorder) {
int index = -1;
int min = Integer.MIN_VALUE;
for (int i = 0; i < preorder.length; i++) {
if (preorder[i] < min) {
return false;
}
while (index >= 0 && preorder[index] < preorder[i]) {
min = preorder[index--];
}
preorder[++index] = preorder[i];
}
return true;
}
}
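# Worked trace of the in-place version on [10, 7, 4, 8, 6, 40, 23]:
# 10, 7, 4 are pushed (strictly decreasing, still a left subtree);
# reading 8 pops 4 and 7, so min becomes 7 and 8 is pushed;
# reading 6 < min = 7 returns false, matching the expected answer above.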
# 428ms 14.72%
class Solution {
public boolean verifyPreorder(int[] preorder) {
return verifyPreorder(preorder, 0, preorder.length - 1);
}
private boolean verifyPreorder(int[] preorder, int start, int end) {
if (start >= end) {
return true;
}
int root = preorder[start];
int index = start + 1;
while (index <= end && preorder[index] < root) {
index++;
}
for (int i = index + 1; i<= end; i++) {
if (preorder[i] < root) {
return false;
}
}
return verifyPreorder(preorder, start + 1, index - 1) && verifyPreorder(preorder, index, end);
}
}
'''
| JulyKikuAkita/PythonPrac | cs15211/VerifyPreorderSequenceinBinarySearchTree.py | Python | apache-2.0 | 4,042 |