| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.magic.config.view import definition
# -----------------------------------------------------------------
default_extensions = ["fits"]
# -----------------------------------------------------------------
# Image paths
definition.add_positional_optional("image_paths", "filepath_list", "image paths")
definition.add_optional("extensions", "string_list", "extensions of files to load", default_extensions)
definition.add_flag("recursive", "recursively load from directory", False)
definition.add_optional("contains", "string_list", "strings that have to be contained in the name of the files to be loaded")
definition.add_optional("not_contains", "string_list", "strings that cannot be contained in the name of the files to be loaded")
definition.add_optional("exact_name", "string_list", "exact name(s)")
definition.add_optional("exact_not_name", "string_list", "exact not name(s)")
# Regions
definition.add_optional("regions_prefix", "string", "prefix of regions filenames", "")
definition.add_optional("regions_suffix", "string", "suffix of regions filenames", "")
definition.add_optional("regions", "string", "exact filename for the regions files (or absolute path)")
definition.add_optional("regions_extension", "string", "extension of regions files", "reg")
# -----------------------------------------------------------------
definition.add_optional("width", "positive_integer", "image width", 300)
definition.add_optional("height", "positive_integer", "image height", 300)
# -----------------------------------------------------------------
# Preload
definition.add_flag("preload_all", "preload all images", False)
definition.add_optional("preload", "string_list", "names for which to preload the image")
definition.add_flag("dynamic", "create the viewers dynamically", False)
# -----------------------------------------------------------------
definition.add_flag("info", "add info about the images", False)
# -----------------------------------------------------------------
definition.add_optional("max_ncharacters_title", "positive_integer", "maximum number of characters in the titles before breaking line", 45)
# -----------------------------------------------------------------
| SKIRT/PTS | magic/config/multiview.py | Python | agpl-3.0 | 2,674 |
import military_status
| karim-omran/openerp-addons | military_status/__init__.py | Python | agpl-3.0 | 23 |
# -*- coding: utf-8 -*-
"""
Settings for Bok Choy tests that are used when running LMS.
Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running the tests
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and serving static files
from the same directory.
"""
import os
from path import Path as path
from tempfile import mkdtemp
from django.utils.translation import ugettext_lazy
from openedx.core.release import RELEASE_LINE
CONFIG_ROOT = path(__file__).abspath().dirname()
TEST_ROOT = CONFIG_ROOT.dirname().dirname() / "test_root"
########################## Prod-like settings ###################################
# These should be as close as possible to the settings we use in production.
# As in prod, we read in environment and auth variables from JSON files.
# Unlike in prod, we use the JSON files stored in this repo.
# This is a convenience for ensuring (a) that we can consistently find the files
# and (b) that the files are the same in Jenkins as in local dev.
os.environ['SERVICE_VARIANT'] = 'bok_choy_docker' if 'BOK_CHOY_HOSTNAME' in os.environ else 'bok_choy'
os.environ['CONFIG_ROOT'] = CONFIG_ROOT
from .production import * # pylint: disable=wildcard-import, unused-wildcard-import, wrong-import-position
######################### Testing overrides ####################################
# Redirect to the test_root folder within the repo
GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath()
LOG_DIR = (TEST_ROOT / "log").abspath()
# Configure modulestore to use the test folder within the repo
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': (TEST_ROOT / "data").abspath(),
},
xml_store_options={
'data_dir': (TEST_ROOT / "data").abspath(),
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
# Capture the console log via template includes, until webdriver supports log capture again
CAPTURE_CONSOLE_LOG = True
PLATFORM_NAME = ugettext_lazy(u"édX")
PLATFORM_DESCRIPTION = ugettext_lazy(u"Open édX Platform")
# We need to test different scenarios; the following setting effectively disables rate limiting
PASSWORD_RESET_EMAIL_RATE_LIMIT = {
'no_of_emails': 1,
'per_seconds': 1
}
############################ STATIC FILES #############################
# Enable debug so that static assets are served by Django
DEBUG = True
# Serve static files at /static directly from the staticfiles directory under test root
# Note: optimized files for testing are generated with settings from test_static_optimized
STATIC_URL = "/static/"
STATICFILES_FINDERS = ['django.contrib.staticfiles.finders.FileSystemFinder']
STATICFILES_DIRS = [
(TEST_ROOT / "staticfiles" / "lms").abspath(),
]
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = TEST_ROOT / "uploads"
# Webpack loader must use webpack output setting
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = TEST_ROOT / "staticfiles" / "lms" / "webpack-stats.json"
# Don't use compression during tests
PIPELINE_JS_COMPRESSOR = None
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
BLOCK_STRUCTURES_SETTINGS = dict(
# We have CELERY_ALWAYS_EAGER set to True, so there's no asynchronous
# code running and the celery routing is unimportant.
# It does not make sense to retry.
TASK_MAX_RETRIES=0,
# course publish task delay is irrelevant because the task is run synchronously
COURSE_PUBLISH_TASK_DELAY=0,
# retry delay is irrelevant because we never retry
TASK_DEFAULT_RETRY_DELAY=0,
)
###################### Grades ######################
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': os.path.join(mkdtemp(), 'edx-s3', 'grades'),
}
FEATURES['PERSISTENT_GRADES_ENABLED_FOR_ALL_TESTS'] = True
FEATURES['ASSUME_ZERO_GRADE_IF_ABSENT_FOR_ALL_TESTS'] = True
# Configure the LMS to use our stub XQueue implementation
XQUEUE_INTERFACE['url'] = 'http://localhost:8040'
# Configure the LMS to use our stub EdxNotes implementation
EDXNOTES_PUBLIC_API = 'http://localhost:8042/api/v1'
EDXNOTES_INTERNAL_API = 'http://localhost:8042/api/v1'
EDXNOTES_CONNECT_TIMEOUT = 10 # time in seconds
EDXNOTES_READ_TIMEOUT = 10 # time in seconds
NOTES_DISABLED_TABS = []
# Silence noisy logs
import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edxmako.shortcuts', logging.ERROR),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Enable milestones app
FEATURES['MILESTONES_APP'] = True
# Enable oauth authentication, which we test.
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
# Enable pre-requisite course
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Enable Course Discovery
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
# Enable student notes
FEATURES['ENABLE_EDXNOTES'] = True
# Enable teams feature
FEATURES['ENABLE_TEAMS'] = True
# Enable custom content licensing
FEATURES['LICENSING'] = True
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
FEATURES['RESTRICT_AUTOMATIC_AUTH'] = False
# Open up endpoint for faking Software Secure responses
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Disable instructor dash buttons for downloading course data when enrollment exceeds this number
FEATURES['MAX_ENROLLMENT_INSTR_BUTTONS'] = 4
FEATURES['ENABLE_ENROLLMENT_TRACK_USER_PARTITION'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
YOUTUBE_HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', '127.0.0.1')
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_PORT = 9080
YOUTUBE['TEST_TIMEOUT'] = 5000
YOUTUBE['API'] = "http://{0}:{1}/get_youtube_api/".format(YOUTUBE_HOSTNAME, YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://{0}:{1}/test_youtube/".format(YOUTUBE_HOSTNAME, YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "{0}:{1}/test_transcripts_youtube/".format(YOUTUBE_HOSTNAME, YOUTUBE_PORT)
############################# SECURITY SETTINGS ################################
# Default to advanced security in common.py, so tests can reset here to use
# a simpler security model
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ENABLE_MOBILE_REST_API'] = True # Enable the mobile REST API
FEATURES['ENABLE_VIDEO_BUMPER'] = True # Show video bumper in LMS
FEATURES['SHOW_BUMPER_PERIODICITY'] = 1
# Enable courseware search for tests
FEATURES['ENABLE_COURSEWARE_SEARCH'] = True
# Enable dashboard search for tests
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
# Enable the discussion home panel, which includes a subscription on/off setting for discussion digest emails.
FEATURES['ENABLE_DISCUSSION_HOME_PANEL'] = True
# Enable support for OpenBadges accomplishments
FEATURES['ENABLE_OPENBADGES'] = True
# Use MockSearchEngine as the search engine for test scenario
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Path at which to store the mock index
MOCK_SEARCH_BACKING_FILE = (
TEST_ROOT / "index_file.dat"
).abspath()
# Verify student settings
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
# this secret key should be the same as cms/envs/bok_choy.py's
SECRET_KEY = "very_secret_bok_choy_key"
# Set dummy values for profile image settings.
PROFILE_IMAGE_BACKEND = {
'class': 'storages.backends.overwrite.OverwriteStorage',
'options': {
'location': os.path.join(MEDIA_ROOT, 'profile-images/'),
'base_url': os.path.join(MEDIA_URL, 'profile-images/'),
},
}
# Make sure we test with the extended history table
FEATURES['ENABLE_CSMH_EXTENDED'] = True
INSTALLED_APPS.append('coursewarehistoryextended')
BADGING_BACKEND = 'lms.djangoapps.badges.backends.tests.dummy_backend.DummyBackend'
# Configure the LMS to use our stub eCommerce implementation
ECOMMERCE_API_URL = 'http://localhost:8043/api/v2/'
LMS_ROOT_URL = "http://localhost:{}".format(os.environ.get('BOK_CHOY_LMS_PORT', 8003))
CMS_BASE = "localhost:{}".format(os.environ.get('BOK_CHOY_CMS_PORT', 8031))
LOGIN_REDIRECT_WHITELIST = [CMS_BASE]
if RELEASE_LINE == "master":
# On master, acceptance tests use edX books, not the default Open edX books.
HELP_TOKENS_BOOKS = {
'learner': 'https://edx.readthedocs.io/projects/edx-guide-for-students',
'course_author': 'https://edx.readthedocs.io/projects/edx-partner-course-staff',
}
WAFFLE_OVERRIDE = True
############## Settings for Completion API #########################
COMPLETION_BY_VIEWING_DELAY_MS = 1000
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=wildcard-import
except ImportError:
pass
| philanthropy-u/edx-platform | lms/envs/bok_choy.py | Python | agpl-3.0 | 9,302 |
#!/usr/bin/env python
## \file adaptation.py
#  \brief python package for running mesh adaptation
# \author T. Lukaczyk, F. Palacios
# \version 6.2.0 "Falcon"
#
# The current SU2 release has been coordinated by the
# SU2 International Developers Society <www.su2devsociety.org>
# with selected contributions from the open-source community.
#
# The main research teams contributing to the current release are:
# - Prof. Juan J. Alonso's group at Stanford University.
# - Prof. Piero Colonna's group at Delft University of Technology.
# - Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# - Prof. Alberto Guardone's group at Polytechnic University of Milan.
# - Prof. Rafael Palacios' group at Imperial College London.
# - Prof. Vincent Terrapon's group at the University of Liege.
# - Prof. Edwin van der Weide's group at the University of Twente.
# - Lab. of New Concepts in Aeronautics at Tech. Institute of Aeronautics.
#
# Copyright 2012-2019, Francisco D. Palacios, Thomas D. Economon,
# Tim Albring, and the SU2 contributors.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
import copy
from .. import io as su2io
from ..io.data import append_nestdict
from .. import mesh as su2mesh
def adaptation ( config , kind='' ):
# local copy
konfig = copy.deepcopy(config)
# check kind
if kind: konfig['KIND_ADAPT'] = kind
kind = konfig.get('KIND_ADAPT','NONE')
if kind == 'NONE':
return {}
# check adapted?
# get adaptation function
adapt_function = su2mesh.adapt.name_map[kind]
# setup problem
suffix = 'adapt'
meshname_orig = konfig['MESH_FILENAME']
meshname_new = su2io.add_suffix( konfig['MESH_FILENAME'], suffix )
konfig['MESH_OUT_FILENAME'] = meshname_new
# Run Adaptation
info = adapt_function(konfig)
# update super config
config['MESH_FILENAME'] = meshname_new
config['KIND_ADAPT'] = kind
# files out
files = { 'MESH' : meshname_new }
# info out
append_nestdict( info, { 'FILES' : files } )
return info
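# A minimal usage sketch (not part of the original module), assuming a SU2
# configuration object loaded through su2io; the config file name and the
# adaptation kind below are hypothetical, and the kind must be a key of
# su2mesh.adapt.name_map:
#
#   config = su2io.Config('config.cfg')
#   info = adaptation(config, kind='FULL_FLOW')
#   print(info['FILES']['MESH'])  # name of the adapted mesh produced by the run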
| srange/SU2 | SU2_PY/SU2/run/adaptation.py | Python | lgpl-2.1 | 2,720 |
#
# (C) Copyright 2003-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""PyXMPP exceptions.
This module defines all exceptions raised by PyXMPP.
"""
# pylint: disable-msg=R0901
from __future__ import absolute_import, division
__docformat__ = "restructuredtext en"
import logging
class Error(StandardError):
"""Base class for all PyXMPP exceptions."""
pass
class JIDError(Error, ValueError):
"Exception raised when invalid JID is used"
pass
class PyXMPPIOError(Error):
"""Exception raised on I/O error."""
pass
class StreamError(Error):
"""Base class for all stream errors."""
pass
class StreamEncryptionRequired(StreamError):
"""Exception raised when stream encryption is requested, but not used."""
pass
class HostMismatch(StreamError):
"""Exception raised when the connected host name is other then requested."""
pass
class FatalStreamError(StreamError):
"""Base class for all fatal Stream exceptions.
When `FatalStreamError` is raised the stream is no longer usable."""
pass
class StreamParseError(FatalStreamError):
"""Raised when invalid XML is received in an XMPP stream."""
pass
class DNSError(FatalStreamError):
"""Raised when no host name could be resolved for the target."""
pass
class UnexpectedCNAMEError(DNSError):
"""Raised when CNAME record was found when A or AAAA was expected."""
pass
class StreamAuthenticationError(FatalStreamError):
"""Raised when stream authentication fails."""
pass
class TLSNegotiationFailed(FatalStreamError):
"""Raised when stream TLS negotiation fails."""
pass
class TLSError(FatalStreamError):
"""Raised on TLS error during stream processing."""
pass
class SASLNotAvailable(StreamAuthenticationError):
"""Raised when SASL authentication is requested, but not available."""
pass
class SASLMechanismNotAvailable(StreamAuthenticationError):
"""Raised when none of SASL authentication mechanisms requested is
available."""
pass
class SASLAuthenticationFailed(StreamAuthenticationError):
"""Raised when stream SASL authentication fails."""
pass
class StringprepError(Error):
"""Exception raised when string preparation results in error."""
pass
class ClientError(Error):
"""Raised on a client error."""
pass
class NoRouteError(Error):
"""Raised when a stanza cannot be routed internally."""
pass
class FatalClientError(ClientError):
"""Raised on a fatal client error."""
pass
class ClientStreamError(StreamError):
"""Raised on a client stream error."""
pass
class FatalClientStreamError(FatalStreamError):
"""Raised on a fatal client stream error."""
pass
class LegacyAuthenticationError(ClientStreamError):
"""Raised on a legacy authentication error."""
pass
class RegistrationError(ClientStreamError):
"""Raised on a in-band registration error."""
pass
class ComponentStreamError(StreamError):
"""Raised on a component error."""
pass
class FatalComponentStreamError(ComponentStreamError, FatalStreamError):
"""Raised on a fatal component error."""
pass
########################
# Protocol Errors
class ProtocolError(Error):
"""Raised when there is something wrong with a stanza processed.
When not processed earlier by an application, the exception will be catched
by the stanza dispatcher to return XMPP error to the stanza sender, when
allowed.
ProtocolErrors handled internally by PyXMPP will be logged via the logging
interface. Errors reported to the sender will be logged using
"pyxmpp2.ProtocolError.reported" channel and the ignored errors using
"pyxmpp2.ProtocolError.ignored" channel. Both with the "debug" level.
"""
logger_reported = logging.getLogger("pyxmpp2.ProtocolError.reported")
logger_ignored = logging.getLogger("pyxmpp2.ProtocolError.ignored")
def __init__(self, xmpp_name, message):
Error.__init__(self, xmpp_name, message)
@property
def xmpp_name(self):
"""XMPP error name which should be reported"""
return self.args[0]
@property
def message(self):
"""The error message."""
return self.args[1]
def log_reported(self):
"""Log message via the "pyxmpp.ProtocolError.reported" logger."""
self.logger_reported.debug(u"Protocol error detected: {0}"
.format(self.message))
def log_ignored(self):
"""Log message via the "pyxmpp.ProtocolError.ignored" logger."""
self.logger_ignored.debug(u"Protocol error detected: {0}"
.format(self.message))
def __unicode__(self):
return str(self.args[1])
def __repr__(self):
return "<ProtocolError %r %r>" % (self.xmpp_name, self.message)
class BadRequestProtocolError(ProtocolError):
"""Raised when invalid stanza is processed and 'bad-request' error should
be reported."""
def __init__(self, message):
ProtocolError.__init__(self, "bad-request", message)
class NotAcceptableProtocolError(ProtocolError):
"""Raised when invalid stanza is processed and 'not-acceptable' error
should be reported."""
def __init__(self, message):
ProtocolError.__init__(self, "not-acceptable", message)
class JIDMalformedProtocolError(ProtocolError, JIDError):
"""Raised when invalid JID is encountered."""
def __init__(self, message):
JIDError.__init__(self)
ProtocolError.__init__(self, "jid-malformed", message)
class FeatureNotImplementedProtocolError(ProtocolError):
"""Raised when stanza requests a feature which is not (yet) implemented."""
def __init__(self, message):
ProtocolError.__init__(self, "feature-not-implemented", message)
class ServiceUnavailableProtocolError(ProtocolError):
"""Raised when stanza requests a feature which is not (yet) implemented."""
def __init__(self, message):
ProtocolError.__init__(self, "service-unavailable", message)
class ResourceConstraintProtocolError(ProtocolError):
"""Raised when stanza requests a feature which is not (yet) implemented."""
def __init__(self, message):
ProtocolError.__init__(self, "resource-constraint", message)
# vi: sts=4 et sw=4
| Jajcus/pyxmpp2 | pyxmpp2/exceptions.py | Python | lgpl-2.1 | 6,975 |
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
from logging import getLogger
from time import sleep
from gofer.mp import Process, Pipe
from gofer.common import Thread
from gofer.rmi.context import Context
from gofer.rmi.model import protocol
from gofer.rmi.model.child import Call as Target
log = getLogger(__file__)
POLL = 0.10
class Call(protocol.Call):
"""
The parent-side of the RMI call invoked in a child process.
After the fork, the child invokes the method and relays events
back using the inter-process queue.
"""
def __call__(self):
"""
Invoke the RMI as follows:
- Fork
- Start the monitor.
- Read and dispatch reply messages.
:return: Whatever the method returned.
"""
pipe = Pipe()
target = Target(self.method, *self.args, **self.kwargs)
child = Process(target, pipe)
monitor = Monitor(Context.current(), child)
try:
child.start()
monitor.start()
pipe.writer.close()
retval = self.read(pipe.reader)
return retval
finally:
pipe.close()
monitor.stop()
child.wait()
def read(self, pipe):
"""
Read the reply queue and dispatch messages until *End* is raised.
:param pipe: A message queue.
:type pipe: gofer.mp.Reader
"""
while True:
try:
reply = protocol.Reply.read(pipe)
reply()
except protocol.End as end:
return end.result
class Monitor(Thread):
"""
Provides monitoring of cancellation.
When cancel is detected, the child process is terminated.
:ivar context: The RMI context.
:type context: Context
:ivar child: The child process.
:type child: Process
:ivar poll: Main polling loop boolean.
:type poll: bool
"""
NAME = 'monitor'
POLL = 0.10
def __init__(self, context, child):
"""
:param context: The RMI context.
:type context: Context
:param child: The child process.
:type child: Process
"""
super(Monitor, self).__init__(name=Monitor.NAME)
self.context = context
self.child = child
self.poll = True
self.setDaemon(True)
def stop(self):
"""
Stop the thread.
"""
self.poll = False
self.join()
def run(self):
"""
Test for cancellation.
When cancel is detected, the child process is terminated.
"""
while self.poll:
if self.context.cancelled():
self.child.terminate()
break
else:
sleep(Monitor.POLL)
class Result(protocol.Result):
"""
Called when a RESULT message is received.
"""
def __call__(self):
"""
:raise End: always.
"""
raise protocol.End(self.payload)
class Progress(protocol.Progress):
"""
Called when a PROGRESS message is received.
"""
def __call__(self):
"""
Relay to RMI context progress reporter.
"""
context = Context.current()
context.progress.__dict__.update(self.payload.__dict__)
context.progress.report()
class Error(protocol.Error):
"""
Called when an ERROR message is received.
"""
def __call__(self):
"""
An exception is raised to report the error.
:raise Exception: always.
"""
raise Exception(self.payload)
class Raised(protocol.Raised):
"""
Called when a RAISED (exception) message is received.
"""
def __call__(self):
"""
The reported exception is instantiated and raised.
:raise Exception: always.
"""
raise self.payload
class Ping(protocol.Ping):
"""
Called when a PING message is received.
"""
def __call__(self):
"""
Log the ping received from the child process.
"""
log.debug('pinged by %d', self.payload)
# register reply message handling.
protocol.Reply.register(Result.CODE, Result)
protocol.Reply.register(Progress.CODE, Progress)
protocol.Reply.register(Error.CODE, Error)
protocol.Reply.register(Raised.CODE, Raised)
protocol.Reply.register(Ping.CODE, Ping)
| jortel/gofer | src/gofer/rmi/model/parent.py | Python | lgpl-2.1 | 4,959 |
# TessuMod: Mod for integrating TeamSpeak into World of Tanks
# Copyright (C) 2016 Janne Hakonen
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
import xml.etree.ElementTree as ET
from ..infrastructure import gameapi
from ..infrastructure.timer import TimerMixin
from messenger.proto.events import g_messengerEvents
from PlayerEvents import g_playerEvents
class EnvironmentAdapter(object):
def get_mods_dirpath(self):
return gameapi.Environment.find_res_mods_version_path()
class BattleAdapter(TimerMixin):
POSITIONAL_DATA_PROVIDE_TIMEOUT = 0.1
def __init__(self, app):
super(BattleAdapter, self).__init__()
self.__app = app
g_playerEvents.onAvatarBecomePlayer += self.__on_avatar_become_player
g_playerEvents.onAccountBecomePlayer += self.__on_account_become_player
g_playerEvents.onAvatarReady += self.__on_avatar_ready
g_playerEvents.onAvatarBecomeNonPlayer += self.__on_avatar_become_non_player
g_messengerEvents.users.onUsersListReceived += self.__on_users_list_received
gameapi.Battle.patch_battle_replay_play(self.__on_battle_replay_play)
def get_camera_position(self):
return gameapi.Battle.get_camera_position()
def get_camera_direction(self):
return gameapi.Battle.get_camera_direction()
def get_vehicle(self, player_id):
result = {}
vehicle_id = gameapi.Battle.find_vehicle_id(lambda vehicle: vehicle["accountDBID"] == player_id)
if vehicle_id is None:
return result
vehicle = gameapi.Battle.get_vehicle(vehicle_id)
if vehicle:
result["is-alive"] = vehicle.get("isAlive", True)
entity = gameapi.Battle.get_entity(vehicle_id)
if entity and entity.position:
result["position"] = (entity.position.x, entity.position.y, entity.position.z)
return result
def __on_avatar_become_player(self):
self.__app["publish-gamenick-to-chatserver"]()
gameapi.Notifications.set_enabled(False)
def __on_account_become_player(self):
self.__app["publish-gamenick-to-chatserver"]()
def __on_avatar_ready(self):
self.__app["enable-positional-data-to-chatclient"](True)
self.on_timeout(self.POSITIONAL_DATA_PROVIDE_TIMEOUT, self.__on_provide_positional_data, repeat=True)
def __on_avatar_become_non_player(self):
self.__app["enable-positional-data-to-chatclient"](False)
self.off_timeout(self.__on_provide_positional_data)
gameapi.Notifications.set_enabled(True)
def __on_users_list_received(self, tags):
self.__app["populate-usercache-with-players"]()
def __on_provide_positional_data(self):
self.__app["provide-positional-data-to-chatclient"]()
def __on_battle_replay_play(self, original_self, original_method, *args, **kwargs):
self.__app["battle-replay-start"]()
return original_method(original_self, *args, **kwargs)
class PlayerAdapter(object):
def get_player_by_dbid(self, dbid):
return gameapi.Player.get_player_by_dbid(dbid)
def get_my_name(self):
return gameapi.Player.get_my_name()
def get_my_dbid(self):
return gameapi.Player.get_my_dbid()
def get_players(self, in_battle=False, in_prebattle=False, clanmembers=False, friends=False):
return gameapi.Player.get_players(in_battle, in_prebattle, clanmembers, friends)
class ChatIndicatorAdapter(object):
def __init__(self):
self.__speakers = set()
gameapi.VoiceChat.patch_is_participant_speaking(self.__on_is_participant_speaking)
def set_player_speaking(self, player, speaking):
if speaking and player["id"] not in self.__speakers:
self.__speakers.add(player["id"])
gameapi.VoiceChat.set_player_speaking(player["id"], True)
elif not speaking and player["id"] in self.__speakers:
self.__speakers.remove(player["id"])
gameapi.VoiceChat.set_player_speaking(player["id"], False)
def clear_all_players_speaking(self):
for speaker in self.__speakers:
gameapi.VoiceChat.set_player_speaking(speaker, False)
self.__speakers.clear()
def __on_is_participant_speaking(self, original_self, original_method, dbid):
'''Called by other game modules to determine current speaking status.'''
return True if dbid in self.__speakers else original_method(original_self, dbid)
class MinimapAdapter(object):
def __init__(self):
self.__running_animations = {}
self.__action = None
self.__interval = None
def set_action(self, action):
self.__action = action
def set_action_interval(self, interval):
self.__interval = interval
def set_player_speaking(self, player, speaking):
if not player["in_battle"]:
return
vehicle_id = player["vehicle_id"]
if speaking:
if vehicle_id not in self.__running_animations:
self.__running_animations[vehicle_id] = gameapi.MinimapMarkerAnimation(
vehicle_id, self.__interval, self.__action, self.__on_done)
self.__running_animations[vehicle_id].start()
else:
if vehicle_id in self.__running_animations:
self.__running_animations[vehicle_id].stop()
def clear_all_players_speaking(self):
for vehicle_id in self.__running_animations:
self.__running_animations[vehicle_id].stop()
def __on_done(self, vehicle_id):
self.__running_animations.pop(vehicle_id, None)
class NotificationsAdapter(object):
TSPLUGIN_INSTALL = "TessuModTSPluginInstall"
TSPLUGIN_MOREINFO = "TessuModTSPluginMoreInfo"
TSPLUGIN_IGNORED = "TessuModTSPluginIgnore"
def __init__(self, app):
self.__app = app
gameapi.Notifications.add_event_handler(self.TSPLUGIN_INSTALL, self.__on_plugin_install)
gameapi.Notifications.add_event_handler(self.TSPLUGIN_IGNORED, self.__on_plugin_ignore_toggled)
gameapi.Notifications.add_event_handler(self.TSPLUGIN_MOREINFO, self.__on_plugin_moreinfo_clicked)
self.__plugin_install_shown = False
def init(self):
gameapi.Notifications.init()
def show_info_message(self, message):
gameapi.Notifications.show_info_message(message)
def show_warning_message(self, message):
gameapi.Notifications.show_warning_message(message)
def show_error_message(self, message):
gameapi.Notifications.show_error_message(message)
def show_plugin_install_message(self, **data):
if not self.__plugin_install_shown:
tmpl_filepath = os.path.join(gameapi.Environment.find_res_mods_version_path(), "gui", "tessu_mod", "tsplugin_install_notification.xml")
with open(tmpl_filepath, "r") as tmpl_file:
params = self.__parse_xml(tmpl_file.read())
gameapi.Notifications.show_custom_message(
icon = params["icon"],
message = params["message"],
buttons_layout = params["buttons_layout"],
item = {
"moreinfo_url": "https://github.com/jhakonen/wot-teamspeak-mod/wiki/TeamSpeak-Plugins#tessumod-plugin",
"ignore_state": "off",
"install_action": self.TSPLUGIN_INSTALL,
"ignore_action": self.TSPLUGIN_IGNORED,
"moreinfo_action": self.TSPLUGIN_MOREINFO
}
)
self.__plugin_install_shown = True
def __parse_xml(self, xml_data):
root = ET.fromstring(xml_data)
params = {
"icon": root.findtext("./icon", default=""),
"message": self.__xml_element_contents_to_text(root.find("./message")),
"buttons_layout": []
}
for button in root.findall("./buttonsLayout/button"):
params["buttons_layout"].append({
"label": button.get("label", default=""),
"action": button.get("action", default=""),
"type": button.get("type", default="submit")
})
return params
def __xml_element_contents_to_text(self, element):
if element is None:
return ""
contents = []
contents.append(element.text or "")
for sub_element in element:
contents.append(ET.tostring(sub_element))
contents.append(element.tail or "")
return "".join(contents).strip()
def __on_plugin_install(self, type_id, msg_id, data):
self.__app["install-chatclient-plugin"]()
def __on_plugin_ignore_toggled(self, type_id, msg_id, data):
new_state = False if data["ignore_state"] == "on" else True
data["ignore_state"] = "on" if new_state else "off"
self.__app["ignore-chatclient-plugin-install-message"](new_state)
gameapi.Notifications.update_custom_message(type_id, msg_id, data)
def __on_plugin_moreinfo_clicked(self, type_id, msg_id, data):
self.__app["show-chatclient-plugin-info-url"](data["moreinfo_url"])
| WorldOfTanksMods/wot-teamspeak-mod | tessumod/src/scripts/client/gui/mods/tessumod/adapters/wotgame.py | Python | lgpl-2.1 | 8,770 |
#!/usr/bin/python
"""Test of window title output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("Down"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"1. Window Where Am I",
["BRAILLE LINE: 'GTK+ Code Demos'",
" VISIBLE: 'GTK+ Code Demos', cursor=0",
"SPEECH OUTPUT: 'GTK+ Code Demos'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| GNOME/orca | test/keystrokes/gtk-demo/role_window.py | Python | lgpl-2.1 | 682 |
# -*- coding: utf-8 -*-
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
'''Test cases for PySide API2 support'''
import unittest
import sys
from PySide2.QtGui import QIntValidator, QValidator
from PySide2.QtWidgets import QWidget, QSpinBox, QApplication
from helper import UsesQApplication
class WidgetValidatorQInt(QWidget, QIntValidator):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
QIntValidator.__init__(self, parent)
class WidgetValidatorQSpinBox(QSpinBox):
def __init__(self, parent=None):
QSpinBox.__init__(self, parent)
def fixup(self, text):
print("It was called!")
class DoubleQObjectInheritanceTest(UsesQApplication):
def testDouble(self):
'''Double inheritance from QObject classes'''
obj = WidgetValidatorQInt()
#QIntValidator methods
state, string, number = obj.validate('Test', 0)
self.assertEqual(state, QValidator.Invalid)
state, string, number = obj.validate('33', 0)
self.assertEqual(state, QValidator.Acceptable)
def testQSpinBox(self):
obj = WidgetValidatorQSpinBox()
obj.setRange(1, 10)
obj.setValue(0)
self.assertEqual(obj.value(), 1)
class QClipboardTest(UsesQApplication):
def testQClipboard(self):
# Skip this test on macOS because the clipboard is not available during the ssh session;
# this causes problems in the buildbot.
if sys.platform == 'darwin':
return
clip = QApplication.clipboard()
clip.setText("Testing this thing!")
text, subtype = clip.text("")
self.assertEqual(subtype, "plain")
self.assertEqual(text, "Testing this thing!")
if __name__ == '__main__':
unittest.main()
| qtproject/pyside-pyside | tests/QtWidgets/api2_test.py | Python | lgpl-2.1 | 2,985 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Copyright (c) 2010-2019 Denis Machard
# This file is part of the extensive automation project
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
# -------------------------------------------------------------------
"""
Generic model
"""
from ea.libs import Logger
import sys
import zlib
import base64
# unicode = str with python3
if sys.version_info > (3,):
unicode = str
try:
xrange
except NameError: # support python3
xrange = range
class GenericModel(Logger.ClassLogger):
"""
Data model for generic test
"""
def __init__(self, level=6, compressed=False):
"""
Generic model
"""
self.level = level
self.compressed = compressed
def toXml(self):
"""
Return the model serialized as XML.
"""
raise Exception('To reimplement')
def fixXML(self, data, key):
"""
Fix xml
@param data:
@type data:
@param key:
@type key:
"""
if isinstance(data[key], dict):
data[key] = [data[key]]
def fixPyXML(self, data, key):
"""
Fix xml
@param data:
@type data:
@param key:
@type key:
"""
if '@%s' % key in data:
nb = len(data[key])
tpl = []
for i in xrange(nb):
tpl.append({})
data['@%s' % key] = tpl
def write(self, absPath):
"""
Write the file to disk.
@param absPath:
@type absPath:
"""
ret = False
try:
xmlraw = self.toXml()
if xmlraw is None:
raise Exception("bad xml")
else:
f = open(absPath, 'wb')
raw = unicode(xmlraw).encode('utf-8')
if self.compressed:
f.write(zlib.compress(raw, self.level))
else:
f.write(raw)
f.close()
ret = True
except Exception as e:
self.error(e)
return ret
def getRaw(self):
"""
Return the file content, optionally compressed, encoded in base64.
"""
encoded = None
try:
xmlraw = self.toXml()
if xmlraw is None:
raise Exception("bad xml")
raw = unicode(xmlraw).encode('utf-8')
# compress and encode in base64 before to return it
if self.compressed:
compressed = zlib.compress(raw, self.level)
encoded = base64.b64encode(compressed)
else:
encoded = base64.b64encode(raw)
if sys.version_info > (3,):
encoded = encoded.decode("utf-8")
except Exception as e:
self.error(e)
return encoded
def load(self, absPath=None, rawData=None):
"""
Load xml content from a file or raw data
@param absPath:
@type absPath:
@param rawData:
@type rawData:
"""
if absPath is None and rawData is None:
self.error('absPath and rawData are equal to None')
return False
read_data = rawData
if rawData is None:
try:
f = open(absPath, 'rb')
read_data = f.read()
f.close()
except Exception as e:
self.error("open file: %s" % e)
return False
# to be backward compatible, try to decompress the file
try:
decompressed_data = zlib.decompress(read_data)
except Exception:
# return the file as plain text
return self.onLoad(decompressedData=read_data)
else:
return self.onLoad(decompressedData=decompressed_data)
def onLoad(self, decompressedData):
"""
Called on data model loading
"""
raise Exception('To reimplement')
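# A minimal subclass sketch (not part of the original module), illustrating the
# contract above: toXml() must return the XML string to persist and onLoad()
# receives the decompressed content when load() is called. All names below are
# hypothetical.
#
#   class DummyModel(GenericModel):
#       def toXml(self):
#           return '<dummy/>'
#       def onLoad(self, decompressedData):
#           self.content = decompressedData
#           return True
#
#   model = DummyModel(compressed=False)
#   model.write('/tmp/dummy.xml')
#   reloaded = DummyModel().load(absPath='/tmp/dummy.xml')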
| dmachard/extensive-testing | src/ea/libs/FileModels/GenericModel.py | Python | lgpl-2.1 | 4,751 |
#####################################################################
# app/controllers/dialog.py
#
# (c) Copyright 2015, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
from app import app, views
from flask import render_template_string, request, json
@app.route("/dialog/<dialog>", methods=['POST', 'GET'])
def renderDialog(dialog):
if hasattr(views, dialog):
dialogClass = getattr(views, dialog)
if request.method == 'POST':
dialogObject = dialogClass(postURL=request.url, **(request.args.to_dict()))
if dialogObject.validate_on_submit():
dialogObject.save()
return json.dumps("OK")
return json.dumps(dialogObject.errors)
else:
dialogObject = dialogClass(postURL=request.url, **(request.args.to_dict()))
dialogObject.load()
return render_template_string(dialogObject.render())
else:
return "Invalid dialog {}".format(dialog)
| bparzella/gemma | app/controllers/dialog.py | Python | lgpl-2.1 | 1,551 |
#!/usr/bin/python
"""
PyTACS Error Classes
"""
class PyTACSError(Exception):
"The root of all PyTACS errors and exceptions"
pass
class ConfigurationError(PyTACSError):
"Something in a configuration file was incorrect"
pass
| kremlinkev/pytacs | pytacs/Errors.py | Python | lgpl-2.1 | 229 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Canvas3D
A QGIS plugin
3D canvas
-------------------
begin : 2013-08-12
copyright : (C) 2013 by Oslandia
email : infos@oslandia.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def name():
return "Horao"
def description():
return "3D canvas"
def version():
return "Version 0.1"
def icon():
return "icon.png"
def qgisMinimumVersion():
return "2.0"
def author():
return "Oslandia"
def email():
return "infos@oslandia.com"
def classFactory(iface):
# load Canvas3D class from file Canvas3D
from canvas3d import Canvas3D
return Canvas3D(iface)
| Oslandia/horao | qgis_plugin/__init__.py | Python | lgpl-2.1 | 1,543 |
# python
# This file is generated by a program (mib2py).
import CISCO_SYSLOG_MIB
OIDMAP = {
'1.3.6.1.4.1.9.9.41': CISCO_SYSLOG_MIB.ciscoSyslogMIB,
'1.3.6.1.4.1.9.9.41.1': CISCO_SYSLOG_MIB.ciscoSyslogMIBObjects,
'1.3.6.1.4.1.9.9.41.1.1': CISCO_SYSLOG_MIB.clogBasic,
'1.3.6.1.4.1.9.9.41.1.2': CISCO_SYSLOG_MIB.clogHistory,
'1.3.6.1.4.1.9.9.41.2': CISCO_SYSLOG_MIB.ciscoSyslogMIBNotificationPrefix,
'1.3.6.1.4.1.9.9.41.2.0': CISCO_SYSLOG_MIB.ciscoSyslogMIBNotifications,
'1.3.6.1.4.1.9.9.41.3': CISCO_SYSLOG_MIB.ciscoSyslogMIBConformance,
'1.3.6.1.4.1.9.9.41.3.1': CISCO_SYSLOG_MIB.ciscoSyslogMIBCompliances,
'1.3.6.1.4.1.9.9.41.3.2': CISCO_SYSLOG_MIB.ciscoSyslogMIBGroups,
'1.3.6.1.4.1.9.9.41.1.1.1': CISCO_SYSLOG_MIB.clogNotificationsSent,
'1.3.6.1.4.1.9.9.41.1.1.2': CISCO_SYSLOG_MIB.clogNotificationsEnabled,
'1.3.6.1.4.1.9.9.41.1.1.3': CISCO_SYSLOG_MIB.clogMaxSeverity,
'1.3.6.1.4.1.9.9.41.1.1.4': CISCO_SYSLOG_MIB.clogMsgIgnores,
'1.3.6.1.4.1.9.9.41.1.1.5': CISCO_SYSLOG_MIB.clogMsgDrops,
'1.3.6.1.4.1.9.9.41.1.2.1': CISCO_SYSLOG_MIB.clogHistTableMaxLength,
'1.3.6.1.4.1.9.9.41.1.2.2': CISCO_SYSLOG_MIB.clogHistMsgsFlushed,
'1.3.6.1.4.1.9.9.41.1.2.3.1.1': CISCO_SYSLOG_MIB.clogHistIndex,
'1.3.6.1.4.1.9.9.41.1.2.3.1.2': CISCO_SYSLOG_MIB.clogHistFacility,
'1.3.6.1.4.1.9.9.41.1.2.3.1.3': CISCO_SYSLOG_MIB.clogHistSeverity,
'1.3.6.1.4.1.9.9.41.1.2.3.1.4': CISCO_SYSLOG_MIB.clogHistMsgName,
'1.3.6.1.4.1.9.9.41.1.2.3.1.5': CISCO_SYSLOG_MIB.clogHistMsgText,
'1.3.6.1.4.1.9.9.41.1.2.3.1.6': CISCO_SYSLOG_MIB.clogHistTimestamp,
'1.3.6.1.4.1.9.9.41.2.0.1': CISCO_SYSLOG_MIB.clogMessageGenerated,
'1.3.6.1.4.1.9.9.41.3.2.1': CISCO_SYSLOG_MIB.ciscoSyslogMIBGroup,
}
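# A minimal lookup sketch (not part of the generated file): OIDMAP maps dotted
# OID strings to node objects of the imported CISCO_SYSLOG_MIB module, so a
# resolver can translate an OID received in a notification, for example:
#
#   node = OIDMAP.get('1.3.6.1.4.1.9.9.41.1.2.1')  # CISCO_SYSLOG_MIB.clogHistTableMaxLength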
| xiangke/pycopia | mibs/pycopia/mibs/CISCO_SYSLOG_MIB_OID.py | Python | lgpl-2.1 | 1,663 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test EWA Dask-based resamplers."""
import logging
from unittest import mock
import numpy as np
import pytest
from pyproj import CRS
import pyresample.ewa
da = pytest.importorskip("dask.array")
xr = pytest.importorskip("xarray")
dask_ewa = pytest.importorskip("pyresample.ewa.dask_ewa")
legacy_dask_ewa = pytest.importorskip("pyresample.ewa._legacy_dask_ewa")
DaskEWAResampler = pyresample.ewa.DaskEWAResampler
LegacyDaskEWAResampler = pyresample.ewa.LegacyDaskEWAResampler
LOG = logging.getLogger(__name__)
def _fill_mask(data):
if np.issubdtype(data.dtype, np.floating):
return np.isnan(data)
elif np.issubdtype(data.dtype, np.integer):
return data == np.iinfo(data.dtype).max
else:
raise ValueError("Not sure how to get fill mask.")
def _get_test_array(input_shape, input_dtype, chunk_size):
if np.issubdtype(input_dtype, np.integer):
dinfo = np.iinfo(input_dtype)
data = da.random.randint(dinfo.min + 1, dinfo.max, size=input_shape,
chunks=chunk_size, dtype=input_dtype)
else:
data = da.random.random(input_shape, chunks=chunk_size).astype(input_dtype)
return data
def _get_test_swath_def(input_shape, chunk_size, geo_dims):
from pyresample.geometry import SwathDefinition
from pyresample.test.utils import create_test_latitude, create_test_longitude
lon_arr = create_test_longitude(-95.0, -75.0, input_shape, dtype=np.float64)
lat_arr = create_test_latitude(15.0, 30.0, input_shape, dtype=np.float64)
lons = da.from_array(lon_arr, chunks=chunk_size)
lats = da.from_array(lat_arr, chunks=chunk_size)
swath_def = SwathDefinition(
xr.DataArray(lons, dims=geo_dims),
xr.DataArray(lats, dims=geo_dims))
return swath_def
def _get_test_target_area(output_shape, output_proj=None):
from pyresample.geometry import AreaDefinition
from pyresample.utils import proj4_str_to_dict
if output_proj is None:
output_proj = ('+proj=lcc +datum=WGS84 +ellps=WGS84 '
'+lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs')
target = AreaDefinition(
'test_target',
'test_target',
'test_target',
proj4_str_to_dict(output_proj),
output_shape[1], # width
output_shape[0], # height
(-100000., -150000., 100000., 150000.),
)
return target
def get_test_data(input_shape=(100, 50), output_shape=(200, 100), output_proj=None,
input_chunks=10,
input_dims=('y', 'x'), input_dtype=np.float64):
"""Get common data objects used in testing.
Returns: tuple with the following elements
input_data_on_swath: DataArray with dimensions as if it is a swath.
input_swath: SwathDefinition of the above DataArray
target_area_def: AreaDefinition to be used as a target for resampling
"""
data = _get_test_array(input_shape, input_dtype, input_chunks)
ds1 = xr.DataArray(data,
dims=input_dims,
attrs={'name': 'test', 'test': 'test'})
if input_dims and 'bands' in input_dims:
ds1 = ds1.assign_coords(bands=list('RGBA'[:ds1.sizes['bands']]))
input_area_shape = tuple(ds1.sizes[dim] for dim in ds1.dims
if dim in ['y', 'x'])
geo_dims = ('y', 'x') if input_dims else None
swath_def = _get_test_swath_def(input_area_shape, input_chunks, geo_dims)
ds1.attrs['area'] = swath_def
crs = CRS.from_string('+proj=latlong +datum=WGS84 +ellps=WGS84')
ds1 = ds1.assign_coords(crs=crs)
target_area = _get_test_target_area(output_shape, output_proj)
return ds1, swath_def, target_area
def _create_second_test_data(swath_data):
swath_data2 = swath_data.copy(deep=True)
swath_data2.attrs['test'] = 'test2'
swath_data2.attrs['name'] = 'test2'
return swath_data2
def _data_attrs_coords_checks(new_data, output_shape, input_dtype, target_area,
test_attr, name_attr):
assert new_data.shape == output_shape
assert new_data.dtype == input_dtype
assert new_data.attrs['test'] == test_attr
assert new_data.attrs['name'] == name_attr
assert new_data.attrs['area'] is target_area
if new_data.ndim == 3:
assert list(new_data.coords['bands']) == ['R', 'G', 'B']
def _coord_and_crs_checks(new_data, target_area, has_bands=False):
assert 'y' in new_data.coords
assert 'x' in new_data.coords
if has_bands:
assert 'bands' in new_data.coords
assert 'crs' in new_data.coords
assert isinstance(new_data.coords['crs'].item(), CRS)
assert 'lcc' in new_data.coords['crs'].item().to_proj4()
assert new_data.coords['y'].attrs['units'] == 'meter'
assert new_data.coords['x'].attrs['units'] == 'meter'
assert target_area.crs == new_data.coords['crs'].item()
if has_bands:
np.testing.assert_equal(new_data.coords['bands'].values,
['R', 'G', 'B'])
def _get_num_chunks(source_swath, resampler_class, rows_per_scan=10):
if resampler_class is DaskEWAResampler:
# ignore column-wise chunks because DaskEWA should rechunk to use whole scans
num_chunks = len(source_swath.lons.chunks[0]) if rows_per_scan == 10 else 1
else:
num_chunks = len(source_swath.lons.chunks[0]) * len(source_swath.lons.chunks[1])
return num_chunks
class TestDaskEWAResampler:
"""Test Dask EWA resampler class."""
@pytest.mark.parametrize(
('resampler_class', 'resampler_mod'),
[
(DaskEWAResampler, dask_ewa),
(LegacyDaskEWAResampler, legacy_dask_ewa),
])
@pytest.mark.parametrize(
('input_shape', 'input_dims'),
[
((100, 50), ('y', 'x')),
((3, 100, 50), ('bands', 'y', 'x')),
]
)
@pytest.mark.parametrize('input_dtype', [np.float32, np.float64, np.int8])
@pytest.mark.parametrize('maximum_weight_mode', [False, True])
@pytest.mark.parametrize('rows_per_scan', [10, 0, 100])
def test_xarray_basic_ewa(self, resampler_class, resampler_mod,
input_shape, input_dims, input_dtype,
maximum_weight_mode, rows_per_scan):
"""Test EWA with basic xarray DataArrays."""
is_legacy = resampler_class is LegacyDaskEWAResampler
is_int = np.issubdtype(input_dtype, np.integer)
if is_legacy and is_int:
pytest.skip("Legacy dask resampler does not properly support "
"integer inputs.")
if is_legacy and rows_per_scan == 0:
pytest.skip("Legacy dask resampler does not support rows_per_scan "
"of 0.")
output_shape = (200, 100)
if len(input_shape) == 3:
output_shape = (input_shape[0], output_shape[0], output_shape[1])
swath_data, source_swath, target_area = get_test_data(
input_shape=input_shape, output_shape=output_shape[-2:],
input_dims=input_dims, input_dtype=input_dtype,
)
num_chunks = _get_num_chunks(source_swath, resampler_class, rows_per_scan)
with mock.patch.object(resampler_mod, 'll2cr', wraps=resampler_mod.ll2cr) as ll2cr, \
mock.patch.object(source_swath, 'get_lonlats', wraps=source_swath.get_lonlats) as get_lonlats:
resampler = resampler_class(source_swath, target_area)
new_data = resampler.resample(swath_data, rows_per_scan=rows_per_scan,
maximum_weight_mode=maximum_weight_mode)
_data_attrs_coords_checks(new_data, output_shape, input_dtype, target_area,
'test', 'test')
# make sure we can actually compute everything
new_data.compute()
lonlat_calls = get_lonlats.call_count
ll2cr_calls = ll2cr.call_count
# resample a different dataset and make sure cache is used
swath_data2 = _create_second_test_data(swath_data)
new_data = resampler.resample(swath_data2, rows_per_scan=rows_per_scan,
maximum_weight_mode=maximum_weight_mode)
_data_attrs_coords_checks(new_data, output_shape, input_dtype, target_area,
'test2', 'test2')
_coord_and_crs_checks(new_data, target_area,
has_bands='bands' in input_dims)
result = new_data.compute()
# ll2cr will be called once more because of the computation
assert ll2cr.call_count == ll2cr_calls + num_chunks
# but we should already have taken the lonlats from the SwathDefinition
assert get_lonlats.call_count == lonlat_calls
# check how many valid pixels we have
band_mult = 3 if 'bands' in result.dims else 1
fill_mask = _fill_mask(result.values)
assert np.count_nonzero(~fill_mask) == 468 * band_mult
@pytest.mark.parametrize(
('input_chunks', 'input_shape', 'input_dims'),
[
(10, (100, 50), ('y', 'x')),
((100, 50), (100, 50), ('y', 'x')),
(10, (3, 100, 50), ('bands', 'y', 'x')),
]
)
@pytest.mark.parametrize('input_dtype', [np.float32, np.float64, np.int8])
@pytest.mark.parametrize('maximum_weight_mode', [False, True])
def test_xarray_ewa_empty(self, input_chunks, input_shape, input_dims,
input_dtype, maximum_weight_mode):
"""Test EWA with xarray DataArrays where the result is all fills."""
# projection that should result in no output pixels
output_proj = ('+proj=lcc +datum=WGS84 +ellps=WGS84 '
'+lon_0=-55. +lat_0=25 +lat_1=25 +units=m +no_defs')
output_shape = (200, 100)
if len(input_shape) == 3:
output_shape = (input_shape[0], output_shape[0], output_shape[1])
# different chunk sizes produce different behaviors for dask reduction
swath_data, source_swath, target_area = get_test_data(
input_shape=input_shape, output_shape=output_shape[-2:],
input_chunks=input_chunks,
input_dims=input_dims, input_dtype=input_dtype,
output_proj=output_proj
)
resampler = DaskEWAResampler(source_swath, target_area)
new_data = resampler.resample(swath_data, rows_per_scan=10,
maximum_weight_mode=maximum_weight_mode)
_data_attrs_coords_checks(new_data, output_shape, input_dtype, target_area,
'test', 'test')
# make sure we can actually compute everything
computed_data = new_data.compute()
fill_value = 127 if np.issubdtype(input_dtype, np.integer) else np.nan
np.testing.assert_array_equal(computed_data, fill_value)
@pytest.mark.parametrize(
('input_shape', 'input_dims', 'maximum_weight_mode'),
[
((100, 50), ('y', 'x'), False),
# ((3, 100, 50), ('bands', 'y', 'x'), False),
((100, 50), ('y', 'x'), True),
# ((3, 100, 50), ('bands', 'y', 'x'), True),
]
)
def test_numpy_basic_ewa(self, input_shape, input_dims, maximum_weight_mode):
"""Test EWA with basic xarray DataArrays."""
from pyresample.geometry import SwathDefinition
output_shape = (200, 100)
if len(input_shape) == 3:
output_shape = (input_shape[0], output_shape[0], output_shape[1])
swath_data, source_swath, target_area = get_test_data(
input_shape=input_shape, output_shape=output_shape[-2:],
input_dims=input_dims,
)
swath_data = swath_data.data.astype(np.float32).compute()
source_swath = SwathDefinition(*source_swath.get_lonlats())
resampler = DaskEWAResampler(source_swath, target_area)
new_data = resampler.resample(swath_data, rows_per_scan=10,
maximum_weight_mode=maximum_weight_mode)
assert new_data.shape == output_shape
assert new_data.dtype == np.float32
assert isinstance(new_data, np.ndarray)
# check how many valid pixels we have
band_mult = 3 if len(output_shape) == 3 else 1
assert np.count_nonzero(~np.isnan(new_data)) == 468 * band_mult
@pytest.mark.parametrize(
('input_shape', 'input_dims', 'maximum_weight_mode'),
[
((100, 50), ('y', 'x'), False),
((3, 100, 50), ('bands', 'y', 'x'), False),
((100, 50), ('y', 'x'), True),
((3, 100, 50), ('bands', 'y', 'x'), True),
]
)
def test_compare_to_legacy(self, input_shape, input_dims, maximum_weight_mode):
"""Make sure new and legacy EWA algorithms produce the same results."""
output_shape = (200, 100)
if len(input_shape) == 3:
output_shape = (input_shape[0], output_shape[0], output_shape[1])
swath_data, source_swath, target_area = get_test_data(
input_shape=input_shape, output_shape=output_shape[-2:],
input_dims=input_dims,
)
swath_data.data = swath_data.data.astype(np.float32)
resampler = DaskEWAResampler(source_swath, target_area)
new_data = resampler.resample(swath_data, rows_per_scan=10,
maximum_weight_mode=maximum_weight_mode)
new_arr = new_data.compute()
legacy_resampler = LegacyDaskEWAResampler(source_swath, target_area)
legacy_data = legacy_resampler.resample(swath_data, rows_per_scan=10,
maximum_weight_mode=maximum_weight_mode)
legacy_arr = legacy_data.compute()
np.testing.assert_allclose(new_arr, legacy_arr)
@pytest.mark.parametrize(
('input_shape', 'input_dims', 'as_np'),
[
((100,), ('y',), False),
((4, 100, 50, 25), ('bands', 'y', 'x', 'time'), False),
((100,), ('y',), True),
((4, 100, 50, 25), ('bands', 'y', 'x', 'time'), True),
]
)
def test_bad_input(self, input_shape, input_dims, as_np):
"""Check that 1D array inputs are not currently supported."""
output_shape = (200, 100)
swath_data, source_swath, target_area = get_test_data(
input_shape=input_shape, output_shape=output_shape,
input_dims=input_dims,
)
swath_data.data = swath_data.data.astype(np.float32)
resampler = DaskEWAResampler(source_swath, target_area)
exp_exc = ValueError if len(input_shape) != 4 else NotImplementedError
with pytest.raises(exp_exc):
resampler.resample(swath_data, rows_per_scan=10)
|
davidh-ssec/pyresample
|
pyresample/test/test_dask_ewa.py
|
Python
|
lgpl-3.0
| 15,586
|
# encoding: utf8
# noinspection PyPep8Naming
from SmallScrewdriver import Point, Size, Rect
class Shelf(Rect):
def __init__(self, max_size, origin=Point()):
Rect.__init__(self, origin, Size())
self.maxSize = max_size
self.images = []
def addImage(self, image):
"""
Добавить изображение на полку
:param image: добавляемое изображение
:return: True если изображение может быть добавлено
False если не может
"""
free_size = Size(self.maxSize.width - self.size.width, self.maxSize.height)
image_size = Size(image.crop.size.height, image.crop.size.width) if image.rotated else image.crop.size
if image_size < free_size: # >= image_size
image.origin = Point(self.size.width, self.origin.y)
self.images.append(image)
self.size.width += image_size.width
if image_size.height > self.size.height:
self.size.height += image_size.height
return True
else:
return False
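# --- A minimal usage sketch (not part of the original module). It assumes an
# image-like object exposing `crop.size`, `rotated` and `origin` the way
# addImage() uses them, and that Size objects support `<` comparison as above.
if __name__ == '__main__':
    class _FakeCrop(object):
        def __init__(self, width, height):
            self.size = Size(width, height)

    class _FakeImage(object):
        def __init__(self, width, height):
            self.crop = _FakeCrop(width, height)
            self.rotated = False
            self.origin = Point()

    shelf = Shelf(Size(256, 64))
    print(shelf.addImage(_FakeImage(100, 50)))  # likely True: fits in the free space
    print(shelf.addImage(_FakeImage(300, 50)))  # likely False: wider than what is left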
|
Ingener74/Small-Screwdriver
|
SmallScrewdriver/Shelf/Shelf.py
|
Python
|
lgpl-3.0
| 1,165
|
from django.test import TestCase
from django.test.client import Client
from django.conf import settings
from models import *
from views import *
import datetime
"""
class InstallationTest(TestCase):
def test_urls(self):
from django.conf.urls import patterns, include, url
good_urlpatterns = patterns('',
url(r'',include("tinycms.urls"))
)
error_urlpatterns = patterns('',)
self.assertTrue(check_url(good_urlpatterns))
self.assertFalse(check_url(error_urlpatterns))
def test_languages(self):
pass
def test_templates(self):
pass
class ConfigurationTest(TestCase):
pass
class CreateAndShowPageTest(TestCase):
pass
"""
class ModellTest(TestCase):
def setUp(self):
Dispatcher.clear()
self.c512 =""
for i in range(0,511):
self.c512 += "a"
def test_model_normal(self):
testDispatch={}
page = Page(slug="test",template="tinycms/shelltest.html",is_active=True)
page.save()
page2 = Page(slug="test2",template="tinycms/shelltest.html",parent=page,is_active=True)
page2.save()
testDispatch[u'test/']=page
testDispatch[u'test/test2/']=page2
with self.assertRaises(Exception):
page = Page(slug="test",template="tinycms/shelltest.html",is_active=True)
page.save()
page = Page(slug=self.c512,template="tinycms/shelltest.html",is_active=True)
page.save()
testDispatch[unicode(self.c512+"/")]=page
cont = Content(page=page,value_name="main",language="ja",content="test")
cont.save()
Dispatcher.register()
self.assertEqual(Dispatcher.dispatchURLs,testDispatch)#,"Invalid dispatch url\n"+str(Dispatcher.dispatchURLs))
def test_slash(self):
page = Page(slug="/test/",template="tinycms/shelltest.html",is_active=True)
page.save()
page2 = Page(slug="/test2/",template="tinycms/shelltest.html",parent=page,is_active=True)
page2.save()
page3 = Page(slug="/test3",template="tinycms/shelltest.html",parent=page,is_active=True,url_overwrite="/test3")
page3.save()
page4 = Page(slug="/",template="tinycms/shelltest.html",is_active=True)
page4.save()
testDispatch={}
testDispatch[u'test/']=page
testDispatch[u'test/test2/']=page2
testDispatch[u'test3']=page3
testDispatch[u'']=page4
Dispatcher.register()
self.assertEqual(Dispatcher.dispatchURLs,testDispatch)#,"Invalid dispatch url\n"+str(Dispatcher.dispatchURLs))
def test_slash2(self):
page4 = Page(slug="home",template="tinycms/shelltest.html",is_active=True,url_overwrite="/")
page4.save()
testDispatch={}
testDispatch[u'']=page4
Dispatcher.register()
self.assertEqual(Dispatcher.dispatchURLs,testDispatch)#,"Invalid dispatch url\n"+str(Dispatcher.dispatchURLs))
class DummyRequest:
def __init__(self,user=None,GET={}):
self.user=user
self.GET=GET
self.POST={}
self.method="GET"
class ViewTest(TestCase):
def setUp(self):
Dispatcher.clear()
def test_content(self):
page = Page(slug="test",template="tinycms/shelltest.html",is_active=True)
page.save()
page2 = Page(slug="test2",template="tinycms/shelltest.html",parent=page,is_active=True)
page2.save()
cont = Content(page=page,value_name="main",language="ja",content="test")
cont.save()
cont = Content(page=page,value_name="main",language="en",content="test")
cont.save()
req = DummyRequest()
result = show_page(req,"test/")
candResult = '<html><body><p>test</p></body></html>'
self.assertEqual(result.content,candResult)
with self.assertRaises(Exception):
result = show_page(req,"test2/")
def test_menu(self):
page = Page(slug="test",template="tinycms/menutest.html",is_active=True)
page.save()
page2 = Page(slug="test2",template="tinycms/menutest.html",parent=page,is_active=True)
page2.save()
cont = Content(page=page,value_name="main",language="ja",content="test")
cont.save()
cont = Content(page=page,value_name="main",language="en",content="test")
cont.save()
cont = Content(page=page,value_name="menu_title",language="en",content="test")
cont.save()
cont = Content(page=page2,value_name="menu_title",language="en",content="test2")
cont.save()
req = DummyRequest()
result = show_page(req,"test/")
candResult = "<html><body><ul><li><a href='/en/test/'>test</a><ul><li><a href='/en/test/test2/'>test2</a></li></ul></li></ul><p>test</p></body></html>"
self.assertEqual(result.content,candResult)
def test_slash(self):
page4 = Page(slug="/",template="tinycms/shelltest.html",is_active=True)
page4.save()
cont = Content(page=page4,value_name="main",language="en",content="test")
cont.save()
cont = Content(page=page4,value_name="main",language="ja",content="test")
cont.save()
Dispatcher.register()
from django.test import Client
c = Client()
response = c.get('/en/')
self.assertEqual(response.status_code,200)
|
ccat/django_tinycms
|
tinycms/tests.py
|
Python
|
lgpl-3.0
| 5,389
|
from numpy import array, rot90, fliplr, array_equal
from checkerboardpuzzle_stone import Rotation
def generate_rotated_nparrays(nparray):
"""generate rotated and mirrored versions of given nparray."""
r1 = rot90(nparray)
r2 = rot90(r1)
r3 = rot90(r2)
f1 = fliplr(nparray)
f2 = fliplr(r1)
f3 = fliplr(r2)
f4 = fliplr(r3)
all_rot = [nparray,r1,r2,r3,f1,f2,f3,f4]
return all_rot
def generate_rotations(fields):
"""generate all rotations of that stone."""
#r1 = rot90(fields)
#r2 = rot90(r1)
#r3 = rot90(r2)
#f1 = fliplr(fields)
#f2 = fliplr(r1)
#f3 = fliplr(r2)
#f4 = fliplr(r3)
#all_rot = [r1,r2,r3,f1,f2,f3,f4]
all_rot = generate_rotated_nparrays(fields)
# check if rotations are equal
rotations = [] # [Rotation(fields)]
for r_new in all_rot:
l = len(filter(lambda r_old:array_equal(r_old.nparray,r_new), rotations))
if l > 1:
raise Exception('Rotations doubled? That should be impossible!')
elif l == 0:
# not in rotations yet, add
rotations = rotations + [Rotation(r_new)]
return rotations
def unique_nparrays(nparrays):
"""return unique list of nparrays."""
unique = []
for a in nparrays:
for u in unique:
if (a == u).all():
break
else:
unique = unique + [a]
return unique
def append_to_file(filepath, text):
"""append text to given file."""
with open(filepath, 'a') as myfile:
myfile.write(text)
myfile.close()
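# --- A small self-contained example (not part of the original module): generate
# the eight rotated/mirrored variants of a 3x3 stone and de-duplicate them with
# unique_nparrays(). Only numpy (already imported above) is needed.
if __name__ == '__main__':
    stone = array([[1, 1, 0],
                   [0, 1, 0],
                   [0, 1, 1]])
    variants = generate_rotated_nparrays(stone)
    print(len(variants))                   # always 8 raw variants
    print(len(unique_nparrays(variants)))  # 4 for this 180-degree symmetric shape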
|
pirius/draught-board-puzzle-aka-checkerboard-puzzle-solver
|
python/source/checkerboardpuzzle_utils.py
|
Python
|
lgpl-3.0
| 1,570
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import os
extensions = [
Extension("*", [os.path.join(os.path.dirname(__file__), "CSXCAD","*.pyx")],
language="c++", # generate C++ code
libraries = ['CSXCAD',]),
]
setup(
name="CSXCAD",
version = '0.6.2',
description = "Python interface for the CSXCAD library",
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
],
author = 'Thorsten Liebig',
author_email = 'Thorsten.Liebig@gmx.de',
maintainer = 'Thorsten Liebig',
maintainer_email = 'Thorsten.Liebig@gmx.de',
url = 'http://openEMS.de',
packages=["CSXCAD", ],
package_data={'CSXCAD': ['*.pxd']},
ext_modules = cythonize(extensions)
)
|
thliebig/CSXCAD
|
python/setup.py
|
Python
|
lgpl-3.0
| 1,243
|
import jsonrpclib
# server proxy object
url = "http://%s:%s/jsonrpc" % (HOST, PORT)
server = jsonrpclib.Server(url)
# log in the given database
uid = server.call(service="common", method="login", args=[DB, USER, PASS])
# helper function for invoking model methods
def invoke(model, method, *args):
args = [DB, uid, PASS, model, method] + list(args)
return server.call(service="object", method="execute", args=args)
# # create a new note
# args = {
# 'color' : 8,
# 'memo' : 'This is another note',
# 'create_uid': uid,
# }
# note_id = invoke('res.partner', 'read', args)
# print note_id
ids = [1]
print invoke('res.partner', 'read', ids)
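# --- Hypothetical follow-up calls (not in the original snippet): the same
# `invoke` helper can run any other ORM method. Model, method and field names
# below are illustrative assumptions in the usual OpenERP/Odoo style.
# partner_ids = invoke('res.partner', 'search', [('customer', '=', True)])
# print invoke('res.partner', 'read', partner_ids, ['name', 'email'])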
|
tintumonmartin/OPAC
|
rpc/jsonrpc.py
|
Python
|
lgpl-3.0
| 643
|
###############################################################################
# Copyright 2016 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "EC-CAS diags" package.
#
# "EC-CAS diags" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "EC-CAS diags" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "EC-CAS diags". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
def extract_timeseries (model, fieldname, units, lat, lon, location):
from ..common import number_of_levels, number_of_timesteps, convert
field = model.find_best(fieldname, maximize=(number_of_levels, number_of_timesteps))
field = field(lat=lat,lon=lon)
field = model.cache.write(field, prefix=model.name+'_'+fieldname+'_'+location)
field = convert(field,units)
return field
def shortexper_diffcheck(models, obs, location, outdir):
from pygeode.axis import Height
from os.path import exists
from pygeode.plot import plotvar
from .contouring import get_range, get_contours
from matplotlib import pyplot as pl
import numpy as np
from ..common import select_surface
co2_obs = obs.find_best('CO2')
co2_obs = select_surface(co2_obs)
# Cache the observation data, for faster subsequent access
# (uses same cache as timeseries diagnostics)
co2_obs = obs.cache.write(co2_obs, prefix=obs.name+'_sfc_CO2', split_time=False)
co2_obs = co2_obs(station=location)
lat = co2_obs.station.lat[0]
lon = co2_obs.station.lon[0] % 360
# Limit to the length of the experiment
test_field = extract_timeseries(models[0], 'CO2', 'ppm', lat, lon, location)
time = test_field.time.values
del test_field
co2_obs = co2_obs(time=(min(time),max(time)))
assert len(models) in (1,2)
if len(models) == 1:
fig = pl.figure(figsize=(15,15))
n = 2
else:
fig = pl.figure(figsize=(8,15))
n = 1
# Initialize ranges
ktn_min = float('inf')
ktn_max = float('-inf')
co2_min = float('inf')
co2_max = float('-inf')
co2_sfc_min = np.nanmin(co2_obs.get())
co2_sfc_max = np.nanmax(co2_obs.get())
if np.isnan(co2_sfc_min): co2_sfc_min = float('inf')
if np.isnan(co2_sfc_max): co2_sfc_max = float('-inf')
# Get the data, and compute the global ranges
for i,dataset in enumerate(models):
if dataset is None: continue
ktn = extract_timeseries(dataset,'eddy_diffusivity','m2 s-1',lat,lon,location)
mn, mx = get_range(ktn)
ktn_min = min(ktn_min, mn)
ktn_max = max(ktn_max, mx)
co2 = extract_timeseries(dataset,'CO2','ppm',lat,lon,location)
mn, mx = get_range(co2)
co2_min = min(co2_min, mn)
co2_max = max(co2_max, mx)
if not ( co2.hasaxis("hybrid") or co2.hasaxis("loghybrid") ):
raise TypeError("Unrecognized z axis type %s"%co2.getaxis("zaxis"))
co2_sfc_min = min(co2_sfc_min, co2(zaxis=1.0).min())
co2_sfc_max = max(co2_sfc_max, co2(zaxis=1.0).max())
# Do the plots
for i,dataset in enumerate(models):
if dataset is None: continue
ktn = extract_timeseries(dataset,'eddy_diffusivity','m2 s-1',lat,lon,location)
co2 = extract_timeseries(dataset,'CO2','ppm',lat,lon,location)
co2.name = 'CO2'
# Put the variables on a height coordinate
# TODO: proper vertical interpolation
gz = extract_timeseries(dataset,'geopotential_height','m',lat,lon,location)(i_time=0).squeeze()
# Match GZ to the tracer levels (in GEM4, GZ has both thermo/momentum levs)
co2_iz = np.searchsorted(gz.zaxis.values, co2.zaxis.values)
ktn_iz = np.searchsorted(gz.zaxis.values, ktn.zaxis.values)
co2_height = Height(gz.get(i_zaxis=co2_iz))
ktn_height = Height(gz.get(i_zaxis=ktn_iz))
ktn = ktn.replace_axes(zaxis=ktn_height)
co2 = co2.replace_axes(zaxis=co2_height)
pbl = extract_timeseries(dataset,'PBL_height','m',lat,lon,location)
# Adjust pbl to use the same height units for plotting.
pbl *= Height.plotatts.get('scalefactor',1)
axis = pl.subplot(3,n,0*n+i+1)
plotvar(ktn(z=(0,10000)), ax=axis, title='%s KTN (%s)'%(location,dataset.name), clevs=get_contours(ktn_min,ktn_max))
plotvar(pbl, color='white', ax=axis, hold=True)
axis = pl.subplot(3,n,1*n+i+1)
plotvar(co2(z=(0,10000)), ax=axis, title='%s CO2 (%s)'%(location,dataset.name), clevs=get_contours(co2_min,co2_max))
plotvar(pbl, color='white', ax=axis, hold=True)
axis = pl.subplot(3,n,2*n+i+1)
plotvar(co2(z=0), color='blue', ax=axis, title='%s CO2 (%s)'%(location,dataset.name))
plotvar(co2_obs, color='green', ax=axis, hold=True)
axis.set_ylim([co2_sfc_min,co2_sfc_max])
axis.legend ([dataset.title, 'Obs'])
outfile = outdir+"/%s_%s_diffcheck.png"%('_'.join(m.name for m in models),location)
if not exists(outfile):
fig.savefig(outfile)
pl.close(fig)
|
neishm/EC-CAS-diags
|
eccas_diags/diagnostics/shortexper_diffcheck.py
|
Python
|
lgpl-3.0
| 5,308
|
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from .geostrophic_problem import GeostrophicProblem
from .geostrophic_reduced_problem import GeostrophicReducedProblem
from .geostrophic_pod_galerkin_reduced_problem import GeostrophicPODGalerkinReducedProblem
__all__ = [
"GeostrophicProblem",
"GeostrophicReducedProblem",
"GeostrophicPODGalerkinReducedProblem"
]
|
mathLab/RBniCS
|
tutorials/11_quasi_geostrophic/problems/__init__.py
|
Python
|
lgpl-3.0
| 456
|
############################ Copyrights and license ############################
# #
# Copyright 2019 Nick Campbell <nicholas.j.campbell@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.NamedUser
import github.TimelineEventSource
class TimelineEvent(github.GithubObject.NonCompletableGithubObject):
"""
This class represents IssueTimelineEvents. The reference can be found here https://developer.github.com/v3/issues/timeline/
"""
def __repr__(self):
return self.get__repr__({"id": self._id.value})
@property
def actor(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
return self._actor.value
@property
def commit_id(self):
"""
:type: string
"""
return self._commit_id.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
return self._created_at.value
@property
def event(self):
"""
:type: string
"""
return self._event.value
@property
def id(self):
"""
:type: integer
"""
return self._id.value
@property
def node_id(self):
"""
:type: string
"""
return self._node_id.value
@property
def commit_url(self):
"""
:type: string
"""
return self._commit_url.value
@property
def source(self):
"""
:type: :class:`github.TimelineEventSource.TimelineEventSource`
"""
# only available on `cross-referenced` events.
if (
self.event == "cross-referenced"
and self._source is not github.GithubObject.NotSet
):
return self._source.value
return None
@property
def body(self):
"""
        :type: string
"""
if self.event == "commented" and self._body is not github.GithubObject.NotSet:
return self._body.value
return None
@property
def author_association(self):
"""
        :type: string
"""
if (
self.event == "commented"
and self._author_association is not github.GithubObject.NotSet
):
return self._author_association.value
return None
@property
def url(self):
"""
:type: string
"""
return self._url.value
def _initAttributes(self):
self._actor = github.GithubObject.NotSet
self._commit_id = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._event = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._node_id = github.GithubObject.NotSet
self._commit_url = github.GithubObject.NotSet
self._source = github.GithubObject.NotSet
self._body = github.GithubObject.NotSet
self._author_association = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "actor" in attributes: # pragma no branch
self._actor = self._makeClassAttribute(
github.NamedUser.NamedUser, attributes["actor"]
)
if "commit_id" in attributes: # pragma no branch
self._commit_id = self._makeStringAttribute(attributes["commit_id"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "event" in attributes: # pragma no branch
self._event = self._makeStringAttribute(attributes["event"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "commit_url" in attributes: # pragma no branch
self._commit_url = self._makeStringAttribute(attributes["commit_url"])
if "source" in attributes: # pragma no branch
self._source = self._makeClassAttribute(
github.TimelineEventSource.TimelineEventSource, attributes["source"]
)
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "author_association" in attributes: # pragma no branch
self._author_association = self._makeStringAttribute(
attributes["author_association"]
)
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
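# --- Usage sketch (not part of this class). TimelineEvent objects are normally
# obtained through PyGithub's Issue.get_timeline(); the token, repository and
# issue number below are placeholders.
#
#   from github import Github
#   gh = Github("<access token>")
#   issue = gh.get_repo("owner/repo").get_issue(number=1)
#   for event in issue.get_timeline():
#       print(event.event, event.created_at, event.actor)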
|
ahmad88me/PyGithub
|
github/TimelineEvent.py
|
Python
|
lgpl-3.0
| 6,237
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
from numpy import *
from os import system
import time
import sys
x = ssym("x",3,4)
f1 = SXFunction([x],[sin(x)])
f1.init()
a = msym("A",3,4)
b = msym("b",4,1)
[z] = f1.call([a])
c = mul(z,b)
c = 2 + c
f = MXFunction([a,b],[c])
f.init()
f.generateCode("f_mx.c")
system("gcc -fPIC -shared f_mx.c -o f_mx.so")
ef = ExternalFunction("./f_mx.so")
ef.init()
a_val = array([[1,2,3,3],[2,3,4,5],[3,4,5,6]])
b_val = array([7,6,5,4])
f.setInput(a_val,0);
f.setInput(b_val,1);
f.evaluate()
print f.getOutput()
ef.setInput(a_val,0);
ef.setInput(b_val,1);
ef.evaluate()
print ef.getOutput()
|
ghorn/debian-casadi
|
experimental/joel/mx_codegen.py
|
Python
|
lgpl-3.0
| 1,646
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from messages import notice, error
def options():
arq_input, arq_path = define_input()
output_choice, output = define_output()
return (arq_input, arq_path, output_choice, output)
def read_input(arq_input, arq_path):
if arq_input:
try:
fp = open(arq_path, "r")
except IOError:
sys.stderr.write(error["file missing"])
sys.exit(1)
strings = fp.readlines()
fp.close()
else:
sys.stdout.write(notice["input instructions"])
strings = []
line = sys.stdin.readline()
while line != "\n":
strings.append(line)
line = sys.stdin.readline()
return strings
def define_input():
arq_input = 0
while arq_input < 1 or arq_input > 2:
sys.stdout.write(notice["input location"])
try:
arq_input = int(sys.stdin.readline().strip())
except ValueError:
pass
if arq_input == 2:
arq_input = True
sys.stdout.write("\n\nDigite o caminho para o arquivo:\n> ")
arq_path = sys.stdin.readline().strip()
else:
arq_input = False
arq_path = None
return arq_input, arq_path
def define_output():
"""
Define se a saída do programa será em um arquivo ou na saída padrão.
Caso seja escolhido um arquivo, devolve um ponteiro para o mesmo.
"""
output_choice = 0
while output_choice < 1 or output_choice > 2:
sys.stdout.write(notice["output location"])
try:
output_choice = int(sys.stdin.readline().strip())
except ValueError:
output_choice = 0
if output_choice == 1:
output = sys.stdout
elif output_choice == 2:
sys.stdout.write("\n\nDigite o nome do arquivo de saída\n> ")
output = open(sys.stdin.readline().strip(), "w")
return output_choice, output
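# --- A minimal usage sketch (not part of the original module): wiring the
# helpers above together the way a caller would. It relies on the `messages`
# module with the `notice`/`error` dictionaries imported at the top.
if __name__ == '__main__':
    arq_input, arq_path, output_choice, output = options()
    for string in read_input(arq_input, arq_path):
        output.write(string)
    if output_choice == 2:
        output.close()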
|
netcriptus/Canivete
|
lib/IOHandler.py
|
Python
|
lgpl-3.0
| 1,776
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016, Maximilian Köhl <mail@koehlma.de>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from .. import base, common, error, handle
from ..library import lib
@base.handle_callback('uv_prepare_cb')
def uv_prepare_cb(prepare_handle):
"""
:type prepare_handle:
uv.Prepare
"""
prepare_handle.on_prepare(prepare_handle)
@handle.HandleTypes.PREPARE
class Prepare(handle.UVHandle):
"""
Prepare handles will run the given callback once per loop
iteration, right before polling for IO.
:raises uv.UVError:
error while initializing the handle
:param loop:
event loop the handle should run on
:param on_prepare:
callback which should run right before polling for IO after the
handle has been started
:type loop:
uv.Loop
:type on_prepare:
((uv.Prepare) -> None) | ((Any, uv.Prepare) -> None)
"""
__slots__ = ['uv_prepare', 'on_prepare']
uv_handle_type = 'uv_prepare_t*'
uv_handle_init = lib.uv_prepare_init
def __init__(self, loop=None, on_prepare=None):
super(Prepare, self).__init__(loop)
self.uv_prepare = self.base_handle.uv_object
self.on_prepare = on_prepare or common.dummy_callback
"""
Callback which should run right before polling for IO after the
handle has been started.
.. function:: on_prepare(prepare_handle)
:param prepare_handle:
handle the call originates from
:type prepare_handle:
uv.Prepare
:readonly:
False
:type:
((uv.Prepare) -> None) | ((Any, uv.Prepare) -> None)
"""
def start(self, on_prepare=None):
"""
Start the handle. The callback will run once per loop iteration
right before polling for IO from now on.
:raises uv.UVError:
error while starting the handle
:raises uv.ClosedHandleError:
handle has already been closed or is closing
:param on_prepare:
callback which should run right before polling for IO
(overrides the current callback if specified)
:type on_prepare:
((uv.Prepare) -> None) | ((Any, uv.Prepare) -> None)
"""
if self.closing:
raise error.ClosedHandleError()
self.on_prepare = on_prepare or self.on_prepare
code = lib.uv_prepare_start(self.uv_prepare, uv_prepare_cb)
if code != error.StatusCodes.SUCCESS:
raise error.UVError(code)
self.set_pending()
def stop(self):
"""
Stop the handle. The callback will no longer be called.
:raises uv.UVError:
error while stopping the handle
"""
if self.closing:
return
code = lib.uv_prepare_stop(self.uv_prepare)
if code != error.StatusCodes.SUCCESS:
raise error.UVError(code)
self.clear_pending()
__call__ = start
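# --- A short usage sketch (not part of this module). It assumes the package's
# top-level `uv.Loop`, `uv.Prepare` and `loop.run()` API; the callback stops
# the handle after the first iteration so the loop can exit.
#
#   import uv
#
#   def on_prepare(prepare_handle):
#       print('about to poll for IO')
#       prepare_handle.stop()
#
#   loop = uv.Loop()
#   prepare = uv.Prepare(loop, on_prepare=on_prepare)
#   prepare.start()
#   loop.run()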
|
koehlma/uv
|
uv/handles/prepare.py
|
Python
|
lgpl-3.0
| 3,660
|
from ajenti.api import *
from ajenti.ui import *
class TutorialPlugin(CategoryPlugin):
text = 'Tutorial' # name for the left pane
icon = '/dl/tutorial/icon.png'
folder = 'apps'
def get_ui(self):
ui = self.app.inflate('tutorial:main')
return ui
|
DmZ/ajenti
|
plugins/tutorial/main.py
|
Python
|
lgpl-3.0
| 283
|
## script to convert a lookup table of unicode characters from a javascript thing I found on
## github into a lookup table that can be stored in the flash on the oggbox
##
## https://github.com/yvg/js-replace-diacritics/blob/master/replace-diacritics.js
##
fr = open("diacritical_marks_js.txt", "rb")
d = fr.read()
fr.close()
fw = open("unicode_table.c", "wb")
d = d.splitlines()
cs = {}
for line in d:
k,v = line.strip().split(":")
if k.strip("\'") == "":
k = "\x00"
v = v.strip("/[]ig,\\u")
v = v.split("\\u")
v = map(lambda x: int(x,16), v)
for c in v:
if(c > 127):
cs[c] = k
keys = cs.keys()
keys.sort()
fw.write("#include \"unicode_table.h\"\n")
fw.write("\n")
fw.write("const int unicode_table_len = %d;\n" % len(keys))
fw.write("\n")
fw.write("const struct unicode_item unicode_table[] = {\n")
for k in keys:
if len(cs[k]) == 1:
fw.write(" {%d, \"\\x%02x\"},\n" % (k, ord(cs[k])))
else:
fw.write(" {%d, \"\\x%02x\\x%02x\"},\n" % (k, ord(cs[k][0]), ord(cs[k][1])))
print "Total characters:", len(cs)
print "Highest value:", max(cs.keys())
fw.write("};\n")
fw.close()
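# --- Illustration (not part of the script): the same format string, applied to
# a single hypothetical mapping (U+00E9, e-acute, replaced by "e"), shows the
# shape of one emitted table entry.
print "example entry:", " {%d, \"\\x%02x\"}," % (0xE9, ord("e"))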
|
hairymnstr/oggbox
|
font/convert_dict_to_c.py
|
Python
|
lgpl-3.0
| 1,124
|
# -*- coding: utf-8 -*-
# Python stdlib
import unittest
# py.test
import pytest
# Python tfstate
from tfstate.provider.aws import AwsResource, AwsSecurityGroupResource
from tfstate.exceptions import InvalidResource
# Unit tests
from unit_tests.base import BaseResourceUnitTest
@pytest.mark.provider_aws
class AwsSecurityGroupResourceUnitTest(BaseResourceUnitTest):
def test_object_constructor(self):
self.load_example_json('aws/aws_security_group/aws_security_group_example.json')
resource_name, resource_data = self.example_data.popitem()
sg_resource = AwsSecurityGroupResource(resource_name, resource_data)
self.assertIsInstance(
sg_resource, AwsResource, "AwsSecurityGroupResource object does not inherit from AwsResource")
self.assertEqual(
sg_resource.resource_type, "aws_security_group", "Resource type is not aws_security_group")
# Attribute checks
native_primary = sg_resource.primary_data
native_attributes = native_primary['attributes']
self.assertEqual(sg_resource.id, native_primary['id'], "Resource ID does not match")
self.assertEqual(
sg_resource.description, native_attributes['description'], "Resource description does not match")
self.assertEqual(sg_resource.name, native_attributes['name'], "Resource name does not match")
self.assertEqual(sg_resource.owner_id, native_attributes['owner_id'], "Resource owner_id does not match")
self.assertEqual(sg_resource.vpc_id, native_attributes['vpc_id'], "Resource vpc_id does not match")
# Tags checking
self.check_tags(sg_resource, native_attributes)
# Egress rules
self.assertTrue(hasattr(sg_resource, "egress"), "Resource egress does not exist")
egress_count = int(native_attributes['egress.#'])
self.assertEqual(len(sg_resource.egress), egress_count, 'Resource egress count does not match')
# Ingress rules
self.assertTrue(hasattr(sg_resource, "ingress"), "Resource ingress does not exist")
ingress_count = int(native_attributes['ingress.#'])
self.assertEqual(len(sg_resource.ingress), ingress_count, 'Resource ingress count does not match')
def test_object_constructor_invalid_type(self):
self.load_example_json(
'aws/aws_security_group/aws_security_group_example_invalid_type.json')
resource_name, resource_data = self.example_data.popitem()
with self.assertRaises(InvalidResource):
AwsSecurityGroupResource(resource_name, resource_data)
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(AwsSecurityGroupResourceUnitTest))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
rodynnz/python-tfstate
|
unit_tests/test_tfstate/test_provider/test_aws/test_aws_security_group.py
|
Python
|
lgpl-3.0
| 2,851
|
import os
from ajenti.com import *
from ajenti.utils import *
def is_installed():
return os.path.exists('/etc/samba/')
def restart():
shell('service smbd restart')
shell('service samba restart') # older samba packages
class SambaConfig(Plugin):
shares = {}
general = {}
users = {}
general_defaults = {
'server string': '',
'workgroup': 'WORKGROUP',
'interfaces': '',
'socket options': 'TCP_NODELAY',
'password server': '',
'security': 'user'
}
defaults = {
'available': 'yes',
'browseable': 'yes',
'valid users': '',
'path': '/dev/null',
'read only': 'yes',
'guest ok': 'yes',
'guest only': 'no'
}
editable = {
'Account Flags': '-c',
'User SID': '-U',
'Primary Group SID': '-G',
'Full Name': '-f',
'Home Directory': '-h',
'HomeDir Drive': '-D',
'Logon Script': '-S',
'Profile Path': '-p',
'Kickoff time': '-K'
}
fields = []
def load(self):
self.shares = {}
ss = open('/etc/samba/smb.conf', 'r').read().split('\n')
cs = ''
for s in ss:
s = s.strip()
try:
if s[0] != '#' and s[0] != ';':
if s[0] == '[':
cs = s[1:-1]
self.shares[cs] = self.new_share() if cs != 'global' else self.general_defaults.copy()
else:
s = s.split('=')
self.shares[cs][s[0].strip()] = s[1].strip()
except:
pass
self.general = self.shares['global']
self.shares.pop('global')
self.users = {}
ss = [s.split(',')[0].split(':')[0] for s in shell('pdbedit -L').split('\n')]
for s in ss:
if s != '':
x = shell('pdbedit -L -v -u ' + s).split('\n')
self.users[s] = {}
self.fields = []
for l in x:
try:
self.users[s][l.split(':')[0]] = l.split(':')[1].strip()
self.fields.append(l.split(':')[0])
except:
pass
def save(self):
with open('/etc/samba/smb.conf', 'w') as f:
f.write('[global]\n')
for k in self.general:
if not k in self.general_defaults or \
self.general[k] != self.general_defaults[k]:
f.write('\t%s = %s\n' % (k,self.general[k]))
for s in self.shares:
f.write('\n[%s]\n' % s)
for k in self.shares[s]:
if not k in self.defaults or self.shares[s][k] != self.defaults[k]:
f.write('\t%s = %s\n' % (k,self.shares[s][k]))
def modify_user(self, u, p, v):
shell('pdbedit -r -u %s %s "%s"' % (u,self.editable[p],v))
def del_user(self, u):
shell('pdbedit -x -u ' + u)
def add_user(self, u):
with open('/tmp/pdbeditnn', 'w') as f:
f.write('\n\n\n')
shell('pdbedit -a -t -u ' + u + ' < /tmp/pdbeditnn')
os.unlink('/tmp/pdbeditnn')
def get_shares(self):
return self.shares.keys()
def new_share(self):
return self.defaults.copy()
def set_param(self, share, param, value):
if share == 'general':
self.general[param] = value
else:
self.shares[share][param] = value
def set_param_from_vars(self, share, param, vars):
if share == 'general':
value = vars.getvalue(param, self.general_defaults[param])
else:
value = vars.getvalue(param, self.defaults[param])
self.set_param(share, param, value)
def set_param_from_vars_yn(self, share, param, vars):
if share == 'general':
value = 'yes' if vars.getvalue(param, self.general_defaults[param]) == '1' else 'no'
else:
value = 'yes' if vars.getvalue(param, self.defaults[param]) == '1' else 'no'
self.set_param(share, param, value)
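# --- A rough usage sketch (not part of the plugin). SambaConfig is an Ajenti
# Plugin, so a real instance comes from the plugin manager; assuming `config`
# is such an instance, a typical edit cycle would look like:
#
#   config.load()
#   config.shares['public'] = config.new_share()
#   config.set_param('public', 'path', '/srv/public')
#   config.set_param('public', 'read only', 'no')
#   config.save()
#   restart()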
|
DmZ/ajenti
|
plugins/samba/backend.py
|
Python
|
lgpl-3.0
| 4,175
|
# -*- coding: utf-8 -*-
#
# Zaguan documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 21 15:51:32 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import absolute_import
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../zaguan'))
sys.path.insert(0, os.path.abspath('../../zaguan/examples/colors'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.todo'
]
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = True
napoleon_use_ivar = True
intersphinx_mapping = {
'https://docs.python.org/3.7/': None,
'https://lazka.github.io/pgi-docs/Gtk-3.0/': None,
'https://lazka.github.io/pgi-docs/WebKit2-4.0/': None,
'https://lazka.github.io/pgi-docs/GLib-2.0/': None
}
autodoc_mock_imports = ['gi']
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zaguan'
copyright = u'2013-2015, Grupo MSA S.A.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2.0'
# The full version, including alpha/beta/rc tags.
release = '3.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
todo_include_todos = False
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Zaguandoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Zaguan.tex', u'Zaguan Documentation',
u'Msa Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zaguan', u'Zaguan Documentation',
[u'Msa Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Zaguan', u'Zaguan Documentation',
u'Msa Team', 'Zaguan', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
from sphinx.util.docfields import TypedField
from docutils import nodes
from sphinx import addnodes
def overrided_make_field(self,
types, # type: Dict[unicode, List[nodes.Node]]
domain, # type: unicode
items, # type: Tuple
env=None, # type: BuildEnvironment
):
# type: (...) -> nodes.field
def handle_item(fieldarg, content):
# type: (unicode, unicode) -> nodes.paragraph
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
#par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong, env=env))
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = u''.join(n.astext() for n in fieldtype)
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, env=env))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
TypedField.make_field = overrided_make_field
|
MSA-Argentina/zaguan
|
zaguan/docs/conf.py
|
Python
|
lgpl-3.0
| 10,456
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from datetime import datetime
from nose.tools import assert_equal
import leancloud
from leancloud import Conversation
from leancloud import SysMessage
def setup():
leancloud.client.USE_MASTER_KEY = None
leancloud.client.APP_ID = None
leancloud.client.APP_KEY = None
leancloud.client.MASTER_KEY = None
leancloud.init(os.environ["APP_ID"], master_key=os.environ["MASTER_KEY"])
def test_sys_message():
conv = Conversation("testConversation", is_system=True)
conv.save()
msg = SysMessage()
msg.set("conv", conv)
msg.set("bin", False)
msg.set("msgId", "testmsgid")
msg.set("from", "testfromclient")
msg.set("fromIp", "0.0.0.0")
msg.set("data", '{"_lctext":"test!","_lctype":-1}')
msg.set("timestamp", 1503908409224)
msg.set("ackAt", 1503908409237)
msg.save()
savedMsg = SysMessage.query.get(msg.id)
assert_equal(msg.conversation.id, savedMsg.conversation.id)
assert_equal(msg.message_id, savedMsg.message_id)
assert_equal(msg.from_client, savedMsg.from_client)
assert_equal(msg.from_ip, savedMsg.from_ip)
assert_equal(msg.data, savedMsg.data)
assert_equal(type(savedMsg.message_created_at), datetime)
assert_equal(type(savedMsg.ack_at), datetime)
msg.destroy()
|
leancloud/python-sdk
|
tests/test_sys_message.py
|
Python
|
lgpl-3.0
| 1,405
|
# -*- coding: utf-8 -*-
# Copyright(C) 2015 Baptiste Delpey
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.exceptions import BrowserIncorrectPassword, BrowserPasswordExpired
from weboob.browser import LoginBrowser, URL, need_login
from weboob.browser.switch import SiteSwitch
from weboob.tools.capabilities.bank.transactions import sorted_transactions
from .pages import LoginPage, ErrorPage, AccountsPage, TransactionsPage, \
TiCardPage, TiHistoPage, ComingPage, HistoPage, HomePage
class BnpcartesentrepriseBrowser(LoginBrowser):
BASEURL = 'https://www.cartesentreprise.bnpparibas.com'
login = URL('/ce_internet_public/seConnecter.builder.do', LoginPage)
error = URL('.*.seConnecter.event.do',
'.*.compteGestChgPWD.builder.do',
'/ce_internet_prive_ti/compteTituChgPWD.builder.do',
r'/ce_internet_corporate_ti/compteTituChgPWDCorporate.builder.do',
ErrorPage)
home = URL('/ce_internet_prive_ge/accueilInternetGe.builder.do',
'/ce_internet_(prive|corporate)_ti/accueilInternetTi(Corporate)?.builder.do', HomePage)
accounts = URL('/ce_internet_prive_ge/carteAffaireParc.builder.do',
'/ce_internet_prive_ge/carteAffaireParcChange.event.do',
'/ce_internet_prive_ge/pageParcCarteAffaire.event.do', AccountsPage)
coming = URL('/ce_internet_prive_ge/operationEnCours.builder.do',
'/ce_internet_prive_ge/operationEnCours.event.do', ComingPage)
history = URL('/ce_internet_prive_ge/operationHisto.builder.do',
'/ce_internet_prive_ge/operationHisto.event.do', HistoPage)
transactions = URL('ce_internet_prive_ge/operationEnCoursDetail.builder.do.*',
'ce_internet_prive_ge/pageOperationEnCoursDetail.event.do.*',
'ce_internet_prive_ge/operationHistoDetail.builder.do.*',
'ce_internet_prive_ge/pageOperationHistoDetail.event.do.*',
TransactionsPage)
ti_card = URL('/ce_internet_prive_ti/operationEnCoursDetail.builder.do',
'/ce_internet_(prive|corporate)_ti/operation(Corporate)?EnCoursDetail(Afficher|Appliquer)?.event.do.*',
'/ce_internet_prive_ti/pageOperationEnCoursDetail.event.do.*', TiCardPage)
ti_corporate_card = URL('/ce_internet_corporate_ti/operationCorporateEnCoursDetail.builder.do', TiCardPage)
ti_histo = URL('/ce_internet_prive_ti/operationHistoDetail.builder.do',
'/ce_internet_(prive|corporate)_ti/operation(Corporate)?HistoDetail(Afficher|Appliquer)?.event.do.*',
'/ce_internet_prive_ti/pageOperationHistoDetail.event.do.*', TiHistoPage)
ti_corporate_histo = URL('/ce_internet_corporate_ti/operationCorporateHistoDetail.builder.do', TiHistoPage)
TIMEOUT = 60.0
def __init__(self, type, *args, **kwargs):
super(BnpcartesentrepriseBrowser, self).__init__(*args, **kwargs)
self.type = type
self.is_corporate = False
self.transactions_dict = {}
def do_login(self):
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
self.login.stay_or_go()
assert self.login.is_here()
self.page.login(self.type, self.username, self.password)
if self.error.is_here() or self.page.is_error():
raise BrowserIncorrectPassword()
if self.type == '2' and self.page.is_corporate():
raise SiteSwitch('corporate')
# ti corporate and ge corporate are not detected the same way ..
if 'corporate' in self.page.url:
self.is_corporate = True
def ti_card_go(self):
if self.is_corporate:
self.ti_corporate_card.go()
else:
self.ti_card.go()
def ti_histo_go(self):
if self.is_corporate:
self.ti_corporate_histo.go()
else:
self.ti_histo.go()
@need_login
def iter_accounts(self):
if self.type == '1':
self.ti_card_go()
elif self.type == '2':
self.accounts.go()
if self.error.is_here():
raise BrowserPasswordExpired()
if self.type == '1':
for account in self.page.iter_accounts(rib=None):
self.page.expand(account=account)
account.coming = self.page.get_balance()
yield account
if self.type == '2':
for company in self.page.get_companies():
self.accounts.stay_or_go()
self.page.expand(company=company)
for rib in self.page.get_rib_list():
self.page.expand(rib=rib, company=company)
accounts = list(self.page.iter_accounts(rib=rib, company=company))
ids = {}
prev_rib = None
for account in accounts:
if account.id in ids:
self.logger.warning('duplicate account %r', account.id)
account.id += '_%s' % ''.join(account.label.split())
if prev_rib != account._rib:
self.coming.go()
self.page.expand(rib=account._rib, company=account._company)
account.coming = self.page.get_balance(account)
prev_rib = account._rib
ids[account.id] = account
yield account
# Could be the very same as non corporate but this shitty website seems
# completely bugged
def get_ti_corporate_transactions(self, account):
if account.id not in self.transactions_dict:
self.transactions_dict[account.id] = []
self.ti_histo_go()
self.page.expand(self.page.get_periods()[0], account=account, company=account._company)
for tr in sorted_transactions(self.page.get_history()):
self.transactions_dict[account.id].append(tr)
return self.transactions_dict[account.id]
def get_ti_transactions(self, account):
self.ti_card_go()
self.page.expand(account=account, company=account._company)
for tr in sorted_transactions(self.page.get_history()):
yield tr
self.ti_histo_go()
self.page.expand(self.page.get_periods()[0], account=account, company=account._company)
for period in self.page.get_periods():
self.page.expand(period, account=account, company=account._company)
for tr in sorted_transactions(self.page.get_history()):
yield tr
def get_ge_transactions(self, account):
transactions = []
self.coming.go()
self.page.expand(account=account, rib=account._rib, company=account._company)
link = self.page.get_link(account)
if link:
self.location(link)
transactions += self.page.get_history()
self.history.go()
for period in self.page.get_periods():
self.page.expand(period, rib=account._rib, company=account._company, account=account)
link = self.page.get_link(account)
if link:
self.location(link)
transactions += self.page.get_history()
self.history.go()
return sorted_transactions(transactions)
@need_login
def get_transactions(self, account):
if self.type == '1':
if self.is_corporate:
return self.get_ti_corporate_transactions(account)
return self.get_ti_transactions(account)
return self.get_ge_transactions(account)
|
vicnet/weboob
|
modules/bnpcards/browser.py
|
Python
|
lgpl-3.0
| 8,411
|
# Ifc4D - IFC scheduling utility
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of Ifc4D.
#
# Ifc4D is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ifc4D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Ifc4D. If not, see <http://www.gnu.org/licenses/>.
import datetime
from datetime import timedelta
import ifcopenshell
import ifcopenshell.api
import ifcopenshell.util.date
import xml.etree.ElementTree as ET
class MSP2Ifc:
def __init__(self):
self.xml = None
self.file = None
self.ns = None
self.work_plan = None
self.project = {}
self.calendars = {}
self.tasks = {}
self.resources = {}
self.RESOURCE_TYPES_MAPPING = {"1": "LABOR", "0": "MATERIAL", "2": None}
def execute(self):
self.parse_xml()
self.create_ifc()
def parse_xml(self):
tree = ET.parse(self.xml)
project = tree.getroot()
self.ns = {"pr": project.tag[1:].partition("}")[0]}
self.project["Name"] = project.findtext("pr:Name", namespaces=self.ns) or "Unnamed"
self.project["CalendarUID"] = project.findtext("pr:CalendarUID", namespaces=self.ns) or None
self.project["MinutesPerDay"] = project.findtext("pr:MinutesPerDay", namespaces=self.ns) or None
self.outline_level = 0
self.outline_parents = {}
self.parse_task_xml(project)
self.parse_calendar_xml(project)
# TODO Doesn't do anything right now
# self.parse_resources_xml(project)
def parse_relationship_xml(self, task):
relationships = {}
id = 0
if task.findall("pr:PredecessorLink", self.ns):
for relationship in task.findall("pr:PredecessorLink", self.ns):
relationships[id] = {
"PredecessorTask": relationship.find("pr:PredecessorUID", self.ns).text,
"Type": relationship.find("pr:Type", self.ns).text,
}
id += 1
return relationships
def parse_task_xml(self, project):
if self.project["MinutesPerDay"]:
hours_per_day = int(self.project["MinutesPerDay"]) / 60
else:
hours_per_day = 8
for task in project.find("pr:Tasks", self.ns):
task_id = task.find("pr:UID", self.ns).text
task_index_level = task.find("pr:OutlineLevel", self.ns).text
wbs_id = task.find("pr:WBS", self.ns).text
relationships = self.parse_relationship_xml(task)
outline_level = int(task.find("pr:OutlineLevel", self.ns).text)
if outline_level != 0:
parent_task = self.tasks[self.outline_parents[outline_level - 1]]
parent_task["subtasks"].append(task_id)
self.outline_level = outline_level
self.outline_parents[outline_level] = task_id
# Microsoft Project stores durations in terms of hours.
duration = ifcopenshell.util.date.ifc2datetime(task.find("pr:Duration", self.ns).text)
hours = duration.days * 24
hours += duration.seconds / 60 / 60
# Let's convert it into days, where days is the appropriate hours per day
duration = timedelta(days=hours / float(hours_per_day))
self.tasks[task_id] = {
"Name": task.find("pr:Name", self.ns).text,
"OutlineNumber": task.find("pr:OutlineNumber", self.ns).text,
"OutlineLevel": outline_level,
"Start": datetime.datetime.fromisoformat(task.find("pr:Start", self.ns).text),
"Finish": datetime.datetime.fromisoformat(task.find("pr:Finish", self.ns).text),
"Duration": duration,
"Priority": task.find("pr:Priority", self.ns).text,
"CalendarUID": task.find("pr:CalendarUID", self.ns).text,
"PredecessorTasks": relationships if relationships else None,
"subtasks": [],
"ifc": None,
}
def parse_calendar_xml(self, project):
for calendar in project.find("pr:Calendars", self.ns).findall("pr:Calendar", self.ns):
calendar_id = calendar.find("pr:UID", self.ns).text
week_days = []
for week_day in calendar.find("pr:WeekDays", self.ns).findall("pr:WeekDay", self.ns):
working_times = []
if week_day.find("pr:WorkingTimes", self.ns):
for working_time in week_day.find("pr:WorkingTimes", self.ns).findall("pr:WorkingTime", self.ns):
if working_time.find("pr:FromTime", self.ns) is None:
continue
working_times.append(
{
"Start": datetime.time.fromisoformat(working_time.find("pr:FromTime", self.ns).text),
"Finish": datetime.time.fromisoformat(working_time.find("pr:ToTime", self.ns).text),
}
)
week_days.append(
{
"DayType": week_day.find("pr:DayType", self.ns).text,
"WorkingTimes": working_times,
"ifc": None,
}
)
exceptions = {}
self.calendars[calendar_id] = {
"Name": calendar.find("pr:Name", self.ns).text,
"StandardWorkWeek": week_days,
}
def create_ifc(self):
if not self.file:
self.create_boilerplate_ifc()
if not self.work_plan:
self.work_plan = ifcopenshell.api.run("sequence.add_work_plan", self.file)
work_schedule = self.create_work_schedule()
self.create_calendars()
self.create_tasks(work_schedule)
self.create_rel_sequences()
def create_boilerplate_ifc(self):
self.file = ifcopenshell.file(schema="IFC4")
self.work_plan = self.file.create_entity("IfcWorkPlan")
def create_tasks(self, work_schedule):
for task_id in self.tasks:
task = self.tasks[task_id]
if task["OutlineLevel"] == 0:
self.create_task(task, work_schedule=work_schedule)
def create_work_schedule(self):
return ifcopenshell.api.run(
"sequence.add_work_schedule", self.file, name=self.project["Name"], work_plan=self.work_plan
)
def create_calendars(self):
for calendar in self.calendars.values():
calendar["ifc"] = ifcopenshell.api.run("sequence.add_work_calendar", self.file, name=calendar["Name"])
self.process_working_week(calendar["StandardWorkWeek"], calendar["ifc"])
def create_task(self, task, work_schedule=None, parent_task=None):
task["ifc"] = ifcopenshell.api.run(
"sequence.add_task",
self.file,
work_schedule=work_schedule if work_schedule else None,
parent_task=parent_task["ifc"] if parent_task else None,
)
calendar = None
if task["CalendarUID"] != "-1":
calendar = self.calendars[task["CalendarUID"]]["ifc"]
elif not parent_task and self.project["CalendarUID"]:
calendar = self.calendars[self.project["CalendarUID"]]["ifc"]
if calendar:
ifcopenshell.api.run(
"control.assign_control",
self.file,
**{
"relating_control": calendar,
"related_object": task["ifc"],
},
)
ifcopenshell.api.run(
"sequence.edit_task",
self.file,
task=task["ifc"],
attributes={
"Name": task["Name"],
"Identification": task["OutlineNumber"],
"IsMilestone": task["Start"] == task["Finish"],
},
)
task_time = ifcopenshell.api.run("sequence.add_task_time", self.file, task=task["ifc"])
ifcopenshell.api.run(
"sequence.edit_task_time",
self.file,
task_time=task_time,
attributes={
"ScheduleStart": task["Start"],
"ScheduleFinish": task["Finish"],
"DurationType": "WORKTIME" if task["Duration"] else None,
"ScheduleDuration": task["Duration"] if task["Duration"] else None,
},
)
for subtask_id in task["subtasks"]:
self.create_task(self.tasks[subtask_id], parent_task=task)
def process_working_week(self, week, calendar):
day_map = {
"1": 7, # Sunday
"2": 1, # Monday
"3": 2, # Tuesday
"4": 3, # Wednesday
"5": 4, # Thursday
"6": 5, # Friday
"7": 6, # Saturday
}
for day in week:
if day["ifc"]:
continue
day["ifc"] = ifcopenshell.api.run(
"sequence.add_work_time", self.file, work_calendar=calendar, time_type="WorkingTimes"
)
weekday_component = [day_map[day["DayType"]]]
for day2 in week:
if day["DayType"] == day2["DayType"]:
continue
if day["WorkingTimes"] == day2["WorkingTimes"]:
weekday_component.append(day_map[day2["DayType"]])
# Don't process the next day, as we can group it
day2["ifc"] = day["ifc"]
work_time_name = "Weekdays: {}".format(", ".join([str(c) for c in sorted(weekday_component)]))
ifcopenshell.api.run(
"sequence.edit_work_time",
self.file,
work_time=day["ifc"],
attributes={"Name": work_time_name},
)
recurrence = ifcopenshell.api.run(
"sequence.assign_recurrence_pattern", self.file, parent=day["ifc"], recurrence_type="WEEKLY"
)
ifcopenshell.api.run(
"sequence.edit_recurrence_pattern",
self.file,
recurrence_pattern=recurrence,
attributes={"WeekdayComponent": weekday_component},
)
for work_time in day["WorkingTimes"]:
ifcopenshell.api.run(
"sequence.add_time_period",
self.file,
recurrence_pattern=recurrence,
start_time=work_time["Start"],
end_time=work_time["Finish"],
)
def create_rel_sequences(self):
self.sequence_type_map = {
"0": "FINISH_FINISH",
"1": "FINISH_START",
"2": "START_FINISH",
"3": "START_START",
}
for task in self.tasks.values():
if not task["PredecessorTasks"]:
continue
for predecessor in task["PredecessorTasks"].values():
rel_sequence = ifcopenshell.api.run(
"sequence.assign_sequence",
self.file,
related_process=task["ifc"],
relating_process=self.tasks[predecessor["PredecessorTask"]]["ifc"],
)
if predecessor["Type"]:
ifcopenshell.api.run(
"sequence.edit_sequence",
self.file,
rel_sequence=rel_sequence,
attributes={"SequenceType": self.sequence_type_map[predecessor["Type"]]},
)
def parse_resources_xml(self, project):
resources_lst = project.find("pr:Resources", self.ns)
resources = resources_lst.findall("pr:Resource", self.ns)
# print("Resource text", resources[4].find("pr:Name", self.ns).text)
for resource in resources:
name = resource.find("pr:Name", self.ns)
id = resource.find("pr:ID", self.ns).text
if name is not None:
name = name.text
else:
# print("- No Name")
name = None
self.resources[id] = {
"Name": name,
"Code": resource.find("pr:UID", self.ns).text,
"ParentObjectId": None,
"Type": self.RESOURCE_TYPES_MAPPING[resource.find("pr:Type", self.ns).text],
"ifc": None,
"rel": None,
}
print("Resource found", self.resources)
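# Illustrative usage sketch (not part of the original module); the file names
# below are placeholders, and it assumes the module-level imports of this file
# (ifcopenshell, ifcopenshell.api, datetime) are in place:
#
#     msp2ifc = MSP2Ifc()
#     msp2ifc.xml = "schedule.xml"        # MS Project XML export to read
#     msp2ifc.execute()                   # parse the XML and build the IFC entities
#     msp2ifc.file.write("schedule.ifc")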
|
IfcOpenShell/IfcOpenShell
|
src/ifc4d/ifc4d/msp2ifc.py
|
Python
|
lgpl-3.0
| 13,042
|
# Generated by Django 2.2.3 on 2019-09-06 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='order',
name='braintree_id',
field=models.CharField(blank=True, max_length=150),
),
]
|
ch1huizong/dj
|
onlineshop/myshop/orders/migrations/0002_order_braintree_id.py
|
Python
|
unlicense
| 390
|
from setuptools import setup, find_packages
setup\
( name = 'kanone'
, version = '0.4.6'
, description = 'a validation library'
, long_description = open('README.txt').read()
, author = 'don`catnip'
, url = 'http://github.com/doncatnip/kanone'
, classifiers =\
[ "Development Status :: 4 - Beta"
, "Topic :: Software Development :: Libraries :: Python Modules"
, "License :: Public Domain"
, "Programming Language :: Python :: 2.7"
, "Programming Language :: Python :: 3"
, 'Intended Audience :: Developers'
]
, license = 'Unlicense'
, keywords = 'validation library form twisted stateful'
, packages = find_packages('src')
, package_dir = {'':'src'}
, install_requires = [ ]
, namespace_packages = [ ]
, include_package_data = True
)
|
doncatnip/kanone
|
setup.py
|
Python
|
unlicense
| 853
|
from django.conf import settings
from django.views.generic.base import View
from restful.decorators import restful_view_templates
@restful_view_templates
class FaqView(View):
def get(self, request):
return {
"page": "inner about faq",
"projects": settings.FAKE_DB,
}
|
tochev/obshtestvo.bg
|
web/views/faq.py
|
Python
|
unlicense
| 313
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Creating reStructuredText Directives
# @see http://docutils.sourceforge.net/docs/howto/rst-directives.html
from docutils.parsers.rst import directives, Directive
from docutils import nodes
from pelican import signals
_CONTENT_PATH = None
_DEBUG = False
from os.path import basename
from os.path import join
ad1st = """
<!-- insert your 1st ad code here. Or leave it as it is. -->
"""
ad2nd = """
<!-- insert your 2nd ad code here. Or leave it as it is. -->
"""
class embed_adsense_code(Directive):
required_arguments = 1
has_content = False
def run(self):
sel = self.arguments[0].strip()
html = ""
if sel == "1":
html = ad1st
if sel == "2":
html = ad2nd
return [nodes.raw('', html, format='html')]
def init_adsense_plugin(pelican_obj):
global _CONTENT_PATH
if _CONTENT_PATH is None:
_CONTENT_PATH = pelican_obj.settings['PATH']
def register():
signals.get_generators.connect(init_adsense_plugin)
directives.register_directive('adsense', embed_adsense_code)
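# Usage sketch (assumption, not from the original plugin): after register() has
# run, the directive can be placed in a reStructuredText article, with its single
# argument selecting ad1st ("1") or ad2nd ("2"):
#
#     .. adsense:: 1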
|
siongui/pelican-template
|
plugins/adsense/adsense.py
|
Python
|
unlicense
| 1,070
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class LacitySalariesItem(Item):
# define the fields for your item here like:
# name = Field()
department = Field()
position = Field()
employee = Field()
salary = Field()
|
dgeske/lacity-salaries
|
lacity_salaries/items.py
|
Python
|
unlicense
| 358
|
from importlib import import_module
from collapsar.const import CONST
from collapsar.exc import ImproperlyConfigured
from collapsar.config.scheme import Rel, InitArgs
__all__ = [
'ClassResolver',
'PropertiesResolver',
'ScopeResolver',
'FactoryResolver',
'BooleanResolver',
'InitArgsResolver',
]
class BaseResolver(object):
def resolve_import(self, source):
module_name, attr_names = source.split(':', 1)
obj = import_module(module_name)
attr_names_list = attr_names.split('.')
for attr_name in attr_names_list:
obj = getattr(obj, attr_name)
return obj
def resolve_rel(self, source):
rel_source = source['rel']
if isinstance(rel_source, dict):
return Rel(**rel_source)
else:
return Rel(rel_source)
def is_rel_source(self, source):
return isinstance(source, dict) and 'rel' in source
class ClassResolver(BaseResolver):
def resolve(self, source):
return self.resolve_import(source)
class PropertiesResolver(BaseResolver):
BUILDIN_TYPES = {
'int': int,
'long': long,
'string': str,
'unicode': unicode,
'bool': bool,
'set': set,
'frozenset': frozenset,
'dict': dict,
}
def __init__(self):
resolvers = {}
resolvers.update(self.BUILDIN_TYPES)
self.resolvers = resolvers
def resolve(self, source):
result = {}
for name, value in source.iteritems():
result[name] = self.resolve_property(value)
return result
def resolve_property(self, source):
if isinstance(source, dict):
if self.is_rel_source(source):
return self.resolve_rel(source)
else:
return self.resolve_plain_obj(source)
else:
return source
def resolve_plain_obj(self, source):
type_name = source['type']
        value_source = source['value']
        return self.get_resolver(type_name)(value_source)
def get_resolver(self, type_name):
return self.BUILDIN_TYPES[type_name]
class ScopeResolver(BaseResolver):
SKOPES = frozenset(CONST.SCOPE.values())
def resolve(self, source):
if source not in self.SKOPES:
raise ImproperlyConfigured('invalid scope %s' % source)
return source
class FactoryResolver(BaseResolver):
def resolve(self, source):
if self.is_rel_source(source):
return self.resolve_rel(source)
else:
return self.resolve_import(source)
class BooleanResolver(BaseResolver):
def resolve(self, source):
if not isinstance(source, bool):
raise ImproperlyConfigured('Must be boolean, not %r' % source)
return source
class InitArgsResolver(PropertiesResolver):
ARGS_KEY = 'args*'
def resolve(self, source):
if isinstance(source, (list, tuple)):
return InitArgs(args=self.resolve_list(source))
elif isinstance(source, dict):
init_args = InitArgs()
if self.ARGS_KEY in source:
init_args.args = self.resolve_list(source.pop(self.ARGS_KEY))
for name, value in source.iteritems():
init_args.kwargs[name] = self.resolve_property(value)
return init_args
else:
return InitArgs(args=[source])
def resolve_list(self, source):
return map(self.resolve_property, source)
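# Illustrative usage sketch (not part of the original module): PropertiesResolver
# turns a raw configuration mapping into resolved values, where {'rel': ...}
# entries become Rel references and {'type': ..., 'value': ...} entries are
# coerced with the matching built-in type:
#
#     resolver = PropertiesResolver()
#     props = resolver.resolve({
#         'retries': {'type': 'int', 'value': '3'},
#         'session': {'rel': 'db_session'},
#         'name': 'worker',
#     })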
|
minmax/collapsar
|
collapsar/config/resolvers.py
|
Python
|
unlicense
| 3,505
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ++++++++++++++++++++++++++++++++++++++++++++++
# Authors:
#
# Sebastian Spaar <spaar@stud.uni-heidelberg.de>
# Dennis Ulmer <d.ulmer@stud.uni-heidelberg.de>
#
# Project:
# CrOssinG (CompaRing Of AngliciSmS IN German)
#
# ++++++++++++++++++++++++++++++++++++++++++++++
"""anglicisms.py:
This module extracts anglicisms from the online Wiktionary.
(Check it out here: https://www.wiktionary.org/)
The program looks into the HTML code and checks whether an anglicism has its own
entry in the Wiktionary. If so, it searches that entry for a
German translation.
"""
#-------------------------------- Imports -------------------------------------
import urllib2
import re
from bs4 import BeautifulSoup as BS
from scrn_out import w, wil, fl
#----------------------------- Main functions ---------------------------------
def getAnglicismsList(url):
"""Extracts a list of anglicisms from a wiktionary page."""
anglicisms_list_html = BS(urllib2.urlopen(url)) # Extract the html-code
# Extracting every relevant section from the html-code
sections = anglicisms_list_html.find_all("p")
    wil("Extracting anglicisms from wiktionary.", 30)
entries = [] # Array for anglicisms
for section in sections:
        # The many variants of separators
section_ = re.split("( - | – | -|- |– )", str(section))
for s in section_:
entries.append(s)
entries = entries[3:len(entries)-1] # Using only the relevant parts
fl()
    wil("Extracting anglicisms from wiktionary..")
for i in range(len(entries)-1, -1, -1):
if entries[i] in [" - ", "- ", " -", " – ", "– "]:
entries.pop(i) # Popping redundant matches
fl()
    wil("Extracting anglicisms from wiktionary...Complete!", 30, "\n")
return entries
def generateEntries(list, printErrors=True):
"""Generates array of tuples (anglicism, wiktionary-link)."""
tuples = [] # Array for tuples (anglicism, wiktionary-link)
errors = []
for e in list:
percentage = list.index(e)*1.0/len(list)*100
wil("Creating tuples of anglicisms and their wikilink -"
"%.2f%% complete" %(percentage), 60)
try:
anglicism = re.findall(">[0-9a-zA-Z-. äöüÄÖÜßé]+<", e)
if anglicism == []:
continue
# Extracting the anglicisms
anglicism = anglicism[0].replace("<", "").replace(">", "")
wikilink = ""
if "(Seite nicht vorhanden)" not in str(e):
# Extracting the wikilink
wikilink = re.findall('=".+"\s', e)[0].replace('="',
"").replace('" ', "")
wikilink = "http://de.wiktionary.org" + wikilink
tuples.append((anglicism, wikilink))
except Exception, ex:
errors.append((str(e), str(ex)))
continue
finally:
fl()
if printErrors == True:
        wil("The following errors occurred:", 150, "\n")
for error in errors:
print "Error at entry: %s - %s" %(error[0], error[1])
wil("Creating tuples of anglicisms and their wikilinks...Complete!",
30, "\n")
return tuples
def lookUpTranslations(list, printErrors=True):
"""Looks up the English translation of an anglicism."""
    # Array for tuples with format (anglicism, [translation 1, translation 2])
tuples = []
for e in list:
percentage = list.index(e)*1.0/len(list)*100
wil("Looking up translations for %s - %.2f%% complete"
%(e[0].replace("ä", "ae").replace("é", "e"), percentage), 20)
if e[1] == "": # If there is no wikilink
fl()
continue
try:
# Extracting the html-code of wiktionary-page
r = urllib2.Request(e[1])
html = BS(urllib2.urlopen(r))
# If there are English translations
if len(re.findall("/wiki/Englisch.+<\/li>", str(html))) > 0:
translations = re.findall("/wiki/Englisch.+<\/li>",
unicode(html))[0]
translations = re.findall(">[0-9a-zA-Z-. äöüÄÖÜßé]+<",
translations)
for i in range(len(translations)-1, -1, -1):
if translations[i] == "> <" or \
translations[i] == ">Englisch<":
translations.pop(i) # Popping redundant matches...
else:
# ...or just formatting the results
translations[i] = translations[i].replace(">",
"").replace("<", "")
else:
translations = [] # Default
tuples.append((e[0].decode('utf-8'), translations))
except Exception, ex:
if printErrors:
print str(ex)
fl()
wil("Looking up translations...Complete!%s\n" %(40* " "))
return tuples
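# Illustrative usage sketch (not part of the original module; the URL is an
# assumption pointing at the German Wiktionary index of anglicisms):
#
#     url = "https://de.wiktionary.org/wiki/Verzeichnis:Deutsch/Anglizismen"
#     entries = getAnglicismsList(url)
#     pairs = generateEntries(entries)
#     translations = lookUpTranslations(pairs)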
|
Eroica/crossing
|
bin/anglicisms.py
|
Python
|
unlicense
| 5,083
|
import itertools
import unittest
from typing import List
import utils
# O(n * mlog(m)). O(nm) space. Anagram, sorting.
# n = strs.length
# m = strs[i].length
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
sorted_strs = sorted((sorted(s), s) for s in strs)
groups = itertools.groupby(sorted_strs, key=lambda t: t[0])
return [[t[1] for t in group] for _, group in groups]
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution, check_result=self.check_result)
def check_result(self, case, actual, msg):
expected = sorted(sorted(group) for group in case.expected)
actual = sorted(sorted(group) for group in actual)
self.assertEqual(expected, actual, msg)
if __name__ == '__main__':
unittest.main()
|
chrisxue815/leetcode_python
|
problems/test_0049_sort.py
|
Python
|
unlicense
| 833
|
import unittest
class Solution:
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
result = []
slen = len(s)
# [0,p1) [p1,p2) [p2,p3) [p3,slen)
for p1 in range(1, 4):
for p2 in range(p1 + 1, p1 + 4):
for p3 in range(p2 + 1, p2 + 4):
l4 = slen - p3
if l4 < 1 or l4 > 3:
continue
if p1 > 1 and s[0] == '0':
continue
l2 = p2 - p1
if l2 > 1 and s[p1] == '0':
continue
l3 = p3 - p2
if l3 > 1 and s[p2] == '0':
continue
if l4 > 1 and s[p3] == '0':
continue
s1 = s[0:p1]
b1 = int(s1)
if b1 > 255:
continue
s2 = s[p1:p2]
b2 = int(s2)
if b2 > 255:
continue
s3 = s[p2:p3]
b3 = int(s3)
if b3 > 255:
continue
s4 = s[p3:slen]
b4 = int(s4)
if b4 > 255:
continue
result.append(s1 + '.' + s2 + '.' + s3 + '.' + s4)
return result
class Test(unittest.TestCase):
def test(self):
self._test('25525511135', [
'255.255.11.135',
'255.255.111.35',
])
self._test('10999', [
'10.9.9.9',
'1.0.99.9',
'1.0.9.99',
# Should not contain 1.9.9.9
])
def _test(self, s, expected):
actual = Solution().restoreIpAddresses(s)
self.assertCountEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
chrisxue815/leetcode_python
|
problems/test_0093_iterative_post_validate.py
|
Python
|
unlicense
| 1,973
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Baidu Inc.
# Author: Zhong Yi <zhongyi01@baidu.com>
import sys
from HTMLParser import HTMLParser
filtered_tags = ['meta', 'div']
class EvernoteHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.list_prefix_ = []
def handle_starttag(self, tag, attrs):
if tag in filtered_tags:
return
if tag == 'ul':
self.list_prefix_.append('- ')
elif tag == 'ol':
self.list_prefix_.append('1. ')
print ''
elif tag == 'li':
print ''.join(self.list_prefix_),
# print '<' + tag,
# if tag == 'a':
# print attrs[0][1],
def handle_endtag(self, tag):
if tag in filtered_tags:
return
if tag == 'ul' or tag == 'ol':
self.list_prefix_.pop()
elif tag == 'b':
print '\n===================================================================================='
elif tag == 'font' or tag == 'br':
print '',
elif len(self.list_prefix_) > 1:
print '',
else:
print ''
# print tag + '>'
def handle_data(self, data):
if data == "\n":
return
print data,
html = open(sys.argv[1], 'rb').read()
parser = EvernoteHTMLParser()
# parser.feed("<html><head><title>Test</title></head>"
# "<body><h1>Parse me!</h1></body></html>")
parser.feed(html)
|
elvestar/codelab
|
evernote2rst/html2rst.py
|
Python
|
unlicense
| 1,363
|
import os
import string
from os import listdir
from os.path import isfile, join
from osgeo import gdal
from osgeo.gdalconst import *
import numpy
import numpy.ma as ma
# input the parent directory
root='/data/ifs/users/xcwu'
#Function to build vrt
def buildVrtFile (root1,root2,product):
fileList1=[]
for path1, subdirs1, files1 in os.walk(root1):
for name1 in files1:
if ".tif" == name1[-4:]: fileList1.append([os.path.join(path1,name1)])
fileList1.sort()
print len(fileList1),'files were built into a vrt file'
filename=os.path.join('/data/ifs/users/xcwu/VPM_GPP/LSWImax/temp',year+tile+product+'_list.txt')
outFilelist=open(filename,'w')
for file1 in fileList1[23:46]:
outFilelist.write(file1[0]+'\r\n')
fileList2=[]
for path2, subdirs2, files2 in os.walk(root2):
for name2 in files2:
if ".tif" == name2[-4:]: fileList2.append([os.path.join(path2,name2)])
fileList2.sort()
print len(fileList2),'files were built into a vrt file'
for file2 in fileList2[0:23]:
outFilelist.write(file2[0]+'\r\n')
outFilelist.close()
return filename
# Function to write array to tiff file
def write_file(output_name,output_array,GeoT,xsize,ysize,proJ,driverName='GTiff'):
print "creating", output_name
dr=gdal.GetDriverByName(driverName)
dr.Register()
do=dr.Create(output_name,xsize,ysize,1,gdal.GDT_Float32)
do.SetGeoTransform(GeoT)
do.SetProjection(proJ)
do.GetRasterBand(1).WriteArray(output_array)
do.GetRasterBand(1).SetNoDataValue(32767)
do=None
'''
'h12v01','h13v01','h14v01','h15v01','h16v01','h17v01','h18v01','h19v01','h20v01','h21v01','h22v01',\
'h23v01','h09v02','h10v02','h11v02','h12v02','h13v02','h14v02','h15v02','h16v02','h17v02',\
'h18v02','h19v02','h20v02','h21v02','h22v02','h23v02','h24v02','h25v02','h26v02','h06v03','h07v03',\
'h08v03','h09v03','h10v03','h11v03','h12v03','h13v03','h14v03','h15v03','h17v03','h18v03','h19v03',\
'h20v03','h21v03','h22v03','h23v03','h24v03','h25v03','h26v03','h27v03','h28v03','h29v03','h08v04',\
'h09v04','h10v04','h11v04','h12v04','h13v04','h14v04','h17v04','h18v04','h19v04','h20v04','h21v04',\
'h22v04','h23v04','h24v04','h25v04','h26v04','h27v04','h28v04','h07v05','h08v05','h09v05','h10v05',\
'h11v05','h12v05','h15v05','h16v05','h17v05','h18v05','h19v05','h20v05','h21v05','h22v05','h23v05',\
'h24v05','h25v05','h26v05','h27v05','h28v05','h29v05','h30v05','h02v06','h03v06','h07v06','h08v06',\
'h09v06','h10v06','h11v06','h16v06','h17v06','h18v06','h19v06','h20v06','h21v06','h22v06','h23v06',\
'h24v06','h25v06','h26v06','h27v06','h28v06','h29v06','h30v06','h31v06','h01v07','h03v07','h07v07',\
'h08v07','h09v07','h10v07','h11v07','h12v07','h15v07','h16v07','h17v07','h18v07','h19v07','h20v07',\
'h21v07','h22v07','h23v07','h24v07','h25v07','h26v07','h27v07','h28v07','h29v07','h30v07','h31v07',\
'h32v07','h33v07','h34v07','h00v08','h01v08','h02v08','h08v08','h09v08','h10v08','h11v08','h12v08',\
'h13v08','h16v08','h17v08','h18v08','h19v08','h20v08','h21v08','h22v08','h23v08','h25v08','h26v08',\
'h27v08','h28v08','h29v08','h30v08','h31v08','h32v08','h33v08','h34v08','h35v08','h00v09','h01v09',\
'h02v09','h03v09','h04v09','h08v09','h09v09','h10v09','h11v09','h12v09','h13v09','h14v09','h16v09',\
'h18v09','h19v09','h20v09','h21v09','h22v09','h23v09','h25v09','h27v09','h28v09','h29v09','h30v09',\
'h31v09','h32v09','h33v09','h34v09','h35v09'
'''
#for year in ['2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014']:
for year in ['2003','2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015']:
for tile in ['h00v10','h01v10','h02v10','h03v10','h04v10','h05v10','h10v10','h11v10',
'h12v10','h13v10','h14v10','h17v10','h19v10','h20v10','h21v10','h22v10','h23v10','h27v10',
'h28v10','h29v10','h30v10','h31v10','h32v10','h33v10','h34v10','h35v10','h01v11','h02v11',
'h03v11','h04v11','h05v11','h06v11','h08v11','h10v11','h11v11','h12v11','h13v11','h14v11',
'h15v11','h19v11','h20v11','h21v11','h22v11','h23v11','h27v11','h28v11','h29v11','h30v11',
'h31v11','h32v11','h33v11','h11v12','h12v12','h13v12','h16v12','h17v12','h19v12','h20v12',
'h24v12','h27v12','h28v12','h29v12','h30v12','h31v12','h32v12','h05v13','h12v13','h13v13',
'h17v13','h20v13','h21v13','h22v13','h28v13','h29v13','h30v13','h31v13','h13v14','h14v14',
'h15v14','h16v14','h18v14','h22v14','h27v14','h28v14']:
# Output directories for LSWImax
dirLSWImax=root+'/VPM_GPP/LSWImax/LSWI/'+year
# Output directories for SOS and EOS
dirDOYSOSEOS=root+'/VPM_GPP/LSWImax/SOSEOS/'+year+'/DOYSOSEOS'
# if the output directories don't exist, create the new directories
if not os.path.exists(dirLSWImax):
os.makedirs(dirLSWImax)
if not os.path.exists(dirDOYSOSEOS):
os.makedirs(dirDOYSOSEOS)
# The directory for the multi-nighttime LST, LSWI, Cloud
nextyear=string.atoi(year)+1
nextyear=str(nextyear)
dirLST1='/data/ifs/modis/products_006/myd11a2/'+year+'/'+tile
dirLST2='/data/ifs/modis/products_006/myd11a2/'+nextyear+'/'+tile
dirLSWI1='/data/ifs/modis/products_006/mod09a1/geotiff/lswi/'+year+'/'+tile
dirLSWI2='/data/ifs/modis/products_006/mod09a1/geotiff/lswi/'+nextyear+'/'+tile
dirNDVI1='/data/ifs/modis/products_006/mod09a1/geotiff/ndvi/'+year+'/'+tile
dirNDVI2='/data/ifs/modis/products_006/mod09a1/geotiff/ndvi/'+nextyear+'/'+tile
dirCloud1='/data/ifs/modis/products_006/mod09a1/geotiff/cloudmask/'+year+'/'+tile
dirCloud2='/data/ifs/modis/products_006/mod09a1/geotiff/cloudmask/'+nextyear+'/'+tile
# build LSWI vrt file and read as an array
file=buildVrtFile(dirLSWI1,dirLSWI2,'lswi')
vrtLSWI=os.path.join(os.path.dirname(file),year+tile+'LSWI_vrt.vrt')
print "Building the vrt file: ", vrtLSWI
os.system('gdalbuildvrt -separate -input_file_list '+file+' '+vrtLSWI)
global rows, cols, geoProj,geoTran
inLSWI=gdal.Open(vrtLSWI)
print "reading the multi-LSWI..."
LSWI=inLSWI.ReadAsArray()
LSWIorg=LSWI
rows = 2400
cols = 2400
geoTran=inLSWI.GetGeoTransform()
geoProj=inLSWI.GetProjection()
# build NDVI vrt file and read as an array
file=buildVrtFile(dirNDVI1,dirNDVI2,'ndvi')
vrtNDVI=os.path.join(os.path.dirname(file),year+tile+'NDVI_vrt.vrt')
print "Building the vrt file: ", vrtNDVI
os.system('gdalbuildvrt -separate -input_file_list '+file+' '+vrtNDVI)
inNDVI=gdal.Open(vrtNDVI)
print "reading the multi-NDVI..."
NDVI=inNDVI.ReadAsArray()
# build cloud vrt file and read as an array
file=buildVrtFile(dirCloud1,dirCloud2,'cloud')
vrtCloud=os.path.join(os.path.dirname(file),year+tile+'cloud_vrt.vrt')
print "Building the vrt file: ", vrtCloud
os.system('gdalbuildvrt -separate -input_file_list '+file+' '+vrtCloud)
inCloud=gdal.Open(vrtCloud)
print "reading the multi-Cloud..."
Cloud=inCloud.ReadAsArray()
# build nighttime LST vrt file and read as an array
file=buildVrtFile(dirLST1,dirLST2,'ngtLST')
vrtLST=os.path.join(os.path.dirname(file),year+tile+'ngtLST_vrt.vrt')
print "Building the vrt file: ", vrtLST
os.system('gdalbuildvrt -separate -input_file_list '+file+' '+vrtLST)
inLST=gdal.Open(vrtLST)
print "reading the multi-night LST..."
LST=inLST.ReadAsArray()
        # Convert the MODIS LST to degrees Celsius using scale factor 0.02 and offset 273
LST=LST*0.02-273
# calculate the end of growing season
# The first day when nightLST <10 DEG during the 2nd half year
LSTEOS=numpy.where((LST==-273)|(LST>=10),0, 1)
# calculate the start of growing season
# The first day when there are three points with nightLST >5 DEG
LST=numpy.where(LST<0,0, LST)
LST=LST/5
LST=LST.astype(int)
LST=numpy.where(LST>0,1, LST)
iniLST=LST[0,:,:]*LST[1,:,:]*LST[2,:,:]
for i in range(43):
LST=numpy.roll(LST,-1,axis=0)
tempLST=LST[0,:,:]*LST[1,:,:]*LST[2,:,:]
iniLST=numpy.append(iniLST,tempLST,axis=0)
iniLST=iniLST.reshape(44,1200,1200)
# to calculate if all year nightLST < 5 DEG. Yes->-10 (no growing season/LSWI=0.25)
maskSOS=numpy.sum(iniLST,axis=0)
# calculate the SOS
inSOSLST=numpy.argmax(iniLST,axis=0)
inSOSLST=numpy.where(maskSOS==0,-10,inSOSLST)
iniLST=None
maskSOS=None
# resample 1-km LST to 500 m
temp=numpy.zeros((2400,2400))
SOSLST = numpy.array([inSOSLST[x/2,y/2] for x, y in numpy.ndindex(temp.shape)])
SOSLST=SOSLST.reshape(2400,2400)
print "saving the SOS"
write_file(dirDOYSOSEOS+'/'+tile+'.'+year+'.SOS_ngtLST_5d.tif',SOSLST,geoTran,rows,cols,geoProj,driverName='GTiff')
temp=None
print "start to calculate the EOS falling below 10 degree"
iniLSTEOS=LSTEOS[23:,:,:]
EOSLST = numpy.argmax(iniLSTEOS,axis=0)
EOSLST=EOSLST+23
# if iniLSTEOS==0 means that there is no date below <10 DEG, Use the last DOY as EOS
midEOSLST=numpy.sum(iniLSTEOS,axis=0)
EOSLST=numpy.where(midEOSLST==0,45,EOSLST )
# here is no growing season
EOSLST=numpy.where(inSOSLST==-10,-10,EOSLST )
midEOSLST=None
# resample 1-km LST to 500 m
temp=numpy.zeros((2400,2400))
EOSLSTOUT = numpy.array([EOSLST[x/2,y/2] for x, y in numpy.ndindex(temp.shape)])
EOSLSTOUT=EOSLSTOUT.reshape(2400,2400)
temp=None
EOSLST=None
print "saving the EOS below 10 degree"
write_file(dirDOYSOSEOS+'/'+tile+'.'+year+'.EOS_ngtLST_10d.tif',EOSLSTOUT,geoTran,rows,cols,geoProj,driverName='GTiff')
# for the v01 tiles, the scene numbers are not 46
if tile in ['h16v01','h15v01','h14v01','h13v01','h12v01','h17v01','h18v01','h19v01','h20v01','h21v01','h22v01','h23v01']:
before=numpy.zeros((2400,2400))
LSWI=numpy.insert(LSWI,0,before,axis=0)
Cloud=numpy.insert(Cloud,0,before,axis=0)
LSWIorg=numpy.insert(LSWIorg,0,before,axis=0)
NDVI=numpy.insert(NDVI,0,before,axis=0)
before=None
## after=numpy.arange(23040000)*0
## after=after.reshape((4,2400,2400))
after=numpy.arange(23040000)*0
after=after.reshape((4,2400,2400))
Cloud=numpy.append(Cloud,after,axis=0)
LSWI=numpy.append(LSWI,after,axis=0)
LSWIorg=numpy.append(LSWIorg,after,axis=0)
NDVI=numpy.append(NDVI,after,axis=0)
print LSWI.shape
print Cloud.shape
print NDVI.shape
after=None
#if LSWI.shape[1] == 45:
# middle=-1*numpy.ones((2400,2400))
# LSWI=numpy.insert(LSWI,22,middle,axis=0)
# Cloud=numpy.insert(Cloud,22,middle,axis=0)
# LSWIorg=numpy.insert(LSWIorg,22,middle,axis=0)
# exclude the LSWI affected by cloud
LSWI = ma.masked_where(Cloud > 1,LSWI)
print "calculating the maximum LSWI"
#Build (46,2400,2400) temporal indices
temp=numpy.arange(46)
temp=numpy.repeat(temp,2400*2400)
        # mask out the LSWI beyond the range [SOSLSTOUT, EOSLSTOUT]
temp=temp.reshape(46,2400,2400)
# calculate the maximum NDVI during June to August
NDVIgrowingseason=ma.masked_where((temp < 23)|(temp > 30),NDVI)
NDVImaxday=numpy.argmax(NDVIgrowingseason,axis=0)
# if all year nighttime LST<5 DEG, get LSWImax on the day of NDVImax.
SOSLSTOUT=numpy.where(SOSLST==-10,NDVImaxday-1,SOSLST)
EOSLSTOUT1=EOSLSTOUT
EOSLSTOUT=numpy.where(EOSLSTOUT==-10,NDVImaxday+1,EOSLSTOUT)
# if SOS>EOS, SOS=NDVI max day -1, EOS=NDVI max day +1
SOSLSTOUT=numpy.where(SOSLSTOUT>EOSLSTOUT,NDVImaxday-1,SOSLSTOUT)
EOSLSTOUT=numpy.where(SOSLSTOUT>EOSLSTOUT,NDVImaxday+1,EOSLSTOUT)
LSWI=ma.masked_where((temp < SOSLSTOUT)|(temp > EOSLSTOUT), LSWI)
LSWIorg=ma.masked_where((temp < SOSLSTOUT)|(temp > EOSLSTOUT), LSWIorg)
#return the LSWI maximum without considering NAA
maxLSWI=numpy.nanmax(LSWI,axis=0)
maxLSWIorg=numpy.nanmax(LSWIorg,axis=0)
temp=None
SOSLSTOUT=None
# Set 0.25 to fill the no growing season, LSWI not [1,1]
maxLSWI=numpy.where((maxLSWI>=10000)|(maxLSWI<=-10000),maxLSWIorg,maxLSWI)
EOSLSTOUT=None
EOSLSTOUT1=None
write_file(dirLSWImax+'/'+tile+'.'+year+'.maxLSWI_5d_10d.tif',maxLSWI,geoTran,rows,cols,geoProj,driverName='GTiff')
maxLSWI=None
LSWI=None
|
zhangyaonju/Global_GPP_VPM_NCEP_C3C4
|
LSWImax/LSWImax_SouthH.py
|
Python
|
apache-2.0
| 13,248
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ResearcherConfig(AppConfig):
name = 'researcher'
|
Goodly/TextThresher
|
researcher/apps.py
|
Python
|
apache-2.0
| 136
|
from django.conf import settings
def is_desktop(request):
desktopapp = not request.is_mobile and not request.is_tablet
return {
'is_desktop': desktopapp
}
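# Registration sketch (assumption, not from the original file): hook the context
# processor into Django settings so templates can test {{ is_desktop }}:
#
#     TEMPLATES[0]['OPTIONS']['context_processors'].append(
#         'coursedashboards.context_processors.is_desktop')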
|
uw-it-aca/course-dashboards
|
coursedashboards/context_processors.py
|
Python
|
apache-2.0
| 179
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
from qpid.messaging import *
if __name__ == "__main__":
if len(sys.argv) < 5:
sys.exit(-1)
    print 'app name {}, broker ip {}, broker port {}, queue id {}'.format(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
broker = "{}/xxxx@{}:{}".format(sys.argv[1], sys.argv[2], sys.argv[3])
address = "{}".format(sys.argv[4])
conn_options = {
'transport' : 'ssl',
'ssl_keyfile' : "ssl_cert_file/MSP.Key.pem",
'ssl_certfile' : "ssl_cert_file/MSP.pem.cer",
'ssl_trustfile' : "ssl_cert_file/Wireless Root CA.pem.cer",
'ssl_skip_hostname_check' : True,
}
connection = Connection(broker, **conn_options)
try:
connection.open()
session = connection.session()
receiver = session.receiver(address)
print "session create success"
while True:
message = receiver.fetch()
print "%r" % message.content
session.acknowledge()
except MessagingError, m:
print "MessagingError", m
connection.close()
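# Invocation sketch (derived from the sys.argv handling above; all values are
# placeholders):
#
#     python MQReceiver.py <app_name> <broker_ip> <broker_port> <queue_id>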
|
SVADemoAPP/Server
|
WebContent/WEB-INF/python/MQReceiver.py
|
Python
|
apache-2.0
| 1,996
|
# Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
__all__ = ['Electromagnet']
import numpy as np
import time
from auspex.log import logger
class Electromagnet(object):
"""Wrapper for electromagnet """
def __init__(self, calibration_file, field_getter, current_setter, current_getter, field_averages=5):
super(Electromagnet, self).__init__()
self.name = "Composite Magnet Instrument"
with open(calibration_file) as cf:
lines = [l for l in cf.readlines() if l[0] != '#']
if len(lines) != 1:
raise Exception("Invalid magnet control calibration file, must contain one line.")
try:
# Construct the fit
poly_coeffs = np.array(lines[0].split(), dtype=np.float)
self.current_vs_field = np.poly1d(poly_coeffs)
except:
raise TypeError("Could not convert calibration coefficients into list of floats")
self.field_getter = field_getter
self.current_setter = current_setter
self.current_getter = current_getter
self.field_averages = field_averages
self.calibrated_slope = poly_coeffs[0]
@property
def field(self):
return np.mean( [self.field_getter() for i in range(self.field_averages)] )
@field.setter
def field(self, target_field):
# logging.debug("Appropriate current is: %f" % self.current_vs_field(target_field))
self.current_setter( self.current_vs_field(target_field) )
time.sleep(0.6)
# logging.debug("Arrived at: %f" % self.field)
field_offset = self.field - target_field
# logging.debug("Revising: Field offset is %f" % field_offset)
revised_field = target_field - field_offset
# logging.debug("Revising: Revised target field is %f" % revised_field)
self.current_setter( self.current_vs_field(revised_field) )
# logging.debug("Arrived at: %f, repeat measurement %f" % (self.field, self.field) )
# hackathon
def set_field(self, value):
self.field = value
# hackathon continues
def get_field(self):
return self.field
def __repr__(self):
name = "Mystery Instrument" if self.name == "" else self.name
        return "{}".format(name)
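# Illustrative usage sketch (not part of the original module; gaussmeter and
# supply stand in for whatever instrument drivers provide these callables):
#
#     magnet = Electromagnet("field_calibration.txt",
#                            field_getter=gaussmeter.get_field,
#                            current_setter=supply.set_current,
#                            current_getter=supply.get_current)
#     magnet.field = 150.0          # drive to the target field
#     print(magnet.get_field())     # averaged readback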
|
BBN-Q/Auspex
|
src/auspex/instruments/magnet.py
|
Python
|
apache-2.0
| 2,530
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import webob
from nova import flags
from nova.api import openstack
from nova import test
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
class ImageMetaDataTest(test.TestCase):
def setUp(self):
super(ImageMetaDataTest, self).setUp()
fakes.stub_out_glance(self.stubs)
def test_index(self):
req = webob.Request.blank('/v1.1/123/images/123/metadata')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
def test_show(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertTrue('meta' in res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
def test_show_not_found(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/key9')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_show_image_not_found(self):
req = webob.Request.blank('/v1.1/fake/images/100/metadata/key1')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_create(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata')
req.method = 'POST'
req.body = '{"metadata": {"key7": "value7"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
actual_output = json.loads(res.body)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, actual_output)
def test_create_image_not_found(self):
req = webob.Request.blank('/v1.1/fake/images/100/metadata')
req.method = 'POST'
req.body = '{"metadata": {"key7": "value7"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_update_all(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata')
req.method = 'PUT'
req.body = '{"metadata": {"key9": "value9"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
actual_output = json.loads(res.body)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, actual_output)
def test_update_all_image_not_found(self):
req = webob.Request.blank('/v1.1/fake/images/100/metadata')
req.method = 'PUT'
req.body = '{"metadata": {"key9": "value9"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_update_item(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1')
req.method = 'PUT'
req.body = '{"meta": {"key1": "zz"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
actual_output = json.loads(res.body)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(actual_output, expected_output)
def test_update_item_image_not_found(self):
req = webob.Request.blank('/v1.1/fake/images/100/metadata/key1')
req.method = 'PUT'
req.body = '{"meta": {"key1": "zz"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_update_item_bad_body(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1')
req.method = 'PUT'
req.body = '{"key1": "zz"}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_update_item_too_many_keys(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1')
req.method = 'PUT'
overload = {}
for num in range(FLAGS.quota_metadata_items + 1):
overload['key%s' % num] = 'value%s' % num
req.body = json.dumps({'meta': overload})
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_update_item_body_uri_mismatch(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/bad')
req.method = 'PUT'
req.body = '{"meta": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_update_item_xml(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1')
req.method = 'PUT'
req.body = '<meta key="key1">five</meta>'
req.headers["content-type"] = "application/xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
actual_output = json.loads(res.body)
expected_output = {'meta': {'key1': 'five'}}
self.assertEqual(actual_output, expected_output)
def test_delete(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(204, res.status_int)
self.assertEqual('', res.body)
def test_delete_not_found(self):
req = webob.Request.blank('/v1.1/fake/images/123/metadata/blah')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_delete_image_not_found(self):
req = webob.Request.blank('/v1.1/fake/images/100/metadata/key1')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_too_many_metadata_items_on_create(self):
data = {"metadata": {}}
for num in range(FLAGS.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
json_string = str(data).replace("\'", "\"")
req = webob.Request.blank('/v1.1/fake/images/123/metadata')
req.method = 'POST'
req.body = json_string
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(413, res.status_int)
def test_too_many_metadata_items_on_put(self):
FLAGS.quota_metadata_items = 1
req = webob.Request.blank('/v1.1/fake/images/123/metadata/blah')
req.method = 'PUT'
req.body = '{"meta": {"blah": "blah"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(413, res.status_int)
|
30loops/nova
|
nova/tests/api/openstack/test_image_metadata.py
|
Python
|
apache-2.0
| 7,983
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class V1ComponentStatusList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
V1ComponentStatusList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'kind': 'str',
'api_version': 'str',
'metadata': 'UnversionedListMeta',
'items': 'list[V1ComponentStatus]'
}
self.attribute_map = {
'kind': 'kind',
'api_version': 'apiVersion',
'metadata': 'metadata',
'items': 'items'
}
self._kind = None
self._api_version = None
self._metadata = None
self._items = None
@property
def kind(self):
"""
Gets the kind of this V1ComponentStatusList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1ComponentStatusList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1ComponentStatusList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1ComponentStatusList.
:type: str
"""
self._kind = kind
@property
def api_version(self):
"""
Gets the api_version of this V1ComponentStatusList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1ComponentStatusList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1ComponentStatusList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1ComponentStatusList.
:type: str
"""
self._api_version = api_version
@property
def metadata(self):
"""
Gets the metadata of this V1ComponentStatusList.
Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The metadata of this V1ComponentStatusList.
:rtype: UnversionedListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1ComponentStatusList.
Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param metadata: The metadata of this V1ComponentStatusList.
:type: UnversionedListMeta
"""
self._metadata = metadata
@property
def items(self):
"""
Gets the items of this V1ComponentStatusList.
List of ComponentStatus objects.
:return: The items of this V1ComponentStatusList.
:rtype: list[V1ComponentStatus]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1ComponentStatusList.
List of ComponentStatus objects.
:param items: The items of this V1ComponentStatusList.
:type: list[V1ComponentStatus]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
danielfrg/jupyterhub-kubernetes_spawner
|
kubernetes_spawner/swagger_client/models/v1_component_status_list.py
|
Python
|
apache-2.0
| 6,270
|
import hashlib
import sys
input_filename = sys.argv[1]
with open(input_filename) as f:
input = f.read()
common_hash = hashlib.md5()
common_hash.update(input.encode())
i = 0
password = ['_'] * 8
while True:
hash = common_hash.copy()
hash.update(str(i).encode())
md5 = hash.hexdigest()
if md5.startswith('00000'):
pos = int(md5[5], 16)
if pos < len(password) and password[pos] == '_':
password[pos] = md5[6]
if '_' not in password:
break
i += 1
print(''.join(password))
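# Invocation sketch (based on sys.argv[1] above; the file name is a placeholder):
#
#     python day05.py input.txt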
|
mofr/advent-of-code
|
2016/day05.py
|
Python
|
apache-2.0
| 552
|
# Copyright 2015-2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytree nodes with extra formatting information.
This is a thin wrapper around a pytree.Leaf node.
"""
import keyword
import re
from lib2to3 import pytree
from lib2to3.pgen2 import token
from yapf.yapflib import py3compat
from yapf.yapflib import pytree_utils
from yapf.yapflib import style
CONTINUATION = token.N_TOKENS
token.N_TOKENS += 1
class Subtype(object):
"""Subtype information about tokens.
Gleaned from parsing the code. Helps determine the best formatting.
"""
NONE = 0
UNARY_OPERATOR = 1
BINARY_OPERATOR = 2
SUBSCRIPT_COLON = 3
SUBSCRIPT_BRACKET = 4
DEFAULT_OR_NAMED_ASSIGN = 5
VARARGS_STAR = 6
KWARGS_STAR_STAR = 7
ASSIGN_OPERATOR = 8
DICTIONARY_KEY = 9
DICTIONARY_VALUE = 10
DICT_SET_GENERATOR = 11
COMP_FOR = 12
COMP_IF = 13
DEFAULT_OR_NAMED_ASSIGN_ARG_LIST = 14
FUNC_DEF = 15
class FormatToken(object):
"""A wrapper around pytree Leaf nodes.
This represents the token plus additional information useful for reformatting
the code.
Attributes:
next_token: The token in the unwrapped line after this token or None if this
is the last token in the unwrapped line.
previous_token: The token in the unwrapped line before this token or None if
this is the first token in the unwrapped line.
matching_bracket: If a bracket token ('[', '{', or '(') the matching
bracket.
whitespace_prefix: The prefix for the whitespace.
spaces_required_before: The number of spaces required before a token. This
is a lower-bound for the formatter and not a hard requirement. For
instance, a comment may have n required spaces before it. But the
formatter won't place n spaces before all comments. Only those that are
moved to the end of a line of code. The formatter may use different
spacing when appropriate.
can_break_before: True if we're allowed to break before this token.
must_break_before: True if we're required to break before this token.
total_length: The total length of the unwrapped line up to and including
whitespace and this token. However, this doesn't include the initial
indentation amount.
split_penalty: The penalty for splitting the line before this token.
"""
def __init__(self, node):
"""Constructor.
Arguments:
node: (pytree.Leaf) The node that's being wrapped.
"""
self.node = node
self.next_token = None
self.previous_token = None
self.matching_bracket = None
self.whitespace_prefix = ''
self.can_break_before = False
self.must_break_before = False
self.total_length = 0 # TODO(morbo): Think up a better name.
self.split_penalty = 0
if self.is_comment:
self.spaces_required_before = style.Get('SPACES_BEFORE_COMMENT')
else:
self.spaces_required_before = 0
def AddWhitespacePrefix(self, newlines_before, spaces=0, indent_level=0):
"""Register a token's whitespace prefix.
This is the whitespace that will be output before a token's string.
Arguments:
newlines_before: (int) The number of newlines to place before the token.
spaces: (int) The number of spaces to place before the token.
indent_level: (int) The indentation level.
"""
indent_char = '\t' if style.Get('USE_TABS') else ' '
indent_before = (
indent_char * indent_level * style.Get('INDENT_WIDTH') + ' ' * spaces)
if self.is_comment:
comment_lines = [s.lstrip() for s in self.value.splitlines()]
self.node.value = ('\n' + indent_before).join(comment_lines)
if not self.whitespace_prefix:
self.whitespace_prefix = (
'\n' * (self.newlines or newlines_before) + indent_before)
else:
self.whitespace_prefix += indent_before
def AdjustNewlinesBefore(self, newlines_before):
"""Change the number of newlines before this token."""
self.whitespace_prefix = (
'\n' * newlines_before + self.whitespace_prefix.lstrip('\n'))
def RetainHorizontalSpacing(self, first_column, depth):
"""Retains a token's horizontal spacing."""
previous = self.previous_token
if previous is None:
return
cur_lineno = self.lineno
prev_lineno = previous.lineno
if previous.is_multiline_string:
prev_lineno += previous.value.count('\n')
if (cur_lineno != prev_lineno or
(previous.is_pseudo_paren and previous.value != ')' and
cur_lineno != previous.previous_token.lineno)):
self.spaces_required_before = (
self.column - first_column + depth * style.Get('INDENT_WIDTH'))
return
cur_column = self.node.column
prev_column = previous.node.column
prev_len = len(previous.value)
if previous.is_pseudo_paren and previous.value == ')':
prev_column -= 1
prev_len = 0
if previous.is_multiline_string:
prev_len = len(previous.value.split('\n')[-1])
if '\n' in previous.value:
prev_column = 0 # Last line starts in column 0.
self.spaces_required_before = cur_column - (prev_column + prev_len)
def OpensScope(self):
return self.value in pytree_utils.OPENING_BRACKETS
def ClosesScope(self):
return self.value in pytree_utils.CLOSING_BRACKETS
def __repr__(self):
msg = 'FormatToken(name={0}, value={1}'.format(self.name, self.value)
msg += ', pseudo)' if self.is_pseudo_paren else ')'
return msg
@property
def value(self):
if self.is_continuation:
return self.node.value.rstrip()
return self.node.value
@property
@py3compat.lru_cache()
def node_split_penalty(self):
"""Split penalty attached to the pytree node of this token."""
return pytree_utils.GetNodeAnnotation(
self.node, pytree_utils.Annotation.SPLIT_PENALTY, default=0)
@property
def newlines(self):
"""The number of newlines needed before this token."""
return pytree_utils.GetNodeAnnotation(self.node,
pytree_utils.Annotation.NEWLINES)
@property
def must_split(self):
"""Return true if the token requires a split before it."""
return pytree_utils.GetNodeAnnotation(self.node,
pytree_utils.Annotation.MUST_SPLIT)
@property
def column(self):
"""The original column number of the node in the source."""
return self.node.column
@property
def lineno(self):
"""The original line number of the node in the source."""
return self.node.lineno
@property
@py3compat.lru_cache()
def subtypes(self):
"""Extra type information for directing formatting."""
value = pytree_utils.GetNodeAnnotation(self.node,
pytree_utils.Annotation.SUBTYPE)
return [Subtype.NONE] if value is None else value
@property
@py3compat.lru_cache()
def is_binary_op(self):
"""Token is a binary operator."""
return Subtype.BINARY_OPERATOR in self.subtypes
@property
@py3compat.lru_cache()
def name(self):
"""A string representation of the node's name."""
return pytree_utils.NodeName(self.node)
@property
def is_comment(self):
return self.node.type == token.COMMENT
@property
def is_continuation(self):
return self.node.type == CONTINUATION
@property
@py3compat.lru_cache()
def is_keyword(self):
return keyword.iskeyword(self.value)
@property
@py3compat.lru_cache()
def is_name(self):
return self.node.type == token.NAME and not self.is_keyword
@property
def is_number(self):
return self.node.type == token.NUMBER
@property
def is_string(self):
return self.node.type == token.STRING
@property
@py3compat.lru_cache()
def is_multiline_string(self):
return (self.is_string and
re.match(r'^[uUbB]?[rR]?(?P<delim>"""|\'\'\').*(?P=delim)$',
self.value, re.DOTALL) is not None)
@property
@py3compat.lru_cache()
def is_docstring(self):
return self.is_multiline_string and not self.node.prev_sibling
@property
@py3compat.lru_cache()
def is_pseudo_paren(self):
return hasattr(self.node, 'is_pseudo') and self.node.is_pseudo
@property
def is_pylint_comment(self):
return self.is_comment and re.match(r'#\s*\bpylint:\b', self.value)
|
wklken/yapf
|
yapf/yapflib/format_token.py
|
Python
|
apache-2.0
| 8,807
|
"""
The Tile class is an object representation of a tile on the screen
"""
__author__ = 'William Fiset'
import pygame
class Tile(pygame.Rect):
floor_tile_img = pygame.image.load('images/tiles/surface_tile_gray.png')
wall_tile_img = pygame.image.load("images/tiles/dark_wall.png")
# Define all the different kinds of tiles that exist
class Type:
FLOOR = '-'
WALL = 'w'
list_ = []
_tile_list = {}
TILE_SIZE = 32
total_tiles = 1
H, V = 1, 22
def __init__(self, x, y, tileType):
# AStar Variables
self.parent = None
self.H, self.G, self.F = 0, 0, 0
# Tile object specific variables
self.type = tileType
self.number = Tile.total_tiles
# Determine if this tile will be walkable or not
if tileType == Tile.Type.FLOOR:
self.walkable = True
elif tileType == Tile.Type.WALL:
self.walkable = False
pygame.Rect.__init__(self, (x, y), (Tile.TILE_SIZE, Tile.TILE_SIZE))
Tile.list_.append(self)
Tile._tile_list.update( { Tile.total_tiles : self } )
Tile.total_tiles += 1
@staticmethod
def get_tile(number):
# return Tile._tile_list[number]
for tile in Tile.list_:
if tile.number == number:
return tile
@staticmethod
def update(screen):
for tile in Tile.list_:
if tile.type == Tile.Type.FLOOR:
screen.blit(Tile.floor_tile_img, (tile.x, tile.y))
elif tile.type == Tile.Type.WALL:
screen.blit(Tile.wall_tile_img, (tile.x, tile.y))
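# Illustrative usage sketch (not part of the original module): tiles are built
# from map characters ('-' floor, 'w' wall) at pixel coordinates and drawn each
# frame with Tile.update(screen):
#
#     for row, line in enumerate("ww-w\n-w--".splitlines()):
#         for col, char in enumerate(line):
#             Tile(col * Tile.TILE_SIZE, row * Tile.TILE_SIZE, char)
#     # inside the game loop: Tile.update(screen)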
|
william-fiset/Survival
|
Tile.py
|
Python
|
apache-2.0
| 1,675
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A east text detector inferencer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from PIL import Image
import tensorflow as tf
import numpy as np
import sys
import os
import copy
import argparse
import cv2
import lanms
import model
from icdar import restore_rectangle
import east_multi_infer
from uai.arch.tf_model import TFAiUcloudModel
class EASTTextDetectModel(TFAiUcloudModel):
""" EASTTextDetectModel example model
"""
def __init__(self, conf):
super(EASTTextDetectModel, self).__init__(conf)
def load_model(self):
predictor = east_multi_infer.EastPredictor('./checkpoint_dir')
predictor.load_serve_model()
self._predictor = predictor
def execute(self, data, batch_size):
predictor = self._predictor
ret = []
images = []
for i in range(batch_size):
image = Image.open(data[i])
images.append(image)
results = predictor.do_serve_predict(images)
return results
|
ucloud/uai-sdk
|
examples/tensorflow/inference/east/code/east_multi_inference.py
|
Python
|
apache-2.0
| 1,765
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from abc import abstractmethod
from pants.base.exceptions import TestFailedTaskError
from pants.util.timeout import Timeout, TimeoutReached
class TestTaskMixin(object):
"""A mixin to combine with test runner tasks.
The intent is to migrate logic over time out of JUnitRun and PytestRun, so the functionality
expressed can support both languages, and any additional languages that are added to pants.
"""
@classmethod
def register_options(cls, register):
super(TestTaskMixin, cls).register_options(register)
register('--skip', action='store_true', help='Skip running tests.')
register('--timeouts', action='store_true', default=True,
help='Enable test target timeouts. If timeouts are enabled then tests with a timeout= parameter '
'set on their target will time out after the given number of seconds if not completed. '
'If no timeout is set, then either the default timeout is used or no timeout is configured. '
'In the current implementation, all the timeouts for the test targets to be run are summed and '
'all tests are run with the total timeout covering the entire run of tests. If a single target '
'in a test run has no timeout and there is no default, the entire run will have no timeout. This '
'should change in the future to provide more granularity.')
register('--timeout-default', action='store', default=0, type=int,
help='The default timeout (in seconds) for a test if timeout is not set on the target.')
def execute(self):
"""Run the task."""
if not self.get_options().skip:
test_targets = self._get_test_targets()
all_targets = self._get_targets()
for target in test_targets:
self._validate_target(target)
timeout = self._timeout_for_targets(test_targets)
try:
with Timeout(timeout):
self._execute(all_targets)
except TimeoutReached:
raise TestFailedTaskError(failed_targets=test_targets)
def _timeout_for_target(self, target):
return getattr(target, 'timeout', None)
def _timeout_for_targets(self, targets):
"""Calculate the total timeout based on the timeout configuration for all the targets.
Because the timeout wraps all the test targets rather than individual tests, we have to somehow
aggregate all the target specific timeouts into one value that will cover all the tests. If some targets
have no timeout configured (or set to 0), their timeout will be set to the default timeout.
If there is no default timeout, or if it is set to zero, there will be no timeout, if any of the test targets
have a timeout set to 0 or no timeout configured.
:param targets: list of test targets
:return: timeout to cover all the targets, in seconds
"""
if not self.get_options().timeouts:
return None
timeout_default = self.get_options().timeout_default
# Gather up all the timeouts.
timeouts = [self._timeout_for_target(target) for target in targets]
# If any target's timeout is None or 0, then set it to the default timeout
timeouts_w_default = [timeout or timeout_default for timeout in timeouts]
# Even after we've done that, there may be a 0 or None in the timeout list if the
# default timeout is set to 0 or None. So if that's the case, then the timeout is
# disabled
if 0 in timeouts_w_default or None in timeouts_w_default:
return None
else:
# Sum the timeouts for all the targets, using the default timeout where one is not set
return sum(timeouts_w_default)
def _get_targets(self):
"""This is separated out so it can be overridden for testing purposes.
:return: list of targets
"""
return self.context.targets()
def _get_test_targets(self):
"""Returns the targets that are relevant test targets."""
test_targets = list(filter(self._test_target_filter(), self._get_targets()))
return test_targets
@abstractmethod
def _test_target_filter(self):
"""A filter to run on targets to see if they are relevant to this test task.
:return: function from target->boolean
"""
@abstractmethod
def _validate_target(self, target):
"""Ensures that this target is valid. Raises TargetDefinitionException if the target is invalid.
We don't need the type check here because _get_targets() combines with _test_target_type to
filter the list of targets to only the targets relevant for this test task.
:param target: the target to validate
:raises: TargetDefinitionException
"""
@abstractmethod
def _execute(self, all_targets):
"""Actually goes ahead and runs the tests for the targets.
:param targets: list of the targets whose tests are to be run
"""
|
slyphon/pants
|
src/python/pants/backend/core/tasks/test_task_mixin.py
|
Python
|
apache-2.0
| 5,100
|
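The _timeout_for_targets docstring above describes a specific aggregation rule: unset per-target timeouts fall back to the default, the run gets no timeout if any effective value is still unset, and otherwise the values are summed. A hedged, standalone restatement of that rule (aggregate_timeout is a hypothetical name, not the pants implementation):

def aggregate_timeout(target_timeouts, default_timeout):
    """Sum per-target timeouts, substituting the default for unset (None/0) ones.

    Returns None, meaning "no timeout", if any effective value is still unset.
    """
    effective = [t or default_timeout for t in target_timeouts]
    if any(not t for t in effective):
        return None
    return sum(effective)

assert aggregate_timeout([10, None, 5], default_timeout=20) == 35
assert aggregate_timeout([10, 0, 5], default_timeout=0) is None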
#!/usr/bin/env python3
import os
import re
import shutil
from bs4 import BeautifulSoup
def map_the_assets(src_asset_dir, dest_asset_dir, docname=None, html_doc_path=None, html_source=None):
'''
Given an HTML file, take all image assets and remap for deconst.
'''
if not docname:
docname = 'test.html'
if html_doc_path:
the_path = os.path.join(os.getcwd(), html_doc_path)
with open(the_path, 'r') as html_doc_sample:
soup = BeautifulSoup(html_doc_sample, 'html.parser')
elif html_source:
soup = BeautifulSoup(html_source, 'html.parser')
else:
raise ValueError(
'You need to send this some HTML. Use either a path or a source file.')
changed_envelope = {}
element1 = str(soup)
element2 = 0
for img in soup.find_all('img'):
tag_string = str(img)
begin_offset = element1.index(tag_string)
src_offset = tag_string.index('src="')
# To get the end of the tag: end_offset = len(tag_string) +
# begin_offset
final_offset = len('src="') + src_offset + begin_offset
n = img['src']
img['src'] = element2
changed_envelope[n] = final_offset
element2 += 1
listed_env = list(changed_envelope)
for key in listed_env:
path_to_key = src_asset_dir + key
new_path = dest_asset_dir + key
# potential BUG: What if the same image is used twice in the doc (or
# reused in another doc)?
shutil.copy(path_to_key, dest_asset_dir)
changed_envelope[new_path] = changed_envelope.pop(key)
final_body = str(soup.body)[6:-7]
return final_body, changed_envelope
|
nimbinatus/deconst-raml-preparer
|
ramlpreparer/builders/asset_mapper.py
|
Python
|
apache-2.0
| 1,682
|
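A small self-contained illustration of the core step in map_the_assets: parse HTML with BeautifulSoup and rewrite each img src attribute in memory. It assumes bs4 is installed; the HTML snippet and the assets/ destination are made up for the example.

from bs4 import BeautifulSoup

html = '<p>intro</p><img src="images/a.png"><img src="images/b.png">'
soup = BeautifulSoup(html, 'html.parser')
for index, img in enumerate(soup.find_all('img')):
    original = img['src']
    img['src'] = 'assets/%d.png' % index  # remap to a hypothetical destination
    print(original, '->', img['src'])
print(str(soup))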
from .cqsdk import *
|
moondropx/dogbot
|
dogbot/cqsdk/__init__.py
|
Python
|
apache-2.0
| 21
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
async web application.
'''
import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
from config import configs
import orm
from coroweb import add_routes, add_static
from handlers import cookie2user, COOKIE_NAME
def init_jinja2(app, **kw):
logging.info('init jinja2...')
options = dict(
autoescape = kw.get('autoescape', True),
block_start_string = kw.get('block_start_string', '{%'),
block_end_string = kw.get('block_end_string', '%}'),
variable_start_string = kw.get('variable_start_string', '{{'),
variable_end_string = kw.get('variable_end_string', '}}'),
auto_reload = kw.get('auto_reload', True)
)
path = kw.get('path', None)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
logging.info('set jinja2 template path: %s' % path)
env = Environment(loader=FileSystemLoader(path), **options)
filters = kw.get('filters', None)
if filters is not None:
for name, f in filters.items():
env.filters[name] = f
app['__templating__'] = env
@asyncio.coroutine
def logger_factory(app, handler):
@asyncio.coroutine
def logger(request):
logging.info('Request: %s %s' % (request.method, request.path))
# yield from asyncio.sleep(0.3)
return (yield from handler(request))
return logger
@asyncio.coroutine
def auth_factory(app, handler):
@asyncio.coroutine
def auth(request):
logging.info('check user: %s %s' % (request.method, request.path))
request.__user__ = None
cookie_str = request.cookies.get(COOKIE_NAME)
if cookie_str:
user = yield from cookie2user(cookie_str)
if user:
logging.info('set current user: %s' % user.email)
request.__user__ = user
if request.path.startswith('/manage/') and (request.__user__ is None or not request.__user__.admin):
return web.HTTPFound('/signin')
return (yield from handler(request))
return auth
@asyncio.coroutine
def data_factory(app, handler):
@asyncio.coroutine
def parse_data(request):
if request.method == 'POST':
if request.content_type.startswith('application/json'):
request.__data__ = yield from request.json()
logging.info('request json: %s' % str(request.__data__))
elif request.content_type.startswith('application/x-www-form-urlencoded'):
request.__data__ = yield from request.post()
logging.info('request form: %s' % str(request.__data__))
return (yield from handler(request))
return parse_data
@asyncio.coroutine
def response_factory(app, handler):
@asyncio.coroutine
def response(request):
logging.info('Response handler...')
r = yield from handler(request)
if isinstance(r, web.StreamResponse):
return r
if isinstance(r, bytes):
resp = web.Response(body=r)
resp.content_type = 'application/octet-stream'
return resp
if isinstance(r, str):
if r.startswith('redirect:'):
return web.HTTPFound(r[9:])
resp = web.Response(body=r.encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, dict):
template = r.get('__template__')
if template is None:
resp = web.Response(body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))
resp.content_type = 'application/json;charset=utf-8'
return resp
else:
r['__user__'] = request.__user__
resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
        if isinstance(r, int) and r >= 100 and r < 600:
            return web.Response(status=r)
if isinstance(r, tuple) and len(r) == 2:
t, m = r
            if isinstance(t, int) and t >= 100 and t < 600:
                return web.Response(status=t, text=str(m))
# default:
resp = web.Response(body=str(r).encode('utf-8'))
resp.content_type = 'text/plain;charset=utf-8'
return resp
return response
def datetime_filter(t):
delta = int(time.time() - t)
if delta < 60:
return u'1分钟前'
if delta < 3600:
return u'%s分钟前' % (delta // 60)
if delta < 86400:
return u'%s小时前' % (delta // 3600)
if delta < 604800:
return u'%s天前' % (delta // 86400)
dt = datetime.fromtimestamp(t)
return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)
@asyncio.coroutine
def init(loop):
yield from orm.create_pool(loop=loop, **configs.db)
app = web.Application(loop=loop, middlewares=[
logger_factory, auth_factory, response_factory
])
init_jinja2(app, filters=dict(datetime=datetime_filter))
add_routes(app, 'handlers')
add_static(app)
srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 8888)
logging.info('server started at http://127.0.0.1:8888...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
|
kitianFresh/awesome-python3-webapp
|
www/app.py
|
Python
|
apache-2.0
| 5,548
|
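datetime_filter above buckets the age of a timestamp into human-readable strings (the literals are Chinese for "N minutes/hours/days ago", falling back to a full date). A standalone English-language sketch of the same bucketing, under a hypothetical name:

import time

def humanize_age(timestamp, now=None):
    delta = int((now if now is not None else time.time()) - timestamp)
    if delta < 60:
        return 'just now'
    if delta < 3600:
        return '%d minutes ago' % (delta // 60)
    if delta < 86400:
        return '%d hours ago' % (delta // 3600)
    if delta < 604800:
        return '%d days ago' % (delta // 86400)
    return time.strftime('%Y-%m-%d', time.localtime(timestamp))

assert humanize_age(0, now=120) == '2 minutes ago'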
import numpy as np
from BDSpace.Coordinates import Cartesian, transforms
from BDSpace.Coordinates.transforms import unit_vector
from BDSpace.Curve import Line, Arc, Helix
def line_between_two_points(coordinate_system, point1, point2):
direction = point2 - point1
distance = np.sqrt(np.dot(direction, direction))
v = unit_vector(direction)
line_coordinate_system = Cartesian(basis=np.copy(coordinate_system.basis), origin=np.copy(coordinate_system.origin),
name='Line path coordinate system')
path = Line(name='Line Path', coordinate_system=line_coordinate_system,
origin=point1, a=v[0], b=v[1], c=v[2],
start=0, stop=distance)
return path
def helix_between_two_points(coordinate_system, point1, point2, radius=1, loops=1, right=True):
direction = point2 - point1
distance = np.sqrt(np.dot(direction, direction))
origin = coordinate_system.to_parent_vector(point1)
helix_coordinate_system = Cartesian(basis=np.copy(coordinate_system.basis), origin=np.copy(origin),
name='Helix coordinate system')
r_theta_phi = transforms.cartesian_to_spherical_point(direction)
helix_coordinate_system.rotate_axis_angle(np.array([0, 0, 1], dtype=np.double), r_theta_phi[2])
helix_coordinate_system.rotate_axis_angle(np.array([0, 1, 0], dtype=np.double), r_theta_phi[1])
pitch = distance / int(loops)
name = 'Right Helix' if right else 'Left Helix'
path = Helix(name=name, coordinate_system=helix_coordinate_system,
radius=radius, pitch=pitch, start=0, stop=np.pi * 2 * int(loops), right=right)
return path
def arc_between_two_points(coordinate_system, point1, point2, radius=1, right=True):
global_point = coordinate_system.to_parent(np.vstack((point1, point2)))
direction = point2 - point1
distance = np.sqrt(np.dot(direction, direction))
arc_coordinate_system = Cartesian(basis=np.copy(coordinate_system.basis), origin=np.copy(global_point[0]),
name='Arc coordinate_system')
r_theta_phi = transforms.cartesian_to_spherical_point(direction)
arc_coordinate_system.rotate_axis_angle(np.array([0, 0, 1], dtype=np.double), r_theta_phi[2])
arc_coordinate_system.rotate_axis_angle(np.array([0, 1, 0], dtype=np.double), r_theta_phi[1] + np.pi/2)
x_offset = -distance / 2
y_offset = np.sqrt(radius**2 - x_offset**2)
if right:
y_offset *= -1
arc_coordinate_system.origin = arc_coordinate_system.to_parent_vector(np.array([x_offset, y_offset, 0],
dtype=np.double))
local_point = arc_coordinate_system.to_local(global_point)
phi = transforms.cartesian_to_spherical(local_point)[:, 2]
if right:
start = phi[0]
stop = phi[1]
else:
start = 2 * np.pi - phi[0]
stop = 2 * np.pi - phi[1]
path = Arc(coordinate_system=arc_coordinate_system, a=radius, b=radius, start=start, stop=stop, right=right)
return path
|
bond-anton/Space
|
BDSpace/Pathfinder/__init__.py
|
Python
|
apache-2.0
| 3,109
|
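line_between_two_points above reduces to the distance and unit-vector computation shown here. This sketch uses only numpy (BDSpace itself is not required) and illustrative coordinates; a 3-4-5 triangle makes the expected distance obvious.

import numpy as np

point1 = np.array([0.0, 0.0, 0.0])
point2 = np.array([3.0, 4.0, 0.0])
direction = point2 - point1
distance = np.sqrt(np.dot(direction, direction))  # 5.0 for this 3-4-5 triangle
unit = direction / distance                       # direction cosines of the line
print(distance, unit)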
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.callbacks import events
from neutron_lib import fixture
from neutron.api.rpc.callbacks import resource_manager
from neutron.services.trunk import callbacks
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk.rpc import backend
from neutron.tests import base
class ServerSideRpcBackendTest(base.BaseTestCase):
# TODO(fitoduarte): add more test to improve coverage of module
def setUp(self):
super(ServerSideRpcBackendTest, self).setUp()
self._mgr = mock.Mock()
self.useFixture(fixture.CallbackRegistryFixture(
callback_manager=self._mgr))
self.register_mock = mock.patch.object(
resource_manager.ResourceCallbacksManager, "register").start()
def test___init__(self,):
test_obj = backend.ServerSideRpcBackend()
calls = [mock.call(test_obj.process_event,
trunk_consts.TRUNK,
events.AFTER_CREATE),
mock.call(test_obj.process_event,
trunk_consts.TRUNK,
events.AFTER_DELETE),
mock.call(test_obj.process_event,
trunk_consts.SUBPORTS,
events.AFTER_CREATE),
mock.call(test_obj.process_event,
trunk_consts.SUBPORTS,
events.AFTER_DELETE)
]
self._mgr.subscribe.assert_has_calls(calls, any_order=True)
def test_process_event(self):
test_obj = backend.ServerSideRpcBackend()
test_obj._stub = mock_stub = mock.Mock()
trunk_plugin = mock.Mock()
test_obj.process_event(
trunk_consts.TRUNK, events.AFTER_CREATE, trunk_plugin,
callbacks.TrunkPayload("context",
"id",
current_trunk="current_trunk"))
test_obj.process_event(
trunk_consts.TRUNK, events.AFTER_DELETE, trunk_plugin,
callbacks.TrunkPayload("context",
"id",
original_trunk="original_trunk"))
calls = [mock.call.trunk_created("context",
"current_trunk"),
mock.call.trunk_deleted("context",
"original_trunk")]
mock_stub.assert_has_calls(calls, any_order=False)
|
eayunstack/neutron
|
neutron/tests/unit/services/trunk/rpc/test_backend.py
|
Python
|
apache-2.0
| 3,069
|
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PHP library generator.
This module generates a PHP service class from a given Google APIs Discovery
document. The generated PHP code is intended to be used by the Google
APIs PHP Client (http://code.google.com/p/google-api-php-client).
Features:
- Meta classes generated from the schema definition.
- Type hinting in setter methods and api service methods.
- ApiService classes that offer a PHP interface to the API.
- Proper PHPDoc comments for each class and method in the library (FUTURE).
"""
__author__ = 'chirags@google.com (Chirag Shah)'
import collections
import json
import operator
from googleapis.codegen import api
from googleapis.codegen import api_library_generator
from googleapis.codegen import data_types
from googleapis.codegen import language_model
from googleapis.codegen import utilities
from googleapis.codegen.schema import Schema
class PHPGenerator(api_library_generator.ApiLibraryGenerator):
"""The PHP code generator."""
def __init__(self, discovery, options=None):
"""Create a new PHPGenerator.
Args:
discovery: (dict) The discovery document dictionary.
options: (dict) A dictionary of options to guide the generator's behavior.
"""
super(PHPGenerator, self).__init__(PHPApi, discovery, 'php',
language_model=PhpLanguageModel(),
options=options)
def AnnotateResource(self, the_api, resource):
"""Add the discovery dictionary as data to each resource.
Override default implementation.
Prepend the resource class name to each sub-resource since PHP doesn't
support nested classes.
Args:
the_api: (Api) The API this Resource belongs to.
resource: (Resource) The Resource to annotate.
"""
resource.json = json.dumps(_StripResource(resource.raw))
# Escape stray quotes since it will be used in a PHP function call.
resource.json = resource.json.replace('\'', '\\\'').replace('{}', '')
for method in resource.values['methods']:
self.AnnotateMethod(the_api, method, resource)
if not resource.GetTemplateValue('phpPropName'):
resource.SetTemplateValue('phpPropName', resource.values['wireName'])
for r in resource.values['resources']:
r.values['className'] = (resource.values['className'] +
r.values['className'])
namespaced = '_'.join((resource.GetTemplateValue('phpPropName'),
r.values['wireName']))
namespaced = '_'.join(
(resource.GetTemplateValue('phpPropName'),
self.language_model.ToMemberName(r.values['wireName'], None)))
r.SetTemplateValue('phpPropName', namespaced)
self.AnnotateResource(the_api, r)
def AnnotateMethod(self, unused_api, method, resource=None):
"""Format service request method parameters.
Override default implementation.
Annotates each method parameter with a type hint if possible.
Adds the postBody parameter if there's a requestType parameter.
Generates a list of service parameters used by the client library.
Args:
unused_api: (Api) The API this Method belongs to.
method: (Method) The Method to annotate.
resource: (Resource) The Resource which owns this Method.
"""
for param in method.parameters:
self._SetTypeHint(param)
if method.parameters or method.values.get('requestType'):
method.SetTemplateValue('hasParams', True)
method.SetTemplateValue('name', self._ToMethodName(method.values, resource))
def AnnotateProperty(self, unused_api, prop, schema):
"""Annotate properties with a PHP type hint.
Overrides default implementation.
Args:
unused_api: (Api) The API this Property belongs to.
prop: (Property) The Property to annotate.
schema: (Schema) The Schema this Property belongs to.
"""
if isinstance(prop.data_type, data_types.ArrayDataType):
prop.SetTemplateValue('dataType', 'array')
schema.SetTemplateValue('dataType', 'array')
collection = self.language_model.ToMemberName(prop['wireName'], 0)
schema.SetTemplateValue('collectionKey', collection)
elif not schema.GetTemplateValue('dataType'):
schema.SetTemplateValue('dataType', 'object')
if isinstance(prop.data_type, data_types.MapDataType):
prop.SetTemplateValue('dataType', 'map')
if not prop.member_name_is_json_name:
schema.SetTemplateValue('has_gapi', True)
# add the prop name
prop_names = schema.values.get('propNames', [])
prop_names.append(prop.memberName)
schema.SetTemplateValue('propNames', prop_names)
self._SetTypeHint(prop)
def _GenerateLibrarySource(self, the_api, source_package_writer):
"""Default operations to generate the package.
Do all the default operations for generating a package.
1. Walk the template tree to generate the source.
2. Add in per-language additions to the source
3. Optionally copy in dependencies
4. (Side effect) Closes the source_package_writer.
Args:
the_api: (Api) The Api instance we are writing a libary for.
source_package_writer: (LibraryPackage) source output package.
"""
list_replacements = {
'___models_': ['model', the_api.ModelClasses()],
'___resources_': ['resource', the_api.ResourceClasses()],
'___topLevelModels_': ['model', the_api.TopLevelModelClasses()],
}
self.WalkTemplateTree('templates', self._path_replacements,
list_replacements,
self._top_level_defines, source_package_writer)
# Call back to the language specific generator to give it a chance to emit
# special case elements.
self.GenerateExtraSourceOutput(source_package_writer)
def _ToMethodName(self, method, resource):
"""Convert a wire format name into a suitable PHP variable name."""
s = method['wireName']
if resource and (s.lower() in PhpLanguageModel.PHP_KEYWORDS):
s += resource['className']
# Ensure dashes don't show up in method names
words = s.split('-')
return ''.join(words[:1] + [w.capitalize() for w in words[1:]])
def _SetTypeHint(self, prop):
"""Strip primitive types since PHP doesn't support primitive type hints."""
code_type = prop.code_type
if code_type and code_type.lower() in PhpLanguageModel.PHP_TYPES:
prop.values['typeHint'] = ''
prop.values['typeHintOld'] = ''
prop.values['typeHintFull'] = ''
if code_type.lower() in PhpLanguageModel.PHP_PRIMITIVE_TYPES:
prop.values['annotationType'] = code_type.lower()
else:
prop.values['typeHintOld'] = ('%s_%s' %
(self._api.values['owner'].title(),
code_type))
prop.values['typeHintFull'] = ('%s\\Service\\%s\\%s' %
(self._api.values['owner'].title(),
self._api.values['className'], code_type))
prop.values['typeHint'] = (code_type)
prop.values['annotationType'] = code_type
class PhpLanguageModel(language_model.LanguageModel):
"""A LanguageModel tunded for PHP."""
language = 'php'
_SCHEMA_TYPE_TO_PHP_TYPE = {
'any': 'array',
'boolean': 'bool',
'integer': 'int',
'long': 'string', # PHP doesn't support long integers.
'number': 'double',
'string': 'string',
'uint32': 'string', # PHP doesn't support unsigned integers.
'uint64': 'string', # PHP doesn't support unsigned integers.
'int32': 'int',
'int64': 'string', # Size of an integer is platform-dependent.
'double': 'double',
'float': 'float',
}
# Populated from http://php.net/manual/en/reserved.keywords.php
PHP_KEYWORDS = frozenset((
'abstract', 'and', 'array', 'as', 'break', 'call', 'callable',
'case', 'catch', 'cfunction', 'class', 'clone',
'const', 'continue', 'declare', 'default', 'do',
'else', 'elseif', 'empty', 'enddeclare', 'endfor', 'endforeach',
'endif', 'endswitch', 'endwhile', 'extends', 'final',
'finally', 'for', 'foreach', 'function', 'global', 'goto',
'if', 'implements', 'interface', 'instanceof', 'list',
'namespace', 'new', 'old_function', 'or', 'parent', 'private',
'protected', 'public', 'return', 'static', 'switch', 'throw',
'trait', 'try', 'unset', 'use', 'var', 'while', 'xor', 'yield',
))
PHP_TYPES = frozenset((
'bool', 'boolean', 'int', 'integer', 'file', 'float', 'double', 'string',
'array', 'object',
'null', 'resource',
))
PHP_PRIMITIVE_TYPES = frozenset((
'bool',
'int',
'float',
'string',
'array',
))
# We can not create classes which match a PHP keyword or built in object type.
RESERVED_CLASS_NAMES = PHP_KEYWORDS | PHP_TYPES
array_of_policy = language_model.NamingPolicy(format_string='{name}')
map_of_policy = language_model.NamingPolicy(format_string='{name}')
member_policy = language_model.NamingPolicy(
case_transform=language_model.LOWER_CAMEL_CASE)
getter_policy = language_model.NamingPolicy(
case_transform=language_model.UPPER_CAMEL_CASE,
format_string='get{name}')
setter_policy = language_model.NamingPolicy(
case_transform=language_model.UPPER_CAMEL_CASE,
format_string='set{name}')
def __init__(self):
super(PhpLanguageModel, self).__init__(class_name_delimiter='.')
def GetCodeTypeFromDictionary(self, def_dict):
"""Convert a json primitive type to a suitable PHP type name.
Overrides the default.
Args:
def_dict: (dict) A dictionary describing Json schema for this Property.
Returns:
A name suitable for use as a class in PHP.
"""
# Unique identifier for this schema.
json_type = def_dict.get('type', 'string')
# An additional regular expression or key that helps constrain the value.
# See: http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.23
json_format = def_dict.get('format')
# If we know what to do with this format type, then use native type.
php_type = (self._SCHEMA_TYPE_TO_PHP_TYPE.get(json_format)
or self._SCHEMA_TYPE_TO_PHP_TYPE.get(json_type, json_type))
return php_type
def ToMemberName(self, s, unused_api):
"""Convert a wire format name into a suitable PHP variable name.
Overrides the default.
Args:
s: (string) The wire format name of a member variable.
Returns:
A name suitable for use as a member in PHP.
"""
return s.replace('-', '_').replace('.', '_').replace('/', '__')
class PHPApi(api.Api):
"""An Api with PHP annotations."""
def __init__(self, discovery_doc, language=None):
super(PHPApi, self).__init__(discovery_doc, language)
# We've been advised by the open source office that the correct legal thing
# to do is to hard-code a copyright date. Generated clients should always
# display the year 2014, even if they are generated in subsequent years.
self.SetTemplateValue('copyright', 'Copyright 2014 Google Inc.\n')
# pylint: disable=unused-argument
# The parameter element_type is deliberately unused since PHP doesn't
# support nested classes.
def ToClassName(self, s, unused_element, element_type=None):
"""Convert a discovery name to a suitable PHP class name.
Overrides the default.
Args:
s: (string) The wire format name of a class.
unused_element: (object) The object we need a class name for.
element_type: (string) The kind of object we need a class name for.
Returns:
A name suitable for use as a class in PHP.
"""
if s.lower() in PhpLanguageModel.RESERVED_CLASS_NAMES:
# Prepend the service name.
return utilities.CamelCase(self.values['name']) + utilities.CamelCase(s)
return utilities.CamelCase(s).replace('&', '')
def ModelClasses(self):
"""Return all the model classes."""
ret = set(
s for s in self._schemas.itervalues()
if isinstance(s, Schema))
return sorted(ret, key=operator.attrgetter('class_name'))
def ResourceClasses(self, resources=None):
"""Return all the resource classes."""
if resources is None:
resources = self.values['resources']
all_resources = sorted(resources,
key=lambda resource: resource.values['className'])
for resource in resources:
all_resources.extend(self.ResourceClasses(resource.values['resources']))
return all_resources
def _BuildResourceDefinitions(self):
"""Loop over the resources in the discovery doc and build definitions."""
self._resources = []
def_dict = self.values.get('resources') or {}
for name in sorted(def_dict):
method_dict = def_dict[name].get('methods', {})
for n in method_dict:
# make sure all parameters are of type dict (and not list)
if not method_dict[n].get('parameters'):
method_dict[n]['parameters'] = {}
resource = api.Resource(self, name, def_dict[name], parent=self)
self._resources.append(resource)
# Properties that should be stripped when serializing parts of the
# discovery document.
_EXTRA_PROPERTIES = ['description', 'enumDescriptions', 'resources', 'pattern',
'parameterOrder']
def _StripResource(resource):
"""Return a copy of a resource dict with extraneous properties removed."""
if not isinstance(resource, dict):
return resource
ret = collections.OrderedDict()
for name, value in resource.iteritems():
if name not in _EXTRA_PROPERTIES:
ret[name] = _StripResource(value)
return ret
|
googleapis/google-api-php-client-services
|
generator/src/googleapis/codegen/php_generator.py
|
Python
|
apache-2.0
| 14,317
|
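_ToMethodName above strips dashes from wire-format names and camel-cases the remainder (keyword collisions aside). A standalone restatement of that transformation, with a hypothetical name and no resource/keyword handling:

def to_php_method_name(wire_name):
    words = wire_name.split('-')
    return ''.join(words[:1] + [w.capitalize() for w in words[1:]])

assert to_php_method_name('list') == 'list'
assert to_php_method_name('list-events') == 'listEvents'
assert to_php_method_name('get-by-id') == 'getById'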
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from xarray.core import dtypes
@pytest.mark.parametrize("args, expected", [
([np.bool], np.bool),
([np.bool, np.string_], np.object_),
([np.float32, np.float64], np.float64),
([np.float32, np.string_], np.object_),
([np.unicode_, np.int64], np.object_),
([np.unicode_, np.unicode_], np.unicode_),
([np.bytes_, np.unicode_], np.object_),
])
def test_result_type(args, expected):
actual = dtypes.result_type(*args)
assert actual == expected
def test_result_type_scalar():
actual = dtypes.result_type(np.arange(3, dtype=np.float32), np.nan)
assert actual == np.float32
def test_result_type_dask_array():
# verify it works without evaluating dask arrays
da = pytest.importorskip('dask.array')
dask = pytest.importorskip('dask')
def error():
raise RuntimeError
array = da.from_delayed(dask.delayed(error)(), (), np.float64)
with pytest.raises(RuntimeError):
array.compute()
actual = dtypes.result_type(array)
assert actual == np.float64
# note that this differs from the behavior for scalar numpy arrays, which
# would get promoted to float32
actual = dtypes.result_type(array, np.array([0.5, 1.0], dtype=np.float32))
assert actual == np.float64
@pytest.mark.parametrize('obj', [1.0, np.inf, 'ab', 1.0 + 1.0j, True])
def test_inf(obj):
assert dtypes.INF > obj
assert dtypes.NINF < obj
|
jcmgray/xarray
|
xarray/tests/test_dtypes.py
|
Python
|
apache-2.0
| 1,508
|
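The tests above exercise xarray's dtypes.result_type, which promotes to object whenever string or bytes kinds are mixed with other types instead of following numpy's default promotion. A rough sketch of that rule (result_type_sketch is a made-up name, not xarray's implementation, and it ignores the dask and NaN cases covered by the tests):

import numpy as np

def result_type_sketch(*types):
    dtypes = [np.dtype(t) for t in types]
    kinds = {t.kind for t in dtypes}
    if len(kinds) > 1 and kinds & {'U', 'S'}:
        return np.dtype(object)   # mixing str/bytes with anything else -> object
    return np.result_type(*dtypes)

assert result_type_sketch(np.float32, np.float64) == np.float64
assert result_type_sketch(np.float32, np.str_) == np.dtype(object)
assert result_type_sketch(np.bytes_, np.str_) == np.dtype(object)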
import io
import json
import zipfile
from functools import cached_property
from typing import Callable, Dict, KeysView, List, NamedTuple, Set, Union
import requests
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.core.files import File
from django.core.paginator import Paginator
from django.db.models import Prefetch
from django.http import (
FileResponse,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import redirect, reverse
from django.views import generic
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import MultipleObjectMixin
from accounts.utils import (
hash_child_id,
hash_demographic_id,
hash_id,
hash_participant_id,
)
from exp.utils import (
RESPONSE_PAGE_SIZE,
csv_dict_output_and_writer,
csv_namedtuple_writer,
flatten_dict,
round_age,
round_ages_from_birthdays,
study_name_for_files,
)
from exp.views.mixins import (
CanViewStudyResponsesMixin,
ResearcherLoginRequiredMixin,
SingleObjectFetchProtocol,
StudyLookupMixin,
)
from studies.models import Feedback, Response, Study, Video
from studies.permissions import StudyPermission
from studies.queries import (
get_consent_statistics,
get_responses_with_current_rulings_and_videos,
)
from studies.tasks import build_framedata_dict, build_zipfile_of_videos
class ResponseDataColumn(NamedTuple):
# id: Unique key to identify data. Used as CSV column header and any portion before __ is used to create a
# sub-dictionary for JSON data.
id: str
description: str # Description for data dictionary
extractor: Callable[
[Union[Response, Dict]], Union[str, List]
] # Function to extract value from response instance or dict
optional: bool = False # is a column the user checks a box to include?
name: str = "" # used in template form for optional columns
include_by_default: bool = False # whether to initially check checkbox for field
identifiable: bool = False # used to determine filename signaling
# Columns for response downloads. Extractor functions expect Response instance
RESPONSE_COLUMNS = [
ResponseDataColumn(
id="response__id",
description="Short ID for this response",
extractor=lambda resp: str(resp.id),
name="Response ID",
),
ResponseDataColumn(
id="response__uuid",
description="Unique identifier for response. Can be used to match data to video filenames.",
extractor=lambda resp: str(resp.uuid),
name="Response UUID",
),
ResponseDataColumn(
id="response__date_created",
description="Timestamp for when participant began session, in format e.g. 2019-11-07 17:13:38.702958+00:00",
extractor=lambda resp: str(resp.date_created),
name="Date created",
),
ResponseDataColumn(
id="response__completed",
description=(
"Whether the participant submitted the exit survey; depending on study criteria, this may not align "
"with whether the session is considered complete. E.g., participant may have left early but submitted "
"exit survey, or may have completed all test trials but not exit survey."
),
extractor=lambda resp: resp.completed,
name="Completed",
),
ResponseDataColumn(
id="response__withdrawn",
description=(
"Whether the participant withdrew permission for viewing/use of study video beyond consent video. If "
"true, video will not be available and must not be used."
),
extractor=lambda resp: resp.withdrawn,
name="Withdrawn",
),
ResponseDataColumn(
id="response__parent_feedback",
description=(
"Freeform parent feedback entered into the exit survey, if any. This field may incidentally contain "
"identifying or sensitive information depending on what parents say, so it should be scrubbed or "
"omitted from published data."
),
extractor=lambda resp: resp.parent_feedback,
name="Parent feedback",
),
ResponseDataColumn(
id="response__birthdate_difference",
description=(
"Difference between birthdate entered in exit survey, if any, and birthdate of registered child "
"participating. Positive values mean that the birthdate from the exit survey is LATER. Blank if "
"no birthdate available from the exit survey."
),
extractor=lambda resp: resp.birthdate_difference,
name="Birthdate difference",
),
ResponseDataColumn(
id="response__video_privacy",
description=(
"Privacy level for videos selected during the exit survey, if the parent completed the exit survey. "
"Possible levels are 'private' (only people listed on your IRB protocol can view), 'scientific' "
"(can share for scientific/educational purposes), and 'public' (can also share for publicity). "
"In no cases may videos be shared for commercial purposes. If this is missing (e.g., family stopped "
"just after the consent form and did not complete the exit survey), you must treat the video as "
"private."
),
extractor=lambda resp: resp.privacy,
name="Video privacy level",
),
ResponseDataColumn(
id="response__databrary",
description=(
"Whether the parent agreed to share video data on Databrary - 'yes' or 'no'. If missing, you must "
"treat the video as if 'no' were selected. If 'yes', the video privacy selections also apply to "
"authorized Databrary users."
),
extractor=lambda resp: resp.databrary,
name="Databrary sharing",
),
ResponseDataColumn(
id="response__is_preview",
description=(
"Whether this response was generated by a researcher previewing the experiment. Preview data should "
"not be used in any actual analyses."
),
extractor=lambda resp: resp.is_preview,
name="Preview",
),
ResponseDataColumn(
id="consent__ruling",
description=(
"Most recent consent video ruling: one of 'accepted' (consent has been reviewed and judged to indidate "
"informed consent), 'rejected' (consent has been reviewed and judged not to indicate informed "
"consent -- e.g., video missing or parent did not read statement), or 'pending' (no current judgement, "
"e.g. has not been reviewed yet or waiting on parent email response')"
),
extractor=lambda resp: resp.most_recent_ruling,
),
ResponseDataColumn(
id="consent__arbiter",
description="Name associated with researcher account that made the most recent consent ruling",
extractor=lambda resp: resp.most_recent_ruling_arbiter,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="consent__comment",
description=(
"Comment associated with most recent consent ruling (may be used to track e.g. any cases where consent "
"was confirmed by email)"
),
extractor=lambda resp: resp.most_recent_ruling_comment,
),
ResponseDataColumn(
id="study__uuid",
description="Unique identifier of study associated with this response. Same for all responses to a given Lookit study.",
extractor=lambda resp: str(resp.study.uuid),
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, which "
"may have unintended privacy consequences. Researchers can use this ID to match participants across "
"studies (subject to their own IRB review), but would need to generate their own random participant "
"IDs for publication in that case. Use participant_hashed_id as a publication-safe alternative if "
"only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp.child.user.uuid),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple responses "
"from a child and for siblings, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.user.uuid,
resp.study.uuid,
resp.study.salt,
resp.study.hash_digits,
),
name="Parent ID",
),
ResponseDataColumn(
id="participant__nickname",
description=(
"Nickname associated with the family account for this response - generally the mom or dad's name. "
"Must be redacted for publication."
),
extractor=lambda resp: resp.child.user.nickname,
optional=True,
name="Parent name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__global_id",
description=(
"Primary unique identifier for the child associated with this response. Will be the same for multiple "
"responses from one child, even across different Lookit studies. MUST BE REDACTED FOR PUBLICATION "
"because this allows identification of children across different published studies, which may have "
"unintended privacy consequences. Researchers can use this ID to match participants across studies "
"(subject to their own IRB review), but would need to generate their own random participant IDs for "
"publication in that case. Use child_hashed_id as a publication-safe alternative if only analyzing "
"data from one Lookit study."
),
extractor=lambda resp: str(resp.child.uuid),
optional=True,
name="Child global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__hashed_id",
description=(
"Identifier for child associated with this response. Will be the same for multiple responses from a "
"child, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.uuid, resp.study.uuid, resp.study.salt, resp.study.hash_digits
),
name="Child ID",
),
ResponseDataColumn(
id="child__name",
description=(
"Nickname for the child associated with this response. Not necessarily a real name (we encourage "
"initials, nicknames, etc. if parents aren't comfortable providing a name) but must be redacted for "
"publication of data."
),
extractor=lambda resp: resp.child.given_name,
optional=True,
name="Child name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__birthday",
description=(
"Birthdate of child associated with this response. Must be redacted for publication of data (switch to "
"age at time of participation; either use rounded age, jitter the age, or redact timestamps of "
"participation)."
),
extractor=lambda resp: resp.child.birthday,
optional=True,
name="Birthdate",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_in_days",
description=(
"Age in days at time of response of child associated with this response, exact. This can be used in "
"conjunction with timestamps to calculate the child's birthdate, so must be jittered or redacted prior "
"to publication unless no timestamp information is shared."
),
extractor=lambda resp: (resp.date_created.date() - resp.child.birthday).days,
optional=True,
name="Age in days",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_rounded",
description=(
"Age in days at time of response of child associated with this response, rounded to the nearest 10 "
"days if under 1 year old and to the nearest 30 days if over 1 year old. May be published; however, if "
"you have more than a few sessions per participant it would be possible to infer the exact age in days "
"(and therefore birthdate) with some effort. In this case you might consider directly jittering "
"birthdates."
),
extractor=lambda resp: str(
round_age(int((resp.date_created.date() - resp.child.birthday).days))
)
if (resp.date_created and resp.child.birthday)
else "",
optional=True,
name="Rounded age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__gender",
description=(
"Parent-identified gender of child, one of 'm' (male), 'f' (female), 'o' (other), or 'na' (prefer not "
"to answer)"
),
extractor=lambda resp: resp.child.gender,
optional=True,
name="Child gender",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__age_at_birth",
description=(
"Gestational age at birth in weeks. One of '40 or more weeks', '39 weeks' through '24 weeks', "
"'Under 24 weeks', or 'Not sure or prefer not to answer'"
),
extractor=lambda resp: resp.child.age_at_birth,
optional=True,
name="Child gestational age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__language_list",
description="List of languages spoken (using language codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.language_list,
optional=True,
name="Child languages",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__condition_list",
description="List of child characteristics (using condition/characteristic codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.condition_list,
optional=True,
name="Child conditions",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__additional_information",
description=(
"Free response 'anything else you'd like us to know' field on child registration form for child "
"associated with this response. Should be redacted or reviewed prior to publication as it may include "
"names or other identifying information."
),
extractor=lambda resp: resp.child.additional_information,
optional=True,
name="Child additional information",
include_by_default=True,
identifiable=True,
),
ResponseDataColumn(
id="response__sequence",
description=(
"Each response_sequence.N field (response_sequence.0, response_sequence.1, etc.) gives the ID of the "
"Nth frame displayed during the session associated with this response. Responses may have different "
"sequences due to randomization or if a participant leaves early."
),
extractor=lambda resp: resp.sequence,
name="Response sequence",
),
ResponseDataColumn(
id="response__conditions",
description=(
"RESEARCHERS: EXPAND THIS SECTION BASED ON YOUR INDIVIDUAL STUDY. Each set of "
"response_conditions.N.(...) fields give information about condition assignment during a particular "
"frame of this study. response_conditions.0.frameName is the frame ID (corresponding to a value in "
"response_sequence) where the randomization occurred. Additional fields such as "
"response_conditions.0.conditionNum depend on the specific randomizer frames used in this study."
),
extractor=lambda resp: [
{**{"frameName": cond_frame}, **conds}
for (cond_frame, conds) in resp.conditions.items()
],
),
]
# Columns for demographic data downloads. Extractor functions expect Response values dict,
# rather than instance.
DEMOGRAPHIC_COLUMNS = [
ResponseDataColumn(
id="response__uuid",
description=(
"Primary unique identifier for response. Can be used to match demographic data to response data "
"and video filenames; must be redacted prior to publication if videos are also published."
),
extractor=lambda resp: str(resp["uuid"]),
name="Response UUID",
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, "
"which may have unintended privacy consequences. Researchers can use this ID to match participants "
"across studies (subject to their own IRB review), but would need to generate their own random "
"participant IDs for publication in that case. Use participant__hashed_id as a publication-safe "
"alternative if only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp["child__user__uuid"]),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, but is unique to this study. This may be published "
"directly."
),
extractor=lambda resp: hash_participant_id(resp),
name="Participant ID",
),
ResponseDataColumn(
id="demographic__hashed_id",
description=(
"Identifier for this demographic snapshot. Changes upon updates to the demographic form, "
"so may vary within the same participant across responses."
),
extractor=lambda resp: hash_demographic_id(resp),
name="Demographic ID",
),
ResponseDataColumn(
id="demographic__date_created",
description=(
"Timestamp of creation of the demographic snapshot associated with this response, in format e.g. "
"2019-10-02 21:39:03.713283+00:00"
),
extractor=lambda resp: str(resp["demographic_snapshot__created_at"]),
name="Date created",
),
ResponseDataColumn(
id="demographic__number_of_children",
description="Response to 'How many children do you have?'; options 0-10 or >10 (More than 10)",
extractor=lambda resp: resp["demographic_snapshot__number_of_children"],
name="Number of children",
),
ResponseDataColumn(
id="demographic__child_rounded_ages",
description=(
"List of rounded ages based on child birthdays entered in demographic form (not based on children "
"registered). Ages are at time of response for this row, in days, rounded to nearest 10 for ages "
"under 1 year and nearest 30 otherwise. In format e.g. [60, 390]"
),
extractor=lambda resp: round_ages_from_birthdays(
resp["demographic_snapshot__child_birthdays"], resp["date_created"]
),
name="Child ages rounded",
),
ResponseDataColumn(
id="demographic__languages_spoken_at_home",
description="Freeform response to 'What language(s) does your family speak at home?'",
extractor=lambda resp: resp["demographic_snapshot__languages_spoken_at_home"],
name="Languages spoken at home",
),
ResponseDataColumn(
id="demographic__number_of_guardians",
description="Response to 'How many parents/guardians do your children live with?' - 1, 2, 3> [3 or more], varies",
extractor=lambda resp: resp["demographic_snapshot__number_of_guardians"],
name="Number of guardians",
),
ResponseDataColumn(
id="demographic__number_of_guardians_explanation",
description=(
"Freeform response to 'If the answer varies due to shared custody arrangements or travel, please "
"enter the number of parents/guardians your children are usually living with or explain.'"
),
extractor=lambda resp: resp[
"demographic_snapshot__number_of_guardians_explanation"
],
name="Number of guardians explanation",
),
ResponseDataColumn(
id="demographic__race_identification",
description=(
"Comma-separated list of all values checked for question 'What category(ies) does your family "
"identify as?', from list: White; Hispanic, Latino, or Spanish origin; Black or African American; "
"Asian; American Indian or Alaska Native; Middle Eastern or North African; Native Hawaiian or "
"Other Pacific Islander; Another race, ethnicity, or origin"
),
extractor=lambda resp: resp["demographic_snapshot__race_identification"],
name="Race",
),
ResponseDataColumn(
id="demographic__parent_age",
description=(
"Parent's response to question 'What is your age?'; options are <18, 18-21, 22-24, 25-29, 30-34, "
"35-39, 40-44, 45-49, 50s, 60s, >70"
),
extractor=lambda resp: resp["demographic_snapshot__age"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__parent_gender",
description=(
"Parent's response to question 'What is your gender?'; options are m [male], f [female], o "
"[other], na [prefer not to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__gender"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__education_level",
description=(
"Parent's response to question 'What is the highest level of education you've completed?'; options "
"are some [some or attending high school], hs [high school diploma or GED], col [some or attending "
"college], assoc [2-year college degree], bach [4-year college degree], grad [some or attending "
"graduate or professional school], prof [graduate or professional degree]"
),
extractor=lambda resp: resp["demographic_snapshot__education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__spouse_education_level",
description=(
"Parent's response to question 'What is the highest level of education your spouse has "
"completed?'; options are some [some or attending high school], hs [high school diploma or GED], "
"col [some or attending college], assoc [2-year college degree], bach [4-year college degree], "
"grad [some or attending graduate or professional school], prof [graduate or professional degree], "
"na [not applicable - no spouse or partner]"
),
extractor=lambda resp: resp["demographic_snapshot__spouse_education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__annual_income",
description=(
"Parent's response to question 'What is your approximate family yearly income (in US dollars)?'; "
"options are 0, 5000, 10000, 15000, 20000-19000 in increments of 10000, >200000, or na [prefer not "
"to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__annual_income"],
name="Annual income",
),
ResponseDataColumn(
id="demographic__number_of_books",
description="Parent's response to question 'About how many children's books are there in your home?'; integer",
extractor=lambda resp: resp["demographic_snapshot__number_of_books"],
name="Number of books",
),
ResponseDataColumn(
id="demographic__additional_comments",
description="Parent's freeform response to question 'Anything else you'd like us to know?'",
extractor=lambda resp: resp["demographic_snapshot__additional_comments"],
name="Additional comments",
),
ResponseDataColumn(
id="demographic__country",
description="Parent's response to question 'What country do you live in?'; 2-letter country code",
extractor=lambda resp: resp["demographic_snapshot__country"],
name="Country code",
),
ResponseDataColumn(
id="demographic__state",
description=(
"Parent's response to question 'What state do you live in?' if country is US; 2-letter state "
"abbreviation"
),
extractor=lambda resp: resp["demographic_snapshot__state"],
name="US State",
),
ResponseDataColumn(
id="demographic__density",
description=(
"Parent's response to question 'How would you describe the area where you live?'; options are "
"urban, suburban, rural"
),
extractor=lambda resp: resp["demographic_snapshot__density"],
name="Density",
),
ResponseDataColumn(
id="demographic__lookit_referrer",
description="Parent's freeform response to question 'How did you hear about Lookit?'",
extractor=lambda resp: resp["demographic_snapshot__lookit_referrer"],
name="How you heard about Lookit",
),
]
# Which headers from the response data summary should go in the child data downloads
CHILD_CSV_HEADERS = [
col.id
for col in RESPONSE_COLUMNS
if col.id.startswith("child__") or col.id.startswith("participant__")
]
IDENTIFIABLE_DATA_HEADERS = {col.id for col in RESPONSE_COLUMNS if col.identifiable}
def get_response_headers(
selected_header_ids: Union[Set, List],
all_available_header_ids: Union[Set, KeysView],
) -> List:
"""Get ordered list of response headers for download.
Select and order the appropriate headers to include in a file download, based on
which optional headers are selected and which headers are available.
Args:
selected_header_ids: which optional headers to include (corresponding to id values in
RESPONSE_COLUMNS). Headers that are specified as optional in RESPONSE_COLUMNS will
only be included if listed in selected_header_ids.
all_available_header_ids: all header ids we have data for. Any header ids that are in
this set but not in RESPONSE_COLUMNS will be added to the end of the output list.
Returns:
List of headers to include, consisting of the following in order:
1) Headers in RESPONSE_COLUMNS, in order, omitting any that are optional and were not selected
2) Extra headers from all_available_header_ids not included in (1), in alpha order
"""
unselected_optional_ids = {
col.id
for col in RESPONSE_COLUMNS
if col.optional and col.id not in selected_header_ids
}
selected_standard_header_ids = [
col.id
for col in RESPONSE_COLUMNS[0:-2]
if col.id not in unselected_optional_ids
]
return selected_standard_header_ids + sorted(
list(
all_available_header_ids
- set(selected_standard_header_ids)
- unselected_optional_ids
)
)
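# Hedged usage sketch for get_response_headers (illustrative ids only, not taken
# from a real download):
#   get_response_headers(
#       selected_header_ids={"child__gender"},
#       all_available_header_ids={"response__uuid", "child__gender", "extra__field"},
#   )
# keeps the non-optional RESPONSE_COLUMNS ids in their defined order, includes the
# selected optional "child__gender", drops unselected optional columns, and appends
# leftover ids such as "extra__field" at the end in alphabetical order.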
def get_demographic_headers(selected_header_ids=None) -> List[str]:
"""Get ordered list of demographic headers for download.
Args:
selected_header_ids(set or list): which optional headers to include (corresponding
to id values in DEMOGRAPHIC_COLUMNS).
Returns:
Ordered list of headers to include in download
Headers are id values from DEMOGRAPHIC_COLUMNS in order, omitting any that are optional
and were not included in selected_header_ids.
"""
if selected_header_ids is None:
selected_header_ids = {}
return [
col.id
for col in DEMOGRAPHIC_COLUMNS
if col.id in selected_header_ids or not col.optional
]
def construct_response_dictionary(
resp, columns, optional_headers, include_exp_data=True
):
if optional_headers is None:
optional_headers = {}
resp_dict = {}
for col in columns:
if col.id in optional_headers or not col.optional:
try:
object_name, field_name = col.id.split("__")
if object_name in resp_dict:
resp_dict[object_name][field_name] = col.extractor(resp)
else:
resp_dict[object_name] = {field_name: col.extractor(resp)}
except ValueError:
resp_dict[col.id] = col.extractor(resp)
# Include exp_data field in dictionary?
if include_exp_data:
resp_dict["exp_data"] = resp.exp_data
return resp_dict
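# Illustrative sketch (assumption, not from the original code): because column ids are split on
# "__", a column such as "child__age_rounded" is nested in the output dictionary as
#     {"child": {"age_rounded": <value>}, ...}
# while an id without "__" (the split raises ValueError) is kept flat, e.g. {"plain_id": <value>}.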
class FrameDataRow(NamedTuple):
response_uuid: str
child_hashed_id: str
frame_id: str
event_number: str
key: str
value: str
FRAME_DATA_HEADER_DESCRIPTIONS = {
"response_uuid": "Unique identifier for this response; can be matched to summary data and video filenames",
"child_hashed_id": (
"Hashed identifier for the child associated with this response; can be matched to summary data "
"child_hashed_id. This random ID may be published directly; it is specific to this study. If you "
"need to match children across multiple studies, use the child_global_id."
),
"frame_id": (
"Identifier for the particular frame responsible for this data; matches up to an element in the "
"response_sequence in the summary data file"
),
"event_number": (
"Index of the event responsible for this data, if this is an event. Indexes start from 0 within each "
"frame (and within global data) within each response. Blank for non-event data."
),
"key": "Label for a piece of data collected during this frame - for example, 'formData.child_favorite_animal'",
"value": "Value of the data associated with this key (of the indexed event if applicable) - for example, 'giraffe'",
}
def get_frame_data(resp: Union[Response, Dict]) -> List[FrameDataRow]:
"""Get list of data stored in response's exp_data and global_event_timings fields.
Args:
resp(Response or dict): response data to process. If dict, must contain fields
child__uuid, study__uuid, study__salt, study__hash_digits, uuid, exp_data, and
global_event_timings.
Returns:
List of FrameDataRows each representing a single piece of data from global_event_timings or
exp_data. Descriptions of each field of the FrameDataRow are given in FRAME_DATA_HEADER_DESCRIPTIONS.
"""
if type(resp) is not dict:
resp = {
"child__uuid": resp.child.uuid,
"study__uuid": resp.study.uuid,
"study__salt": resp.study.salt,
"study__hash_digits": resp.study.hash_digits,
"uuid": resp.uuid,
"exp_data": resp.exp_data,
"global_event_timings": resp.global_event_timings,
}
frame_data_tuples = []
child_hashed_id = hash_id(
resp["child__uuid"],
resp["study__uuid"],
resp["study__salt"],
resp["study__hash_digits"],
)
# First add all of the global event timings as events with frame_id "global"
for (iEvent, event) in enumerate(resp["global_event_timings"]):
for (key, value) in event.items():
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id="global",
key=key,
event_number=str(iEvent),
value=value,
)
)
# Next add all data in exp_data
event_prefix = "eventTimings."
for frame_id, frame_data in resp["exp_data"].items():
for (key, value) in flatten_dict(frame_data).items():
# Process event data separately and include event_number within frame
if key.startswith(event_prefix):
key_pieces = key.split(".")
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=".".join(key_pieces[2:]),
event_number=str(key_pieces[1]),
value=value,
)
)
# omit frameType values from CSV
elif key == "frameType":
continue
# Omit the DOB from any exit survey
elif key == "birthDate" and frame_data.get("frameType", None) == "EXIT":
continue
# Omit empty generatedProperties values from CSV
elif key == "generatedProperties" and not value:
continue
# For all other data, create a regular entry with frame_id and no event #
else:
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=key,
event_number="",
value=value,
)
)
return frame_data_tuples
def build_framedata_dict_csv(writer, responses):
response_paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
unique_frame_ids = set()
event_keys = set()
unique_frame_keys_dict = {}
for page_num in response_paginator.page_range:
page_of_responses = response_paginator.page(page_num)
for resp in page_of_responses:
this_resp_data = get_frame_data(resp)
these_ids = {
d.frame_id.partition("-")[2]
for d in this_resp_data
if not d.frame_id == "global"
}
event_keys = event_keys | {
d.key for d in this_resp_data if d.event_number != ""
}
unique_frame_ids = unique_frame_ids | these_ids
for frame_id in these_ids:
these_keys = {
d.key
for d in this_resp_data
if d.frame_id.partition("-")[2] == frame_id and d.event_number == ""
}
if frame_id in unique_frame_keys_dict:
unique_frame_keys_dict[frame_id] = (
unique_frame_keys_dict[frame_id] | these_keys
)
else:
unique_frame_keys_dict[frame_id] = these_keys
# Start with general descriptions of high-level headers (child_id, response_id, etc.)
writer.writerows(
[
{"column": header, "description": description}
for (header, description) in FRAME_DATA_HEADER_DESCRIPTIONS.items()
]
)
writer.writerow(
{
"possible_frame_id": "global",
"frame_description": "Data not associated with a particular frame",
}
)
# Add placeholders to describe each frame type
unique_frame_ids = sorted(list(unique_frame_ids))
for frame_id in unique_frame_ids:
writer.writerow(
{
"possible_frame_id": "*-" + frame_id,
"frame_description": "RESEARCHER: INSERT FRAME DESCRIPTION",
}
)
unique_frame_keys = sorted(list(unique_frame_keys_dict[frame_id]))
for k in unique_frame_keys:
writer.writerow(
{
"possible_frame_id": "*-" + frame_id,
"possible_key": k,
"key_description": "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS KEY MEANS IN THIS FRAME",
}
)
event_keys = sorted(list(event_keys))
event_key_stock_descriptions = {
"eventType": (
"Descriptor for this event; determines what other data is available. Global event 'exitEarly' records "
"cases where the participant attempted to exit the study early by closing the tab/window or pressing F1 "
"or ctrl-X. RESEARCHER: INSERT DESCRIPTIONS OF PARTICULAR EVENTTYPES USED IN YOUR STUDY. (Note: you can "
"find a list of events recorded by each frame in the frame documentation at "
"https://lookit.github.io/ember-lookit-frameplayer, under the Events header.)"
),
"exitType": (
"Used in the global event exitEarly. Only value stored at this point is 'browserNavigationAttempt'"
),
"lastPageSeen": (
"Used in the global event exitEarly. Index of the frame the participant was on before exit attempt."
),
"pipeId": (
"Recorded by any event in a video-capture-equipped frame. Internal video ID used by Pipe service; only "
"useful for troubleshooting in rare cases."
),
"streamTime": (
"Recorded by any event in a video-capture-equipped frame. Indicates time within webcam "
"video (videoId) to nearest 0.1 second. If recording has not started yet, may be 0 or null."
),
"timestamp": "Recorded by all events. Timestamp of event in format e.g. 2019-11-07T17:14:43.626Z",
"videoId": (
"Recorded by any event in a video-capture-equipped frame. Filename (without .mp4 extension) of video "
"currently being recorded."
),
}
for k in event_keys:
writer.writerow(
{
"possible_frame_id": "any (event data)",
"possible_key": k,
"key_description": event_key_stock_descriptions.get(
k, "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS EVENT KEY MEANS"
),
}
)
def build_single_response_framedata_csv(response):
"""
Builds CSV file contents for frame-level data from a single response. Used for both
building zip archive of all response data & offering individual-file downloads on individual responses view.
"""
this_resp_data = get_frame_data(response)
output, writer = csv_namedtuple_writer(FrameDataRow)
writer.writerows(this_resp_data)
return output.getvalue()
class ResponseDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
model = Response
paginate_by = 10
ordering = "id"
def get_queryset(self):
study = self.study
return study.responses_for_researcher(self.request.user).order_by(
self.get_ordering()
)
class DemographicDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
model = Response
paginate_by = 10
ordering = "id"
def get_queryset(self):
study = self.study
return (
study.responses_for_researcher(self.request.user)
.order_by(self.get_ordering())
.select_related("child", "child__user", "study", "demographic_snapshot")
.values(
"uuid",
"date_created",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
"demographic_snapshot__uuid",
"demographic_snapshot__created_at",
"demographic_snapshot__number_of_children",
"demographic_snapshot__child_birthdays",
"demographic_snapshot__languages_spoken_at_home",
"demographic_snapshot__number_of_guardians",
"demographic_snapshot__number_of_guardians_explanation",
"demographic_snapshot__race_identification",
"demographic_snapshot__age",
"demographic_snapshot__gender",
"demographic_snapshot__education_level",
"demographic_snapshot__spouse_education_level",
"demographic_snapshot__annual_income",
"demographic_snapshot__number_of_books",
"demographic_snapshot__additional_comments",
"demographic_snapshot__country",
"demographic_snapshot__state",
"demographic_snapshot__density",
"demographic_snapshot__lookit_referrer",
"demographic_snapshot__extra",
)
)
class StudyResponsesList(ResponseDownloadMixin, generic.ListView):
"""
View to display a list of study responses.
"""
template_name = "studies/study_responses.html"
def get_ordering(self):
"""
Determine sort field and order. Sorting on id actually sorts on child id, not response id.
        Sorting on status actually sorts on the 'completed' field, since we are alphabetizing
        "in progress" and "completed".
"""
orderby = self.request.GET.get("sort", "id")
return orderby.replace("id", "child__id").replace("status", "completed")
def get_queryset(self):
return (
super()
.get_queryset()
.prefetch_related(
"consent_rulings__arbiter",
Prefetch(
"feedback",
queryset=Feedback.objects.select_related("researcher").order_by(
"-id"
),
),
)
)
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary. Study results
are paginated.
"""
context = super().get_context_data(**kwargs)
context["study"] = study = self.study
paginated_responses = context["object_list"]
columns_included_in_summary = [
"response__id",
"response__uuid",
"response__date_created",
"response__completed",
"response__withdrawn",
"response__parent_feedback",
"response__birthdate_difference",
"response__video_privacy",
"response__databrary",
"response__is_preview",
"response__sequence",
"participant__global_id",
"participant__hashed_id",
"participant__nickname",
"child__global_id",
"child__hashed_id",
"child__name",
"child__age_rounded",
"child__gender",
"child__age_at_birth",
"child__language_list",
"child__condition_list",
"child__additional_information",
]
columns_included_in_table = [
"child__hashed_id",
"response__uuid",
"response__id",
"response__status",
"response__completed",
"response__is_preview",
]
response_data = []
for resp in paginated_responses:
# Info needed for table display of individual responses
this_resp_data = {
col.id: col.extractor(resp)
for col in RESPONSE_COLUMNS
if col.id in columns_included_in_table
}
# Exception - store actual date object for date created
this_resp_data["response__date_created"] = resp.date_created
# info needed for summary table shown at right
this_resp_data["summary"] = [
{
"name": col.name,
"value": col.extractor(resp),
"description": col.description,
}
for col in RESPONSE_COLUMNS
if col.id in columns_included_in_summary
]
this_resp_data["videos"] = resp.videos.values("pk", "full_name")
for v in this_resp_data["videos"]:
v["display_name"] = (
v["full_name"]
.replace("videoStream_{}_".format(study.uuid), "...")
.replace("_{}_".format(resp.uuid), "...")
)
response_data.append(this_resp_data)
context["response_data"] = response_data
context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
context["can_edit_feedback"] = self.request.user.has_study_perms(
StudyPermission.EDIT_STUDY_FEEDBACK, context["study"]
)
return context
def build_video_display_name(self, study_uuid, response_uuid, vid_name):
"""
Strips study_uuid and response_uuid out of video responses titles for better display.
"""
return ". . ." + ". . .".join(
vid_name.split(study_uuid + "_")[1].split("_" + response_uuid + "_")
)
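    # Illustrative sketch (the filename format is an assumption based on the replacements used in
    # get_context_data above): a stored name like
    #     "videoStream_<study-uuid>_1-video-consent_<response-uuid>_<timestamp>.mp4"
    # would be displayed as ". . .1-video-consent. . .<timestamp>.mp4".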
class StudySingleResponseDownload(ResponseDownloadMixin, View):
"""
Download a single study response in the selected format with selected headers.
"""
def get(self, *args, **kwargs):
data_type = self.request.GET.get("data-type-selector", None)
if data_type not in ["json", "csv", "framedata"]:
raise SuspiciousOperation
response_id = self.request.GET.get("response_id", None)
try:
resp = self.get_queryset().get(pk=response_id)
except ObjectDoesNotExist:
raise SuspiciousOperation
study = self.study
header_options = set(self.request.GET.getlist("data_options"))
extension = "json" if data_type == "json" else "csv"
filename = "{}_{}{}.{}".format(
study_name_for_files(study.name),
str(resp.uuid),
"_frames"
if data_type == "json"
else "_identifiable"
if IDENTIFIABLE_DATA_HEADERS & header_options
else "",
extension,
)
if data_type == "json":
cleaned_data = json.dumps(
construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
indent="\t",
default=str,
)
elif data_type == "csv":
row_data = flatten_dict(
{col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
)
header_list = get_response_headers(header_options, row_data.keys())
output, writer = csv_dict_output_and_writer(header_list)
writer.writerow(row_data)
cleaned_data = output.getvalue()
elif data_type == "framedata":
cleaned_data = build_single_response_framedata_csv(resp)
else:
raise SuspiciousOperation
response = HttpResponse(cleaned_data, content_type="text/{}".format(extension))
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponseVideoAttachment(
ResearcherLoginRequiredMixin, UserPassesTestMixin, StudyLookupMixin, View
):
"""
View that redirects to a requested video for a study response.
"""
raise_exception = True
@cached_property
def video(self):
# Only select the video from consented videos for this study
return self.study.videos_for_consented_responses.get(
pk=self.kwargs.get("video")
)
def can_view_this_video(self):
user = self.request.user
study = self.study
video = self.video
return user.is_researcher and (
(
user.has_study_perms(StudyPermission.READ_STUDY_RESPONSE_DATA, study)
and not video.response.is_preview
)
or (
user.has_study_perms(StudyPermission.READ_STUDY_PREVIEW_DATA, study)
and video.response.is_preview
)
)
test_func = can_view_this_video
def get(self, request, *args, **kwargs):
video = self.video
download_url = video.download_url
if self.request.GET.get("mode") == "download":
r = requests.get(download_url)
response = FileResponse(
                File(io.BytesIO(r.content)),
filename=video.filename,
as_attachment=True,
)
return response
return redirect(download_url)
class StudyResponseSubmitFeedback(StudyLookupMixin, UserPassesTestMixin, View):
"""
View to create or edit response feedback.
"""
def user_can_edit_feedback(self):
user = self.request.user
study = self.study
# First check user has permission to be editing feedback from this study at all
        if not (
            user.is_researcher
            and user.has_study_perms(StudyPermission.EDIT_STUDY_FEEDBACK, study)
        ):
return False
# Check that the feedback_id (if given) is from this study
feedback_id = self.request.POST.get("feedback_id", None)
if feedback_id:
try:
feedback = Feedback.objects.get(id=feedback_id)
except ObjectDoesNotExist:
return False
if feedback.response.study_id != study.pk:
return False
# Check that the response_id (if given) is from this study
response_id = self.request.POST.get("response_id", None)
if response_id:
try:
response = Response.objects.get(id=int(response_id))
except ObjectDoesNotExist:
return False
if response.study_id != study.pk:
return False
return True
test_func = user_can_edit_feedback
def post(self, request, *args, **kwargs):
"""
Create or edit feedback. Pass feedback_id to edit existing feedback, or response_id to create new
feedback for that response.
"""
form_data = self.request.POST
user = self.request.user
study = self.study
feedback_id = form_data.get("feedback_id", None)
comment = form_data.get("comment", "")
if feedback_id:
Feedback.objects.filter(id=feedback_id).update(comment=comment)
else:
response_id = int(form_data.get("response_id"))
Feedback.objects.create(
response_id=response_id, researcher=user, comment=comment
)
return HttpResponseRedirect(
reverse("exp:study-responses-list", kwargs=dict(pk=study.pk))
)
class StudyResponsesConsentManager(
ResearcherLoginRequiredMixin,
UserPassesTestMixin,
SingleObjectFetchProtocol[Study],
generic.DetailView,
):
"""Manage consent videos from here: approve or reject as evidence of informed consent."""
template_name = "studies/study_responses_consent_ruling.html"
queryset = Study.objects.all()
raise_exception = True
def user_can_code_consent(self):
user = self.request.user
study = self.get_object()
return user.is_researcher and (
user.has_study_perms(StudyPermission.CODE_STUDY_CONSENT, study)
or user.has_study_perms(StudyPermission.CODE_STUDY_PREVIEW_CONSENT, study)
)
test_func = user_can_code_consent
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Need to prefetch our responses with consent-footage videos.
study = context["study"]
# TODO: technically should not grant access to consent videos for preview data unless has that perm
# (or should clearly indicate that code_study_consent means preview + actual data)
preview_only = not self.request.user.has_study_perms(
StudyPermission.CODE_STUDY_CONSENT, study
)
responses = get_responses_with_current_rulings_and_videos(
study.id, preview_only
)
context["loaded_responses"] = responses
context["summary_statistics"] = get_consent_statistics(study.id, preview_only)
# Using a map for arbitrarily structured data - lists and objects that we can't just trivially shove onto
# data-* properties in HTML
response_key_value_store = {}
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
# two jobs - generate statistics and populate k/v store.
for response in page_of_responses:
response_json = response_key_value_store[str(response["uuid"])] = {}
response["uuid"] = str(response.pop("uuid"))
response_json["videos"] = response.pop("videos")
response_json["details"] = {
"general": {
"uuid": response["uuid"],
"global_event_timings": json.dumps(
response.pop("global_event_timings")
),
"sequence": json.dumps(response.pop("sequence")),
"completed": json.dumps(response.pop("completed")),
"date_created": str(response["date_created"]),
},
"participant": {
"hashed_id": hash_participant_id(response),
"uuid": str(response.pop("child__user__uuid")),
"nickname": response.pop("child__user__nickname"),
},
"child": {
"hashed_id": hash_child_id(response),
"uuid": str(response.pop("child__uuid")),
"name": response.pop("child__given_name"),
"birthday": str(response.pop("child__birthday")),
"gender": response.pop("child__gender"),
"additional_information": response.pop(
"child__additional_information"
),
},
}
# TODO: Use json_script template tag to create JSON that can be used in Javascript
# (see https://docs.djangoproject.com/en/3.0/ref/templates/builtins/#json-script)
context["response_key_value_store"] = json.dumps(response_key_value_store)
return context
def post(self, request, *args, **kwargs):
"""This is where consent rulings are submitted."""
form_data = self.request.POST
user = self.request.user
study = self.get_object()
preview_only = not self.request.user.has_study_perms(
StudyPermission.CODE_STUDY_CONSENT, study
)
# Only allow any action on preview responses unless full perms
responses = study.responses
if preview_only:
responses = responses.filter(is_preview=True)
comments = json.loads(form_data.get("comments"))
# We now accept pending rulings to reverse old reject/approve decisions.
for ruling in ("accepted", "rejected", "pending"):
judged_responses = responses.filter(uuid__in=form_data.getlist(ruling))
for response in judged_responses:
response.consent_rulings.create(
action=ruling,
arbiter=user,
comments=comments.pop(str(response.uuid), None),
)
response.save()
# if there are any comments left over, these will count as new rulings that are the same as the last.
if comments:
for resp_uuid, comment in comments.items():
response = responses.get(uuid=resp_uuid)
response.consent_rulings.create(
action=response.most_recent_ruling, arbiter=user, comments=comment
)
return HttpResponseRedirect(
reverse(
"exp:study-responses-consent-manager",
kwargs=dict(pk=self.get_object().pk),
)
)
class StudyResponsesAll(
CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView,
):
"""
StudyResponsesAll shows a variety of download options for response and child data
from a given study. (It does not actually show any data.)
"""
template_name = "studies/study_responses_all.html"
queryset = Study.objects.all()
http_method_names = ["get"]
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary.
"""
context = super().get_context_data(**kwargs)
context["n_responses"] = (
context["study"].responses_for_researcher(self.request.user).count()
)
context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
context["can_delete_preview_data"] = self.request.user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, context["study"]
)
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
return context
class StudyDeletePreviewResponses(
ResearcherLoginRequiredMixin,
UserPassesTestMixin,
SingleObjectFetchProtocol[Study],
SingleObjectMixin,
View,
):
queryset = Study.objects.all()
def user_can_delete_preview_data(self):
user = self.request.user
study = self.get_object()
return user.is_researcher and user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, study
)
test_func = user_can_delete_preview_data
def post(self, request, *args, **kwargs):
"""
Post method on all responses view handles the 'delete all preview data' button.
"""
study = self.get_object()
# Note: delete all, not just consented!
preview_responses = study.responses.filter(is_preview=True).prefetch_related(
"videos", "responselog_set", "consent_rulings", "feedback"
)
paginator = Paginator(preview_responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
# response logs, consent rulings, feedback, videos will all be deleted
# via cascades - videos will be removed from S3 also on pre_delete hook
resp.delete()
return HttpResponseRedirect(
reverse("exp:study-responses-all", kwargs={"pk": study.id})
)
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all study responses in JSON format.
"""
# Smaller pagination because individual responses may be large and we don't want the json representing 100
# responses in memory
paginate_by = 1
def make_chunk(self, paginator, page_num, header_options):
chunk = ""
if page_num == 1:
chunk = "[\n"
chunk += ",\n".join(
json.dumps(
construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
indent="\t", # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
default=str,
)
for resp in paginator.page(page_num)
)
if page_num == paginator.page_range[-1]:
chunk += "\n]"
else:
chunk += ",\n"
return chunk
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
header_options = set(self.request.GET.getlist("data_options"))
filename = "{}_{}.json".format(
study_name_for_files(study.name),
"all-responses"
+ ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
)
response = StreamingHttpResponse(
(
self.make_chunk(paginator, page_num, header_options)
for page_num in paginator.page_range
),
content_type="text/json",
)
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads a summary of all study responses in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
headers = set()
session_list = []
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = flatten_dict(
{col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
)
# Add any new headers from this session
headers = headers | row_data.keys()
session_list.append(row_data)
header_options = set(self.request.GET.getlist("data_options"))
header_list = get_response_headers(header_options, headers)
output, writer = csv_dict_output_and_writer(header_list)
writer.writerows(session_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name),
"all-responses"
+ ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
"""
Hitting this URL downloads a data dictionary for the study response summary in CSV format. Does not depend on actual response data.
"""
def build_summary_dict_csv(self, optional_headers_selected_ids):
"""
Builds CSV file contents for data dictionary corresponding to the overview CSV
"""
descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
header_list = get_response_headers(
optional_headers_selected_ids, descriptions.keys()
)
all_descriptions = [
{"column": header, "description": descriptions[header]}
for header in header_list
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
return output.getvalue()
def get(self, request, *args, **kwargs):
study = self.study
header_options = self.request.GET.getlist("data_options")
cleaned_data = self.build_summary_dict_csv(header_options)
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-responses-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads a summary of all children who participated in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
child_list = []
session_list = []
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = flatten_dict(
{
col.id: col.extractor(resp)
for col in RESPONSE_COLUMNS
if col.id in CHILD_CSV_HEADERS
}
)
if row_data["child__global_id"] not in child_list:
child_list.append(row_data["child__global_id"])
session_list.append(row_data)
output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
writer.writerows(session_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-children-identifiable"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
"""
Hitting this URL downloads a data dictionary in CSV format for the summary of children who participated.
Does not depend on actual response data.
TODO: separate from response data mixin
"""
def build_child_dict_csv(self):
"""
Builds CSV file contents for data dictionary for overview of all child participants
"""
all_descriptions = [
{"column": col.id, "description": col.description}
for col in RESPONSE_COLUMNS
if col.id in CHILD_CSV_HEADERS
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
return output.getvalue()
def get(self, request, *args, **kwargs):
study = self.study
cleaned_data = self.build_child_dict_csv()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-children-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
"""Hitting this URL downloads a ZIP file with frame data from one response per file in CSV format"""
# TODO: with large files / many responses generation can take a while. Should generate asynchronously along
# with the data dict.
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
zipped_file = io.BytesIO() # import io
with zipfile.ZipFile(zipped_file, "w", zipfile.ZIP_DEFLATED) as zipped:
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
data = build_single_response_framedata_csv(resp)
filename = "{}_{}_{}.csv".format(
study_name_for_files(study.name), resp.uuid, "frames"
)
zipped.writestr(filename, data)
zipped_file.seek(0)
response = FileResponse(
zipped_file,
as_attachment=True,
filename="{}_framedata_per_session.zip".format(
study_name_for_files(study.name)
),
)
return response
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
"""
Hitting this URL queues creation of a template data dictionary for frame-level data in CSV format.
The file is put on GCP and a link is emailed to the user.
"""
def get(self, request, *args, **kwargs):
study = self.study
filename = "{}_{}_{}".format(
study_name_for_files(study.name), study.uuid, "all-frames-dict"
)
build_framedata_dict.delay(filename, study.uuid, self.request.user.uuid)
messages.success(
request,
f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
)
return HttpResponseRedirect(
reverse("exp:study-responses-all", kwargs=self.kwargs)
)
class StudyDemographics(
CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView,
):
"""
StudyDemographics view shows participant demographic snapshots associated
with each response to the study
"""
template_name = "studies/study_demographics.html"
queryset = Study.objects.all()
def get_context_data(self, **kwargs):
"""
Adds information for displaying how many and which types of responses are available.
"""
context = super().get_context_data(**kwargs)
context["n_responses"] = (
context["study"].responses_for_researcher(self.request.user).count()
)
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all participant demographics in JSON format.
"""
def render_to_response(self, context, **response_kwargs):
study = self.study
header_options = self.request.GET.getlist("demo_options")
json_responses = []
paginator = context["paginator"]
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
json_responses.append(
json.dumps(
construct_response_dictionary(
resp,
DEMOGRAPHIC_COLUMNS,
header_options,
include_exp_data=False,
),
indent="\t",
default=str,
)
)
cleaned_data = f"[ {', '.join(json_responses)} ]"
filename = "{}_{}.json".format(
study_name_for_files(study.name), "all-demographic-snapshots"
)
response = HttpResponse(cleaned_data, content_type="text/json")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all participant demographics in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
study = self.study
paginator = context["paginator"]
header_options = set(self.request.GET.getlist("demo_options"))
participant_list = []
headers_for_download = get_demographic_headers(header_options)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
participant_list.append(row_data)
output, writer = csv_dict_output_and_writer(headers_for_download)
writer.writerows(participant_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-demographic-snapshots"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
"""
    Hitting this URL downloads a data dictionary for participant demographics in CSV format.
Does not depend on any actual data.
"""
def render_to_response(self, context, **response_kwargs):
header_options = set(self.request.GET.getlist("demo_options"))
headers_for_download = get_demographic_headers(header_options)
all_descriptions = [
{"column": col.id, "description": col.description}
for col in DEMOGRAPHIC_COLUMNS
if col.id in headers_for_download
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(self.study.name), "all-demographic-snapshots-dict",
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
"""
Hitting this URL checks for collisions among all child and account hashed IDs, and returns a string describing
any collisions (empty string if none).
"""
def get(self, request, *args, **kwargs):
study = self.study
responses = (
study.consented_responses.order_by("id")
.select_related("child", "child__user", "study")
.values(
"uuid",
"child__uuid",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
)
)
child_dict = {}
account_dict = {}
collision_text = ""
# Note: could also just check number of unique global vs. hashed IDs in full dataset;
# only checking one-by-one for more informative output.
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
participant_hashed_id = hash_participant_id(resp)
participant_global_id = resp["child__user__uuid"]
child_hashed_id = hash_child_id(resp)
child_global_id = resp["child__uuid"]
if participant_hashed_id in account_dict:
if participant_global_id != account_dict[participant_hashed_id]:
collision_text += "Participant hashed ID {} ({}, {})\n".format(
participant_hashed_id,
account_dict[participant_hashed_id],
participant_global_id,
)
else:
account_dict[participant_hashed_id] = participant_global_id
if child_hashed_id in child_dict:
if child_global_id != child_dict[child_hashed_id]:
collision_text += "Child hashed ID {} ({}, {})<br>".format(
child_hashed_id,
child_dict[child_hashed_id],
child_global_id,
)
else:
child_dict[child_hashed_id] = child_global_id
return JsonResponse({"collisions": collision_text})
class StudyAttachments(CanViewStudyResponsesMixin, generic.ListView):
"""
StudyAttachments View shows video attachments for the study
"""
template_name = "studies/study_attachments.html"
model = Video
paginate_by = 100
def get_ordering(self):
return self.request.GET.get("sort", "-created_at") or "-created_at"
def get_queryset(self):
"""Fetches all consented videos this user has access to.
Returns:
QuerySet: all videos from this study where response has been marked as
consented and response is of a type (preview/actual data) that user can view
Todo:
* use a helper (e.g. in queries) select_videos_for_user to fetch the
appropriate videos here and in build_zipfile_of_videos - deferring for the moment
to work out dependencies.
"""
study = self.study
videos = study.videos_for_consented_responses
if not self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, study
):
videos = videos.filter(response__is_preview=True)
if not self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, study
):
videos = videos.filter(response__is_preview=False)
match = self.request.GET.get("match", "")
if match:
videos = videos.filter(full_name__icontains=match)
return videos.order_by(self.get_ordering())
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary. Study results
are paginated.
"""
context = super().get_context_data(**kwargs)
context["match"] = self.request.GET.get("match", "")
context["study"] = self.study
return context
def post(self, request, *args, **kwargs):
"""
Downloads study video
"""
match = self.request.GET.get("match", "")
study = self.study
if self.request.POST.get("all-attachments"):
build_zipfile_of_videos.delay(
f"{study.uuid}_videos",
study.uuid,
match,
self.request.user.uuid,
consent_only=False,
)
messages.success(
request,
f"An archive of videos for {study.name} is being generated. You will be emailed a link when it's completed.",
)
if self.request.POST.get("all-consent-videos"):
build_zipfile_of_videos.delay(
f"{study.uuid}_consent_videos",
study.uuid,
match,
self.request.user.uuid,
consent_only=True,
)
messages.success(
request,
f"An archive of consent videos for {study.name} is being generated. You will be emailed a link when it's completed.",
)
return HttpResponseRedirect(
reverse("exp:study-attachments", kwargs=self.kwargs)
)
|
CenterForOpenScience/lookit-api
|
exp/views/responses.py
|
Python
|
apache-2.0
| 80,549
|
import collections
try:
    from collections import abc as _abc  # Mapping/Sequence live here on Python 3 (collections aliases removed in 3.10)
except ImportError:
    _abc = collections  # Python 2 fallback
from .types import Range, Constraint
try:
unicode
except NameError: # python3
basestring = unicode = str
def mad_quote(value):
"""Add quotes to a string value."""
quoted = repr(value)
return quoted[1:] if quoted[0] == 'u' else quoted
def mad_parameter(key, value):
"""
Format a single MAD-X command parameter.
"""
key = str(key).lower()
# the empty string was used in earlier versions in place of None:
if value is None or value == '':
return ''
if isinstance(value, Range):
return key + '=' + value.first + '/' + value.last
elif isinstance(value, Constraint):
constr = []
if value.min is not None:
constr.append(key + '>' + value.min)
if value.max is not None:
constr.append(key + '<' + value.max)
if constr:
return ', '.join(constr)
else:
return key + '=' + value.value
elif isinstance(value, bool):
return ('' if value else '-') + key
elif key == 'range':
if isinstance(value, basestring):
return key + '=' + value
        elif isinstance(value, _abc.Mapping):
return key + '=' + str(value['first']) + '/' + str(value['last'])
else:
return key + '=' + str(value[0]) + '/' + str(value[1])
# check for basestrings before collections.Sequence, because every
# basestring is also a Sequence:
elif isinstance(value, basestring):
# Although, it kinda makes more sense to quote all `basestring`
# instances, this breaks existing models which are using strings
# instead of numeric values. So let's only quote keys for now, where
# we know that it matters a lot:
if key == 'file':
return key + '=' + mad_quote(value)
else:
return key + '=' + str(value)
    elif isinstance(value, _abc.Sequence):
if key == 'column':
return key + '=' + ','.join(value)
elif value and all(isinstance(v, basestring) for v in value):
return key + '=' + ','.join(value)
else:
return key + '={' + ','.join(map(str, value)) + '}'
else:
return key + '=' + str(value)
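# Illustrative examples of the rules above (not doctests from the original module):
#     mad_parameter('echo', True)          -> 'echo'
#     mad_parameter('echo', False)         -> '-echo'
#     mad_parameter('sequence', 'lhc')     -> 'sequence=lhc'
#     mad_parameter('file', 'out.tfs')     -> "file='out.tfs'"   (only 'file' values are quoted)
#     mad_parameter('column', ['x', 'y'])  -> 'column=x,y'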
def mad_command(*args, **kwargs):
"""
Create a MAD-X command from its name and parameter list.
:param args: initial bareword command arguments (including command name!)
:param kwargs: following named command arguments
:returns: command string
:rtype: str
Examples:
>>> mad_command('twiss', sequence='lhc')
'twiss, sequence=lhc;'
>>> mad_command('option', echo=True)
'option, echo;'
>>> mad_command('constraint', betx=Constraint(max=3.13))
'constraint, betx<3.13;'
"""
_args = list(args)
_args += [mad_parameter(k, v) for k,v in kwargs.items()]
return ', '.join(filter(None, _args)) + ';'
|
pymad/cpymad
|
src/cern/cpymad/_madx_tools.py
|
Python
|
apache-2.0
| 2,922
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.detailed_ground_heat_transfer import GroundHeatTransferBasementComBldg
log = logging.getLogger(__name__)
class TestGroundHeatTransferBasementComBldg(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
    def tearDown(self):
        os.close(self.fd)  # close the descriptor returned by mkstemp so tests don't leak fds
        os.remove(self.path)
def test_create_groundheattransferbasementcombldg(self):
pyidf.validation_level = ValidationLevel.error
obj = GroundHeatTransferBasementComBldg()
# real
var_january_average_temperature = 1.1
obj.january_average_temperature = var_january_average_temperature
# real
var_february_average_temperature = 2.2
obj.february_average_temperature = var_february_average_temperature
# real
var_march_average_temperature = 3.3
obj.march_average_temperature = var_march_average_temperature
# real
var_april_average_temperature = 4.4
obj.april_average_temperature = var_april_average_temperature
# real
var_may_average_temperature = 5.5
obj.may_average_temperature = var_may_average_temperature
# real
var_june_average_temperature = 6.6
obj.june_average_temperature = var_june_average_temperature
# real
var_july_average_temperature = 7.7
obj.july_average_temperature = var_july_average_temperature
# real
var_august_average_temperature = 8.8
obj.august_average_temperature = var_august_average_temperature
# real
var_september_average_temperature = 9.9
obj.september_average_temperature = var_september_average_temperature
# real
var_october_average_temperature = 10.1
obj.october_average_temperature = var_october_average_temperature
# real
var_november_average_temperature = 11.11
obj.november_average_temperature = var_november_average_temperature
# real
var_december_average_temperature = 12.12
obj.december_average_temperature = var_december_average_temperature
# real
var_daily_variation_sine_wave_amplitude = 13.13
obj.daily_variation_sine_wave_amplitude = var_daily_variation_sine_wave_amplitude
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].january_average_temperature, var_january_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].february_average_temperature, var_february_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].march_average_temperature, var_march_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].april_average_temperature, var_april_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].may_average_temperature, var_may_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].june_average_temperature, var_june_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].july_average_temperature, var_july_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].august_average_temperature, var_august_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].september_average_temperature, var_september_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].october_average_temperature, var_october_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].november_average_temperature, var_november_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].december_average_temperature, var_december_average_temperature)
self.assertAlmostEqual(idf2.groundheattransferbasementcombldgs[0].daily_variation_sine_wave_amplitude, var_daily_variation_sine_wave_amplitude)
|
rbuffat/pyidf
|
tests/test_groundheattransferbasementcombldg.py
|
Python
|
apache-2.0
| 4,351
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-05-15 17:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('inventory', '0037_auto_20180213_1407'),
('contenttypes', '0002_remove_content_type_name'),
('mdm', '0011_enrolleddevice_checkout_at'),
]
operations = [
migrations.CreateModel(
name='DeviceArtifactCommand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('artifact_id', models.PositiveIntegerField()),
('artifact_version', models.PositiveIntegerField()),
('action', models.CharField(choices=[('INSTALL', 'Install'), ('REMOVE', 'Remove')], max_length=64)),
('command_time', models.DateTimeField()),
('result_time', models.DateTimeField(null=True)),
('status_code', models.CharField(choices=[('Acknowledged', 'Acknowledged'), ('Error', 'Error'), ('CommandFormatError', 'Command format error'), ('NotNow', 'Not now')], max_length=64, null=True)),
('artifact_content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('enrolled_device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mdm.EnrolledDevice')),
],
),
migrations.CreateModel(
name='KernelExtension',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(unique=True)),
('identifier', models.TextField(unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='KernelExtensionPolicy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.UUIDField(default=uuid.uuid4, unique=True)),
('version', models.PositiveIntegerField(default=1)),
('allow_user_overrides', models.BooleanField(default=True, help_text='If set to true, users can approve additional kernel extensions not explicitly allowed by configuration profiles')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('trashed_at', models.DateTimeField(editable=False, null=True)),
('allowed_kernel_extensions', models.ManyToManyField(to='mdm.KernelExtension')),
],
),
migrations.CreateModel(
name='KernelExtensionTeam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(unique=True)),
('identifier', models.CharField(max_length=10, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='kernelextensionpolicy',
name='allowed_teams',
field=models.ManyToManyField(to='mdm.KernelExtensionTeam'),
),
migrations.AddField(
model_name='kernelextensionpolicy',
name='meta_business_unit',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='kernel_extension_policy', to='inventory.MetaBusinessUnit'),
),
migrations.AddField(
model_name='kernelextension',
name='team',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mdm.KernelExtensionTeam'),
),
]
|
zentralopensource/zentral
|
zentral/contrib/mdm/migrations/0012_auto_20180515_1749.py
|
Python
|
apache-2.0
| 4,153
|
"""
navdoon.collector
-----------------
Define collectors that collect Statsd requests and queue them to be
processed by the processor.
"""
import os
import socket
from socket import socket as SocketClass
from abc import abstractmethod, ABCMeta
from threading import Event
from navdoon.pystdlib.queue import Queue
from navdoon.utils.common import LoggerMixIn
from navdoon.utils.system import ExpandableThreadPool
from navdoon.pystdlib.typing import Dict, Any, Tuple, List, Optional
DEFAULT_PORT = 8125
def socket_type_repr(socket_type):
# type: (int) -> str
sock_types = {
socket.SOCK_STREAM: "TCP",
socket.SOCK_DGRAM: "UDP"
}
return sock_types.get(socket_type, "UNKNOWN")
class AbstractCollector(object):
"""Abstract base class for collectors"""
__metaclass__ = ABCMeta
def __init__(self):
self._queue = Queue() # type: Queue
@abstractmethod
def start(self):
# type: () -> None
raise NotImplementedError
@abstractmethod
def wait_until_queuing_requests(self, timeout=None):
# type: (float) -> None
raise NotImplementedError
@abstractmethod
def shutdown(self):
# type: () -> None
raise NotImplementedError
@abstractmethod
def wait_until_shutdown(self, timeout=None):
# type: (float) -> None
raise NotImplementedError
@property
def queue(self):
# type: () -> Queue
return self._queue
@queue.setter
def queue(self, value):
# type: (Queue) -> None
for method in ('put_nowait',):
if not callable(getattr(value, method, None)):
raise ValueError(
"Invalid queue for collector. Queue is missing "
"method '{}'".format(method))
self._queue = value
def __repr__(self):
return "collector <{}>".format(self.__class__)
class SocketServer(LoggerMixIn, AbstractCollector):
"""Collect Statsd metrics via TCP/UDP socket"""
def __init__(self, **kargs):
# type: (**Dict[str, Any]) -> None
AbstractCollector.__init__(self)
LoggerMixIn.__init__(self)
self.chunk_size = 8196 # type: int
self.socket_type = socket.SOCK_DGRAM # type: int
self.socket_timeout = 1 # type: float
self.host = '127.0.0.1' # type: str
self.port = DEFAULT_PORT # type: int
self.user = None # type: int
self.group = None # type: int
self.socket = None # type: socket.socket
self._stop_queuing_requests = Event() # type: Event
self._queuing_requests = Event() # type: Event
self._shutdown = Event() # type: Event
self._should_shutdown = Event() # type: Event
        self.configure(**kargs)
self.log_signature = "collector.socket_server " # type: str
self.num_worker_threads = 4 # type: int
self.worker_threads_limit = 128 # type: int
def __del__(self):
# type: () -> None
self._do_shutdown()
def __repr__(self):
# type: () -> str
return "collector.socket_server {}@{}:{}".format(
socket_type_repr(self.socket_type),
self.host,
self.port
)
def configure(self, **kargs):
# type: (**Dict[str, Any]) -> List[str]
"""Configure the server, setting attributes.
Returns a list of attribute names that were affected
"""
configured = []
for key in ('host', 'port', 'user', 'group', 'socket_type',
'num_worker_threads', 'worker_threads_limit'):
if key in kargs:
setattr(self, key, kargs[key])
configured.append(key)
return configured
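    # Illustrative usage sketch (not part of the original module):
    #     server = SocketServer(host="0.0.0.0", port=8125, socket_type=socket.SOCK_DGRAM)
    #     server.configure(num_worker_threads=8)   # -> ['num_worker_threads']
    # Keyword arguments outside the list above are silently ignored.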
def start(self):
# type: () -> None
self._bind_socket()
try:
while not self._should_shutdown.is_set():
self._shutdown.clear()
self._log("starting serving requests on {} {}:{}".format(
socket_type_repr(self.socket_type), self.host, self.port))
self._pre_start()
if self.socket_type == socket.SOCK_STREAM:
self._queue_requests_tcp()
else:
self._queue_requests_udp()
self._post_start()
self._log("stopped serving requests")
finally:
self._do_shutdown()
def is_queuing_requests(self):
# type: () -> bool
return self._queuing_requests.is_set()
def wait_until_queuing_requests(self, timeout=None):
# type: (float) -> None
self._queuing_requests.wait(timeout)
def shutdown(self):
# type: () -> None
self._log_debug("shutting down ...")
self._stop_queuing_requests.set()
self._should_shutdown.set()
def wait_until_shutdown(self, timeout=None):
# type: (float) -> None
self._log_debug("waiting until shutdown ...")
self._shutdown.wait(timeout)
self._log("shutdown successfully")
def _pre_start(self):
# type: () -> None
pass
def _queue_requests_udp(self):
# type: () -> None
should_stop = self._stop_queuing_requests.is_set
chunk_size = self.chunk_size
receive = self.socket.recv
timeout_exception = socket.timeout
enqueue = self._queue.put_nowait
try:
self._queuing_requests.set()
self._log_debug("starting queuing UDP requests ...")
while not should_stop():
try:
data = receive(chunk_size)
except timeout_exception:
data = None
if data:
enqueue(data.decode())
finally:
self._log_debug("stopped queuing UDP requests")
self._queuing_requests.clear()
def _queue_requests_tcp(self):
# type: () -> None
stop_event = self._stop_queuing_requests
should_stop_accepting = stop_event.is_set
chunk_size = self.chunk_size
queue_put_nowait = self._queue.put_nowait
shutdown_rdwr = socket.SHUT_RDWR
socket_timeout_exception = socket.timeout
thread_pool = ExpandableThreadPool(self.num_worker_threads)
thread_pool.workers_limit = self.worker_threads_limit
thread_pool.logger = self.logger
thread_pool.log_signature = "threadpool =< {} ".format(self)
thread_pool.initialize()
def _enqueue_from_connection(conn, address):
# type: (socket.socket, Tuple[str, int]) -> None
buffer_size = chunk_size
enqueue = queue_put_nowait
timeout_exception = socket_timeout_exception
should_stop_queuing = stop_event.is_set
receive = conn.recv
incomplete_line_chunk = u''
try:
self._log_debug("collecting metrics from TCP {}:{} ...".format(address[0], address[1]))
while not should_stop_queuing():
try:
buff_bytes = receive(buffer_size)
except timeout_exception:
continue
if not buff_bytes:
break
buff_lines = buff_bytes.decode().splitlines(True)
if incomplete_line_chunk != '':
buff_lines[0] = incomplete_line_chunk + buff_lines[0]
incomplete_line_chunk = ''
if not buff_lines[-1].endswith('\n'):
incomplete_line_chunk = buff_lines.pop()
enqueue(''.join(buff_lines))
finally:
conn.shutdown(shutdown_rdwr)
conn.close()
if incomplete_line_chunk != '':
enqueue(incomplete_line_chunk)
try:
self._queuing_requests.set()
self._log_debug("starting accepting TCP connections ...")
while not should_stop_accepting():
try:
(connection, remote_addr) = self.socket.accept()
self._log_debug("TCP connection from {}:{} ...".format(remote_addr[0], remote_addr[1]))
connection.settimeout(self.socket_timeout)
except socket_timeout_exception:
continue
thread_pool.do(_enqueue_from_connection, connection, remote_addr)
self._log_debug("stopped accepting TCP connection")
thread_pool.stop(True, 10) # TODO: set this timeout from object attrs
self._log_debug("stopped enqueuing TCP requests")
finally:
self._queuing_requests.clear()
def _post_start(self):
# type: () -> None
pass
def _do_shutdown(self):
# type: () -> None
self._close_socket()
self._shutdown.set()
def _bind_socket(self, sock=None):
# type: (Optional[SocketClass]) -> None
if not sock:
sock = self._create_socket()
self._close_socket()
self._change_process_user_group()
self.socket = sock
self._log("bound to address {}:{}".format(*sock.getsockname()))
def _create_socket(self):
# type: () -> SocketClass
        sock = socket.socket(socket.AF_INET, self.socket_type)
        # SO_REUSEADDR must be set before bind() for it to take effect
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        sock.bind((self.host, self.port))
        sock.settimeout(self.socket_timeout)
if self.socket_type == socket.SOCK_STREAM:
sock.listen(5)
return sock
def _close_socket(self):
# type: () -> None
sock = self.socket
if sock:
try:
sock.shutdown(socket.SHUT_RDWR)
            except socket.error:
pass
finally:
sock.close()
self.socket = None
def _change_process_user_group(self):
# type: () -> None
if self.user:
self._log("changing process user to {}".format(self.user))
os.seteuid(self.user)
if self.group:
self._log("changing process group to {}".format(self.group))
os.setegid(self.group)
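# Illustrative end-to-end sketch (assumption, not part of the original module): the collector is
# normally driven from its own thread while a consumer drains the shared queue, e.g.
#     from threading import Thread
#     server = SocketServer(host="127.0.0.1", port=8125)
#     Thread(target=server.start, daemon=True).start()
#     server.wait_until_queuing_requests(timeout=5)
#     ...  # consume metrics from server.queue, then:
#     server.shutdown()
#     server.wait_until_shutdown(timeout=5)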
|
farzadghanei/navdoon
|
navdoon/collector.py
|
Python
|
apache-2.0
| 10,313
|
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs fio against a remote gluster cluster."""
import json
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import fio
from perfkitbenchmarker.linux_packages import gluster
FLAGS = flags.FLAGS
flags.DEFINE_string('fill_disk_size', '4G',
'Amount to fill the disk before reading.')
flags.DEFINE_string('fill_disk_bs', '128k',
'Block size used to fill the disk before reading.')
flags.DEFINE_integer('fill_disk_iodepth', 64, 'iodepth used to fill the disk.')
flags.DEFINE_string('read_size', '4G', 'Size of the file to read.')
flags.DEFINE_string('read_bs', '512k', 'Block size of the file to read.')
flags.DEFINE_integer('read_iodepth', 1, 'iodepth used in reading the file.')
BENCHMARK_NAME = 'gluster_fio'
BENCHMARK_CONFIG = """
gluster_fio:
description: >
Runs fio against a remote gluster cluster.
vm_groups:
clients:
vm_spec: *default_single_core
vm_count: null
gluster_servers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: 1
"""
_VOLUME_NAME = 'vol01'
_MOUNT_POINT = '/glusterfs'
_NUM_SECTORS_READ_AHEAD = 16384
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Set up GlusterFS and install fio.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
gluster_servers = benchmark_spec.vm_groups['gluster_servers']
clients = benchmark_spec.vm_groups['clients']
client_vm = clients[0]
vm_util.RunThreaded(lambda vm: vm.Install('fio'), gluster_servers + clients)
for vm in gluster_servers:
vm.SetReadAhead(_NUM_SECTORS_READ_AHEAD,
[d.GetDevicePath() for d in vm.scratch_disks])
# Set up Gluster
if gluster_servers:
gluster.ConfigureServers(gluster_servers, _VOLUME_NAME)
args = [((client, gluster_servers[0], _VOLUME_NAME, _MOUNT_POINT), {})
for client in clients]
vm_util.RunThreaded(gluster.MountGluster, args)
gluster_address = gluster_servers[0].internal_ip
client_vm.RemoteCommand('sudo mkdir -p /testdir')
client_vm.RemoteCommand('sudo mount %s:/vol01 /testdir -t glusterfs' %
gluster_address)
def _RunFio(vm, fio_params, metadata):
"""Run fio.
Args:
vm: Virtual machine to run fio on.
fio_params: fio parameters used to create the fio command to run.
metadata: Metadata to add to the results.
Returns:
A list of sample.Sample objects
"""
stdout, _ = vm.RemoteCommand('sudo {0} {1}'.format(fio.GetFioExec(),
fio_params))
job_file_contents = fio.FioParametersToJob(fio_params)
samples = fio.ParseResults(
job_file_contents,
json.loads(stdout),
base_metadata=metadata,
skip_latency_individual_stats=True)
return samples
def Run(benchmark_spec):
"""Run fio against gluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
gluster_servers = benchmark_spec.vm_groups['gluster_servers']
clients = benchmark_spec.vm_groups['clients']
client_vm = clients[0]
results = []
metadata = {
'fill_disk_size': FLAGS.fill_disk_size,
'fill_disk_bs': FLAGS.fill_disk_bs,
'fill_disk_iodepth': FLAGS.fill_disk_iodepth,
'read_size': FLAGS.read_size,
'read_bs': FLAGS.read_bs,
'read_iodepth': FLAGS.read_iodepth,
}
fio_params = ' '.join([
'--output-format=json', '--name=fill_disk',
'--filename=/testdir/testfile',
'--filesize=%s' % FLAGS.fill_disk_size, '--ioengine=libaio', '--direct=1',
'--verify=0', '--randrepeat=0',
'--bs=%s' % FLAGS.fill_disk_bs,
'--iodepth=%s' % FLAGS.fill_disk_iodepth, '--rw=randwrite'
])
samples = _RunFio(client_vm, fio_params, metadata)
results += samples
# In addition to dropping caches, increase polling to potentially reduce
# variance in network operations
for vm in gluster_servers + clients:
vm.RemoteCommand('sudo /sbin/sysctl net.core.busy_poll=50')
vm.DropCaches()
fio_read_common_params = [
'--output-format=json', '--randrepeat=1', '--ioengine=libaio',
'--gtod_reduce=1', '--filename=/testdir/testfile',
'--bs=%s' % FLAGS.read_bs,
'--iodepth=%s' % FLAGS.read_iodepth,
'--size=%s' % FLAGS.read_size, '--readwrite=randread'
]
fio_params = '--name=first_read ' + ' '.join(fio_read_common_params)
samples = _RunFio(client_vm, fio_params, metadata)
results += samples
# Run the command again. This time, the file should be cached.
fio_params = '--name=second_read ' + ' '.join(fio_read_common_params)
samples = _RunFio(client_vm, fio_params, metadata)
results += samples
return results
def Cleanup(benchmark_spec):
"""Cleanup gluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
clients = benchmark_spec.vm_groups['clients']
gluster_servers = benchmark_spec.vm_groups['gluster_servers']
for client in clients:
client.RemoteCommand('sudo umount %s' % _MOUNT_POINT)
if gluster_servers:
gluster.DeleteVolume(gluster_servers[0], _VOLUME_NAME)
|
GoogleCloudPlatform/PerfKitBenchmarker
|
perfkitbenchmarker/linux_benchmarks/gluster_fio_benchmark.py
|
Python
|
apache-2.0
| 6,058
|
#!/usr/bin/python3
import json
import time
import requests
import websocket
TIMEOUT = 1
class GenericElement(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __getattr__(self, attr):
func_name = '{}.{}'.format(self.name, attr)
def generic_function(**args):
self.parent.pop_messages()
self.parent.message_counter += 1
            message_id = self.parent.message_counter
call_obj = {'id': message_id, 'method': func_name, 'params': args}
self.parent.ws.send(json.dumps(call_obj))
result, messages = self.parent.wait_result(message_id)
return result, messages
return generic_function
class ChromeInterface(object):
message_counter = 0
def __init__(self, host='localhost', port=9222, tab=0, timeout=TIMEOUT, auto_connect=True):
self.host = host
self.port = port
self.ws = None
self.tabs = None
self.timeout = timeout
if auto_connect:
self.connect(tab=tab)
def get_tabs(self):
response = requests.get('http://{}:{}/json'.format(self.host, self.port))
self.tabs = json.loads(response.text)
def connect(self, tab=0, update_tabs=True):
if update_tabs or self.tabs is None:
self.get_tabs()
wsurl = self.tabs[tab]['webSocketDebuggerUrl']
self.close()
self.ws = websocket.create_connection(wsurl)
self.ws.settimeout(self.timeout)
def connect_targetID(self, targetID):
try:
wsurl = 'ws://{}:{}/devtools/page/{}'.format(self.host, self.port, targetID)
self.close()
self.ws = websocket.create_connection(wsurl)
self.ws.settimeout(self.timeout)
except:
wsurl = self.tabs[0]['webSocketDebuggerUrl']
self.ws = websocket.create_connection(wsurl)
self.ws.settimeout(self.timeout)
def close(self):
if self.ws:
self.ws.close()
# Blocking
def wait_message(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
self.ws.settimeout(timeout)
try:
message = self.ws.recv()
except:
return None
finally:
self.ws.settimeout(self.timeout)
return json.loads(message)
# Blocking
def wait_event(self, event, timeout=None):
timeout = timeout if timeout is not None else self.timeout
start_time = time.time()
messages = []
matching_message = None
while True:
now = time.time()
if now-start_time > timeout:
break
try:
message = self.ws.recv()
parsed_message = json.loads(message)
messages.append(parsed_message)
if 'method' in parsed_message and parsed_message['method'] == event:
matching_message = parsed_message
break
except websocket.WebSocketTimeoutException:
continue
except:
break
return (matching_message, messages)
# Blocking
def wait_result(self, result_id, timeout=None):
timeout = timeout if timeout is not None else self.timeout
start_time = time.time()
messages = []
matching_result = None
while True:
now = time.time()
if now-start_time > timeout:
break
try:
message = self.ws.recv()
parsed_message = json.loads(message)
messages.append(parsed_message)
if 'result' in parsed_message and parsed_message['id'] == result_id:
matching_result = parsed_message
break
except websocket.WebSocketTimeoutException:
continue
except:
break
return (matching_result, messages)
# Non Blocking
def pop_messages(self):
messages = []
self.ws.settimeout(0)
while True:
try:
message = self.ws.recv()
messages.append(json.loads(message))
except:
break
self.ws.settimeout(self.timeout)
return messages
def __getattr__(self, attr):
genericelement = GenericElement(attr, self)
self.__setattr__(attr, genericelement)
return genericelement
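# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes a Chrome
# instance started with --remote-debugging-port=9222 and at least one open tab;
# the URL is a placeholder. Domain/method names such as Network.enable and
# Page.navigate come from the Chrome DevTools Protocol and are resolved
# dynamically through ChromeInterface.__getattr__ / GenericElement.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    chrome = ChromeInterface()
    chrome.Network.enable()
    chrome.Page.enable()
    result, messages = chrome.Page.navigate(url='http://example.com')
    chrome.wait_event('Page.loadEventFired', timeout=60)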
|
marty90/PyChromeDevTools
|
PyChromeDevTools/__init__.py
|
Python
|
apache-2.0
| 4,638
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from django.utils import timezone
from uw_saml.decorators import group_required
from uw_sws.models import Term
from course_grader.models import SubmittedGradeRoster, GradeImport
from course_grader.dao.term import (
term_from_param, current_term, current_datetime)
from datetime import datetime, timedelta
from logging import getLogger
import json
logger = getLogger(__name__)
@group_required(settings.GRADEPAGE_SUPPORT_GROUP)
@never_cache
def status(request):
try:
curr_term = current_term()
except Exception as ex:
logger.error("GET current term failed: {}".format(ex))
raise
term_id = request.GET.get("term", "").strip()
try:
selected_term = term_from_param(term_id)
except Exception as ex:
selected_term = curr_term
graderosters = SubmittedGradeRoster.objects.get_status_by_term(
selected_term)
grade_imports = GradeImport.objects.get_import_sources_by_term(
selected_term)
if selected_term.quarter == Term.SUMMER:
grading_period_open = selected_term.aterm_grading_period_open
else:
grading_period_open = selected_term.grading_period_open
start_date = timezone.make_aware(grading_period_open,
timezone.get_default_timezone())
end_date = timezone.make_aware(selected_term.grade_submission_deadline,
timezone.get_default_timezone())
epoch = timezone.make_aware(datetime(1970, 1, 1),
timezone.get_default_timezone())
chart_data = {
"submissions": {
"plot_lines": [],
"grading_period_open": get_total_milliseconds(start_date - epoch),
"data": []
},
"grade_imports": {
"catalyst": [],
"canvas": [],
"csv": [],
}
}
while start_date < end_date:
if start_date.strftime("%a") == "Mon":
chart_data["submissions"]["plot_lines"].append({
"value": get_total_milliseconds(start_date - epoch),
"color": "#707070",
"width": 1,
"dashStyle": "ShortDot"
})
start_date = start_date + timedelta(days=1)
for index, graderoster in enumerate(graderosters):
chart_data["submissions"]["data"].append([
get_total_milliseconds(graderoster["submitted_date"] - epoch),
index + 1
])
for grade_import in grade_imports:
chart_data["grade_imports"][grade_import["source"]].append([
get_total_milliseconds(grade_import["imported_date"] - epoch),
len(chart_data["grade_imports"][grade_import["source"]]) + 1
])
params = {
"graderosters": graderosters,
"selected_year": selected_term.year,
"selected_quarter": selected_term.get_quarter_display(),
"grading_period_open": grading_period_open,
"grade_submission_deadline": selected_term.grade_submission_deadline,
"current_datetime": current_datetime(),
"chart_data": json.dumps(chart_data),
"term_id": term_id
}
return render(request, "support/status.html", params)
def get_total_milliseconds(td):
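    # Convert a timedelta to milliseconds; callers pass (datetime - epoch) to get
    # epoch-based x values for the chart data.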
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e3
|
uw-it-aca/gradepage
|
course_grader/views/support/status.py
|
Python
|
apache-2.0
| 3,528
|
#!/usr/bin/env python3
"""
gftools gen-html aka diffbrowsers2.
Generate html documents to proof a font family, or generate documents to
diff two families.
Examples:
# Generate proofing documents for a single font
gftools gen-html proof font1.ttf
# Generate proofing documents for a family of fonts
gftools gen-html proof font1.ttf font2.ttf font3.ttf
# Output test pages to a dir
gftools gen-html proof font1.ttf -o ~/Desktop/myFamily
# Generate proofing documents and output images using Browserstack
# (a subscription is required)
gftools gen-html proof font1.ttf --imgs
# Generate diff documents
gftools gen-html diff -fb ./fonts_before/font1.ttf -fa ./fonts_after/font1.ttf
"""
from pkg_resources import resource_filename
from gftools.html import HtmlProof, HtmlDiff
import argparse
def main():
html_templates_dir = resource_filename("gftools", "templates")
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
dest="command", required=True, metavar='"proof" or "diff"'
)
# Optional args which can be used in all subparsers
universal_options_parser = argparse.ArgumentParser(add_help=False)
universal_options_parser.add_argument(
"--pages",
nargs="+",
help="Choose which templates to populate. By default, all templates "
"are populated.",
)
universal_options_parser.add_argument(
"--pt-size", "-pt", help="Change pt size of document text", default=14
)
universal_options_parser.add_argument(
"--imgs",
action="store_true",
help="Output images using Browserstack.",
)
universal_options_parser.add_argument(
"--out", "-o", help="Output dir", default="diffbrowsers"
)
universal_options_parser.add_argument(
"--template-dir",
"-td",
help="HTML template directory. By default, gftools/templates is used.",
default=resource_filename("gftools", "templates"),
)
proof_parser = subparsers.add_parser(
"proof",
parents=[universal_options_parser],
help="Generate html proofing documents for a family",
)
proof_parser.add_argument("fonts", nargs="+")
diff_parser = subparsers.add_parser(
"diff",
parents=[universal_options_parser],
help="Generate html diff documents which compares two families. "
"Variable fonts can be compared against static fonts because we "
"match the fvar instances against the static fonts. To Match fonts "
"we use the font's name table records. For static fonts, the fullname "
"is used e.g 'Maven Pro Medium'. For variable fonts, the family name "
"+ fvar instance subfamilyname is used e.g 'Maven Pro' + 'Medium'.",
)
diff_parser.add_argument("--fonts-before", "-fb", nargs="+", required=True)
diff_parser.add_argument("--fonts-after", "-fa", nargs="+", required=True)
args = parser.parse_args()
if args.command == "proof":
html = HtmlProof(args.fonts, args.out, template_dir=args.template_dir)
elif args.command == "diff":
html = HtmlDiff(
args.fonts_before,
args.fonts_after,
args.out,
template_dir=args.template_dir,
)
html.build_pages(args.pages, pt_size=args.pt_size)
if args.imgs:
html.save_imgs()
if __name__ == "__main__":
main()
|
googlefonts/gftools
|
bin/gftools-gen-html.py
|
Python
|
apache-2.0
| 3,393
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from filebrowser.views import display, listdir_paged
def _get_acl_name(acl):
return ('default:' if acl['isDefault'] else '') + acl['type'] + ':' + acl['name'] + ':'
def _get_acl(acl):
return _get_acl_name(acl) + ('r' if acl['r'] else '-') + ('w' if acl['w'] else '-') + ('x' if acl['x'] else '-')
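# Illustrative example (not from the original source):
#   _get_acl({'isDefault': True, 'type': 'group', 'name': 'sales',
#             'r': True, 'w': False, 'x': True}) == 'default:group:sales:r-x'
# i.e. the ACL-spec entry format expected by the HDFS setfacl API.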
def _diff_list_dir(user_listing, hdfs_listing):
user_files = [f['stats']['path'] for f in user_listing['files']]
hdfs_files = [f['stats']['path'] for f in hdfs_listing['files']]
# Files visible by hdfs only
hdfs_only = list(set(hdfs_files) - set(user_files))
new_hdfs = filter(lambda f: f['stats']['path'] in hdfs_only, hdfs_listing['files'])
for f in new_hdfs:
f['striked'] = True
listing = user_listing['files'] + new_hdfs
return sorted(listing, key=lambda f: f['path'])
def list_hdfs(request, path):
try:
stats = request.fs.stats(path)
if stats.isDir:
json_response = listdir_paged(request, path)
else:
json_response = display(request, path)
except IOError:
json_response = HttpResponse(json.dumps({'files': [], 'page': {}, 'error': 'FILE_NOT_FOUND'}), mimetype="application/json") # AccessControlException: Permission denied: user=test, access=READ_EXECUTE, inode="/tmp/dir":romain:supergroup:drwxr-xr-x:group::r-x,group:bob:---,group:test:---,default:user::rwx,default:group::r--,default:mask::r--,default:other::rwx (error 403)
except Exception, e:
json_response = HttpResponse(json.dumps({'files': [], 'page': {}, 'error': 'ACCESS_DENIED'}), mimetype="application/json") # AccessControlException: Permission denied: user=test, access=READ_EXECUTE, inode="/tmp/dir":romain:supergroup:drwxr-xr-x:group::r-x,group:bob:---,group:test:---,default:user::rwx,default:group::r--,default:mask::r--,default:other::rwx (error 403)
if json.loads(request.GET.get('isDiffMode', 'false')):
request.doas = 'hdfs'
stats = request.fs.stats(path)
if stats.isDir:
hdfs_response = json.loads(listdir_paged(request, path).content)
resp = json.loads(json_response.content)
resp['page'] = hdfs_response['page']
resp['files'] = _diff_list_dir(resp, hdfs_response)
json_response.content = json.dumps(resp)
return json_response
def get_acls(request):
try:
acls = request.fs.get_acl_status(request.GET.get('path'))
except Exception, e:
print e
acls = None
return HttpResponse(json.dumps(acls is not None and acls['AclStatus'] or None), mimetype="application/json")
def update_acls(request):
path = request.POST.get('path')
acls = json.loads(request.POST.get('acls'))
original_acls = json.loads(request.POST.get('originalAcls'))
try:
if all([acl['status'] == 'deleted' for acl in acls]):
request.fs.remove_acl(path)
else:
renamed_acls = set([_get_acl_name(acl) for acl in original_acls]) - set([_get_acl_name(acl) for acl in acls]) # We need to remove ACLs that have been renamed
_remove_acl_names(request.fs, path, list(renamed_acls))
_remove_acl_entries(request.fs, path, [acl for acl in acls if acl['status'] == 'deleted'])
_modify_acl_entries(request.fs, path, [acl for acl in acls if acl['status'] in ('new', 'modified')])
except Exception, e:
raise PopupException(unicode(str(e.message), "utf8"))
return HttpResponse(json.dumps({'status': 0}), mimetype="application/json")
def bulk_delete_acls(request):
path = request.POST.get('path')
checked_paths = json.loads(request.POST.get('checkedPaths'))
recursive = json.loads(request.POST.get('recursive'))
try:
checked_paths = [path['path'] for path in checked_paths if '+' in path['rwx'] or recursive]
for path in checked_paths:
request.fs.remove_acl(path)
if recursive:
request.fs.do_recursively(request.fs.remove_acl, path)
except Exception, e:
raise PopupException(unicode(str(e.message), "utf8"))
return HttpResponse(json.dumps({'status': 0}), mimetype="application/json")
def bulk_add_acls(request):
path = request.POST.get('path')
acls = json.loads(request.POST.get('acls'))
checked_paths = json.loads(request.POST.get('checkedPaths'))
recursive = json.loads(request.POST.get('recursive'))
try:
    checked_paths = [p['path'] for p in checked_paths if p['path'] != path] # Don't touch current path
for path in checked_paths:
_modify_acl_entries(request.fs, path, [acl for acl in acls if acl['status'] == ''], recursive) # Only saved ones
except Exception, e:
raise PopupException(unicode(str(e.message), "utf8"))
return HttpResponse(json.dumps({'status': 0}), mimetype="application/json")
def bulk_sync_acls(request):
bulk_delete_acls(request)
return bulk_add_acls(request)
def _modify_acl_entries(fs, path, acls, recursive=False):
aclspec = ','.join([_get_acl(acl) for acl in acls])
if recursive:
return fs.do_recursively(fs.modify_acl_entries, path, aclspec)
else:
return fs.modify_acl_entries(path, aclspec)
def _remove_acl_entries(fs, path, acls):
aclspec = ','.join([_get_acl_name(acl) for acl in acls])
return fs.remove_acl_entries(path, aclspec)
def _remove_acl_names(fs, path, acl_names):
aclspec = ','.join(acl_names)
return fs.remove_acl_entries(path, aclspec)
|
yongshengwang/builthue
|
apps/security/src/security/api/hdfs.py
|
Python
|
apache-2.0
| 6,181
|
import config
def get_error_significance_score(value1, value2, distance_in_the_future, time_period):
"""
    This method takes two values and calculates the percentage error between them. After that
it adjusts the importance of the error. The weight for each error depends on how far into
the future the date of the error is - the furthers into the future, the less significant.
:param value1: int First value
:param value2: int Second value
:param distance_in_the_future: int Distance into the future
:param time_period: string What that distance is measured in (day, week, month)
    :return: float Weighted absolute percentage error, or -1 if the distance is out of range
"""
error = ((value2 - value1) / value1) * 100
return get_error_significance_score_util(error, distance_in_the_future, time_period)
def get_error_significance_score_util(error, distance_in_the_future, time_period):
period_with_error_significance = config.error_significance_by_time_period[time_period]
if distance_in_the_future >= period_with_error_significance or distance_in_the_future <= 0:
return -1
weight = 1
if distance_in_the_future != 1:
weight = (period_with_error_significance - 1 - distance_in_the_future) / (
period_with_error_significance - distance_in_the_future)
return weight * abs(error)
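# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes a
# hypothetical config.error_significance_by_time_period such as {"day": 30}:
# the same 10% raw error counts fully one day ahead, is down-weighted further
# into the future, and is ignored (-1) at or beyond the configured horizon.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    for days_ahead in (1, 2, 10, 40):
        print(days_ahead, get_error_significance_score(100, 110, days_ahead, "day"))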
|
googleinterns/inventory-visualizer
|
backend/classifiers/error_significance_classifier.py
|
Python
|
apache-2.0
| 1,316
|
import sys
from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
import xml.sax
import xml.sax.handler
class AimlParserError(Exception): pass
class AimlHandler(ContentHandler):
# The legal states of the AIML parser
_STATE_OutsideAiml = 0
_STATE_InsideAiml = 1
_STATE_InsideCategory = 2
_STATE_InsidePattern = 3
_STATE_AfterPattern = 4
_STATE_InsideThat = 5
_STATE_AfterThat = 6
_STATE_InsideTemplate = 7
_STATE_AfterTemplate = 8
def __init__(self, encoding="UTF-8"):
self.categories = {}
self._encoding = encoding
self._state = self._STATE_OutsideAiml
self._version = ""
self._namespace = ""
self._forwardCompatibleMode = False
self._currentPattern = ""
self._currentThat = ""
self._currentTopic = ""
self._insideTopic = False
self._currentUnknown = "" # the name of the current unknown element
# This is set to true when a parse error occurs in a category.
self._skipCurrentCategory = False
# Counts the number of parse errors in a particular AIML document.
# query with getNumErrors(). If 0, the document is AIML-compliant.
self._numParseErrors = 0
# TODO: select the proper validInfo table based on the version number.
self._validInfo = self._validationInfo101
# This stack of bools is used when parsing <li> elements inside
# <condition> elements, to keep track of whether or not an
# attribute-less "default" <li> element has been found yet. Only
# one default <li> is allowed in each <condition> element. We need
# a stack in order to correctly handle nested <condition> tags.
self._foundDefaultLiStack = []
# This stack of strings indicates what the current whitespace-handling
# behavior should be. Each string in the stack is either "default" or
# "preserve". When a new AIML element is encountered, a new string is
# pushed onto the stack, based on the value of the element's "xml:space"
# attribute (if absent, the top of the stack is pushed again). When
# ending an element, pop an object off the stack.
self._whitespaceBehaviorStack = ["default"]
self._elemStack = []
self._locator = Locator()
self.setDocumentLocator(self._locator)
def getNumErrors(self):
"Return the number of errors found while parsing the current document."
return self._numParseErrors
def setEncoding(self, encoding):
"""Set the text encoding to use when encoding strings read from XML.
Defaults to 'UTF-8'.
"""
self._encoding = encoding
def _location(self):
"Return a string describing the current location in the source file."
line = self._locator.getLineNumber()
column = self._locator.getColumnNumber()
return "(line %d, column %d)" % (line, column)
def _pushWhitespaceBehavior(self, attr):
"""Push a new string onto the whitespaceBehaviorStack.
The string's value is taken from the "xml:space" attribute, if it exists
and has a legal value ("default" or "preserve"). Otherwise, the previous
stack element is duplicated.
"""
assert len(self._whitespaceBehaviorStack) > 0, "Whitespace behavior stack should never be empty!"
try:
if attr["xml:space"] == "default" or attr["xml:space"] == "preserve":
self._whitespaceBehaviorStack.append(attr["xml:space"])
else:
raise AimlParserError, "Invalid value for xml:space attribute " + self._location()
except KeyError:
self._whitespaceBehaviorStack.append(self._whitespaceBehaviorStack[-1])
def startElementNS(self, name, qname, attr):
print "QNAME:", qname
print "NAME:", name
uri, elem = name
if (elem == "bot"): print "name:", attr.getValueByQName("name"), "a'ite?"
self.startElement(elem, attr)
pass
def startElement(self, name, attr):
# Wrapper around _startElement, which catches errors in _startElement()
# and keeps going.
# If we're inside an unknown element, ignore everything until we're
# out again.
if self._currentUnknown != "":
return
# If we're skipping the current category, ignore everything until
# it's finished.
if self._skipCurrentCategory:
return
# process this start-element.
try:
self._startElement(name, attr)
except AimlParserError, msg:
# Print the error message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _startElement(self, name, attr):
if name == "aiml":
# <aiml> tags are only legal in the OutsideAiml state
if self._state != self._STATE_OutsideAiml:
raise AimlParserError, "Unexpected <aiml> tag " + self._location()
self._state = self._STATE_InsideAiml
self._insideTopic = False
self._currentTopic = u""
try:
self._version = attr["version"]
except KeyError:
# This SHOULD be a syntax error, but so many AIML sets out there are missing
# "version" attributes that it just seems nicer to let it slide.
#raise AimlParserError, "Missing 'version' attribute in <aiml> tag "+self._location()
#print "WARNING: Missing 'version' attribute in <aiml> tag "+self._location()
#print " Defaulting to version 1.0"
self._version = "1.0"
self._forwardCompatibleMode = (self._version != "1.0.1")
self._pushWhitespaceBehavior(attr)
# Not sure about this namespace business yet...
#try:
# self._namespace = attr["xmlns"]
# if self._version == "1.0.1" and self._namespace != "http://alicebot.org/2001/AIML-1.0.1":
# raise AimlParserError, "Incorrect namespace for AIML v1.0.1 "+self._location()
#except KeyError:
# if self._version != "1.0":
# raise AimlParserError, "Missing 'version' attribute(s) in <aiml> tag "+self._location()
elif self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, we ignore all tags.
return
elif name == "topic":
# <topic> tags are only legal in the InsideAiml state, and only
# if we're not already inside a topic.
if (self._state != self._STATE_InsideAiml) or self._insideTopic:
raise AimlParserError, "Unexpected <topic> tag", self._location()
try:
self._currentTopic = unicode(attr['name'])
except KeyError:
raise AimlParserError, "Required \"name\" attribute missing in <topic> element " + self._location()
self._insideTopic = True
elif name == "category":
# <category> tags are only legal in the InsideAiml state
if self._state != self._STATE_InsideAiml:
raise AimlParserError, "Unexpected <category> tag " + self._location()
self._state = self._STATE_InsideCategory
self._currentPattern = u""
self._currentThat = u""
# If we're not inside a topic, the topic is implicitly set to *
if not self._insideTopic: self._currentTopic = u"*"
self._elemStack = []
self._pushWhitespaceBehavior(attr)
elif name == "pattern":
# <pattern> tags are only legal in the InsideCategory state
if self._state != self._STATE_InsideCategory:
raise AimlParserError, "Unexpected <pattern> tag " + self._location()
self._state = self._STATE_InsidePattern
elif name == "that" and self._state == self._STATE_AfterPattern:
# <that> are legal either inside a <template> element, or
# inside a <category> element, between the <pattern> and the
# <template> elements. This clause handles the latter case.
self._state = self._STATE_InsideThat
elif name == "template":
# <template> tags are only legal in the AfterPattern and AfterThat
# states
if self._state not in [self._STATE_AfterPattern, self._STATE_AfterThat]:
raise AimlParserError, "Unexpected <template> tag " + self._location()
# if no <that> element was specified, it is implicitly set to *
if self._state == self._STATE_AfterPattern:
self._currentThat = u"*"
self._state = self._STATE_InsideTemplate
self._elemStack.append(['template', {}])
self._pushWhitespaceBehavior(attr)
elif self._state == self._STATE_InsidePattern:
# Certain tags are allowed inside <pattern> elements.
if name == "bot" and attr.has_key("name") and attr["name"] == u"name":
# Insert a special character string that the PatternMgr will
# replace with the bot's name.
self._currentPattern += u" BOT_NAME "
else:
raise AimlParserError, ("Unexpected <%s> tag " % name) + self._location()
elif self._state == self._STATE_InsideThat:
# Certain tags are allowed inside <that> elements.
if name == "bot" and attr.has_key("name") and attr["name"] == u"name":
# Insert a special character string that the PatternMgr will
# replace with the bot's name.
self._currentThat += u" BOT_NAME "
else:
raise AimlParserError, ("Unexpected <%s> tag " % name) + self._location()
elif self._state == self._STATE_InsideTemplate and self._validInfo.has_key(name):
# Starting a new element inside the current pattern. First
# we need to convert 'attr' into a native Python dictionary,
# so it can later be marshaled.
attrDict = {}
for k, v in attr.items():
#attrDict[k[1].encode(self._encoding)] = v.encode(self._encoding)
attrDict[k.encode(self._encoding)] = unicode(v)
self._validateElemStart(name, attrDict, self._version)
# Push the current element onto the element stack.
self._elemStack.append([name.encode(self._encoding), attrDict])
self._pushWhitespaceBehavior(attr)
# If this is a condition element, push a new entry onto the
# foundDefaultLiStack
if name == "condition":
self._foundDefaultLiStack.append(False)
else:
# we're now inside an unknown element.
if self._forwardCompatibleMode:
# In Forward Compatibility Mode, we ignore the element and its
# contents.
self._currentUnknown = name
else:
# Otherwise, unknown elements are grounds for error!
raise AimlParserError, ("Unexpected <%s> tag " % name) + self._location()
def characters(self, ch):
# Wrapper around _characters which catches errors in _characters()
# and keeps going.
if self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, we ignore all text
return
if self._currentUnknown != "":
# If we're inside an unknown element, ignore all text
return
if self._skipCurrentCategory:
# If we're skipping the current category, ignore all text.
return
try:
self._characters(ch)
except AimlParserError, msg:
# Print the message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _characters(self, ch):
text = unicode(ch)
if self._state == self._STATE_InsidePattern:
# TODO: text inside patterns must be upper-case!
self._currentPattern += text
elif self._state == self._STATE_InsideThat:
self._currentThat += text
elif self._state == self._STATE_InsideTemplate:
# First, see whether the element at the top of the element stack
# is permitted to contain text.
try:
parent = self._elemStack[-1][0]
parentAttr = self._elemStack[-1][1]
required, optional, canBeParent = self._validInfo[parent]
nonBlockStyleCondition = (
parent == "condition" and not (parentAttr.has_key("name") and parentAttr.has_key("value")))
if not canBeParent:
raise AimlParserError, ("Unexpected text inside <%s> element " % parent) + self._location()
elif parent == "random" or nonBlockStyleCondition:
# <random> elements can only contain <li> subelements. However,
# there's invariably some whitespace around the <li> that we need
# to ignore. Same for non-block-style <condition> elements (i.e.
# those which don't have both a "name" and a "value" attribute).
if len(text.strip()) == 0:
# ignore whitespace inside these elements.
return
else:
# non-whitespace text inside these elements is a syntax error.
raise AimlParserError, ("Unexpected text inside <%s> element " % parent) + self._location()
except IndexError:
# the element stack is empty. This should never happen.
raise AimlParserError, "Element stack is empty while validating text " + self._location()
# Add a new text element to the element at the top of the element
# stack. If there's already a text element there, simply append the
# new characters to its contents.
try:
textElemOnStack = (self._elemStack[-1][-1][0] == "text")
except IndexError:
textElemOnStack = False
except KeyError:
textElemOnStack = False
if textElemOnStack:
self._elemStack[-1][-1][2] += text
else:
self._elemStack[-1].append(["text", {"xml:space": self._whitespaceBehaviorStack[-1]}, text])
else:
# all other text is ignored
pass
def endElementNS(self, name, qname):
uri, elem = name
self.endElement(elem)
def endElement(self, name):
"""Wrapper around _endElement which catches errors in _characters()
and keeps going.
"""
if self._state == self._STATE_OutsideAiml:
# If we're outside of an AIML element, ignore all tags
return
if self._currentUnknown != "":
# see if we're at the end of an unknown element. If so, we can
# stop ignoring everything.
if name == self._currentUnknown:
self._currentUnknown = ""
return
if self._skipCurrentCategory:
# If we're skipping the current category, see if it's ending. We
# stop on ANY </category> tag, since we're not keeping track of
# state in ignore-mode.
if name == "category":
self._skipCurrentCategory = False
self._state = self._STATE_InsideAiml
return
try:
self._endElement(name)
except AimlParserError, msg:
# Print the message
sys.stderr.write("PARSE ERROR: %s\n" % msg)
self._numParseErrors += 1 # increment error count
# In case of a parse error, if we're inside a category, skip it.
if self._state >= self._STATE_InsideCategory:
self._skipCurrentCategory = True
def _endElement(self, name):
"""Verify that an AIML end element is valid in the current
context.
Raises an AimlParserError if an illegal end element is encountered.
"""
if name == "aiml":
# </aiml> tags are only legal in the InsideAiml state
if self._state != self._STATE_InsideAiml:
raise AimlParserError, "Unexpected </aiml> tag " + self._location()
self._state = self._STATE_OutsideAiml
self._whitespaceBehaviorStack.pop()
elif name == "topic":
# </topic> tags are only legal in the InsideAiml state, and
# only if _insideTopic is true.
if self._state != self._STATE_InsideAiml or not self._insideTopic:
raise AimlParserError, "Unexpected </topic> tag " + self._location()
self._insideTopic = False
self._currentTopic = u""
elif name == "category":
# </category> tags are only legal in the AfterTemplate state
if self._state != self._STATE_AfterTemplate:
raise AimlParserError, "Unexpected </category> tag " + self._location()
self._state = self._STATE_InsideAiml
# End the current category. Store the current pattern/that/topic and
# element in the categories dictionary.
key = (self._currentPattern.strip(), self._currentThat.strip(), self._currentTopic.strip())
self.categories[key] = self._elemStack[-1]
self._whitespaceBehaviorStack.pop()
elif name == "pattern":
# </pattern> tags are only legal in the InsidePattern state
if self._state != self._STATE_InsidePattern:
raise AimlParserError, "Unexpected </pattern> tag " + self._location()
self._state = self._STATE_AfterPattern
elif name == "that" and self._state == self._STATE_InsideThat:
# </that> tags are only allowed inside <template> elements or in
# the InsideThat state. This clause handles the latter case.
self._state = self._STATE_AfterThat
elif name == "template":
# </template> tags are only allowed in the InsideTemplate state.
if self._state != self._STATE_InsideTemplate:
raise AimlParserError, "Unexpected </template> tag " + self._location()
self._state = self._STATE_AfterTemplate
self._whitespaceBehaviorStack.pop()
elif self._state == self._STATE_InsidePattern:
# Certain tags are allowed inside <pattern> elements.
if name not in ["bot"]:
raise AimlParserError, ("Unexpected </%s> tag " % name) + self._location()
elif self._state == self._STATE_InsideThat:
# Certain tags are allowed inside <that> elements.
if name not in ["bot"]:
raise AimlParserError, ("Unexpected </%s> tag " % name) + self._location()
elif self._state == self._STATE_InsideTemplate:
# End of an element inside the current template. Append the
# element at the top of the stack onto the one beneath it.
elem = self._elemStack.pop()
self._elemStack[-1].append(elem)
self._whitespaceBehaviorStack.pop()
# If the element was a condition, pop an item off the
# foundDefaultLiStack as well.
if elem[0] == "condition": self._foundDefaultLiStack.pop()
else:
# Unexpected closing tag
raise AimlParserError, ("Unexpected </%s> tag " % name) + self._location()
# A dictionary containing a validation information for each AIML
# element. The keys are the names of the elements. The values are a
# tuple of three items. The first is a list containing the names of
# REQUIRED attributes, the second is a list of OPTIONAL attributes,
# and the third is a boolean value indicating whether or not the
# element can contain other elements and/or text (if False, the
# element can only appear in an atomic context, such as <date/>).
_validationInfo101 = {
"bot": ( ["name"], [], False ),
"condition": ( [], ["name", "value"], True ), # can only contain <li> elements
"date": ( [], [], False ),
"formal": ( [], [], True ),
"gender": ( [], [], True ),
"get": ( ["name"], [], False ),
"gossip": ( [], [], True ),
"id": ( [], [], False ),
"input": ( [], ["index"], False ),
"javascript": ( [], [], True ),
"learn": ( [], [], True ),
"li": ( [], ["name", "value"], True ),
"lowercase": ( [], [], True ),
"person": ( [], [], True ),
"person2": ( [], [], True ),
"random": ( [], [], True ), # can only contain <li> elements
"sentence": ( [], [], True ),
"set": ( ["name"], [], True),
"size": ( [], [], False ),
"sr": ( [], [], False ),
"srai": ( [], [], True ),
"star": ( [], ["index"], False ),
"system": ( [], [], True ),
"template": ( [], [], True ), # needs to be in the list because it can be a parent.
"that": ( [], ["index"], False ),
"thatstar": ( [], ["index"], False ),
"think": ( [], [], True ),
"topicstar": ( [], ["index"], False ),
"uppercase": ( [], [], True ),
"version": ( [], [], False ),
}
def _validateElemStart(self, name, attr, version):
"""Test the validity of an element starting inside a <template>
element.
This function raises an AimlParserError exception if it the tag is
invalid. Otherwise, no news is good news.
"""
# Check the element's attributes. Make sure that all required
# attributes are present, and that any remaining attributes are
# valid options.
required, optional, canBeParent = self._validInfo[name]
for a in required:
if a not in attr and not self._forwardCompatibleMode:
raise AimlParserError, ("Required \"%s\" attribute missing in <%s> element " % (
a, name)) + self._location()
for a in attr:
if a in required: continue
if a[0:4] == "xml:": continue # attributes in the "xml" namespace can appear anywhere
if a not in optional and not self._forwardCompatibleMode:
raise AimlParserError, ("Unexpected \"%s\" attribute in <%s> element " % (a, name)) + self._location()
# special-case: several tags contain an optional "index" attribute.
# This attribute's value must be a positive integer.
if name in ["star", "thatstar", "topicstar"]:
for k, v in attr.items():
if k == "index":
temp = 0
try:
temp = int(v)
except:
raise AimlParserError, ("Bad type for \"%s\" attribute (expected integer, found \"%s\") " % (
k, v)) + self._location()
if temp < 1:
raise AimlParserError, ("\"%s\" attribute must have non-negative value " % (
k)) + self._location()
# See whether the containing element is permitted to contain
# subelements. If not, this element is invalid no matter what it is.
try:
parent = self._elemStack[-1][0]
parentAttr = self._elemStack[-1][1]
except IndexError:
# If the stack is empty, no parent is present. This should never
# happen.
raise AimlParserError, ("Element stack is empty while validating <%s> " % name) + self._location()
required, optional, canBeParent = self._validInfo[parent]
nonBlockStyleCondition = (
parent == "condition" and not (parentAttr.has_key("name") and parentAttr.has_key("value")))
if not canBeParent:
raise AimlParserError, ("<%s> elements cannot have any contents " % parent) + self._location()
# Special-case test if the parent element is <condition> (the
# non-block-style variant) or <random>: these elements can only
# contain <li> subelements.
elif (parent == "random" or nonBlockStyleCondition) and name != "li":
raise AimlParserError, ("<%s> elements can only contain <li> subelements " % parent) + self._location()
# Special-case test for <li> elements, which can only be contained
# by non-block-style <condition> and <random> elements, and whose
# required attributes are dependent upon which attributes are
# present in the <condition> parent.
elif name == "li":
if not (parent == "random" or nonBlockStyleCondition):
raise AimlParserError, (
"Unexpected <li> element contained by <%s> element " % parent) + self._location()
if nonBlockStyleCondition:
if parentAttr.has_key("name"):
# Single-predicate condition. Each <li> element except the
# last must have a "value" attribute.
if len(attr) == 0:
# This could be the default <li> element for this <condition>,
# unless we've already found one.
if self._foundDefaultLiStack[-1]:
raise AimlParserError, "Unexpected default <li> element inside <condition> " + self._location()
else:
self._foundDefaultLiStack[-1] = True
elif len(attr) == 1 and attr.has_key("value"):
pass # this is the valid case
else:
raise AimlParserError, "Invalid <li> inside single-predicate <condition> " + self._location()
elif len(parentAttr) == 0:
# Multi-predicate condition. Each <li> element except the
# last must have a "name" and a "value" attribute.
if len(attr) == 0:
# This could be the default <li> element for this <condition>,
# unless we've already found one.
if self._foundDefaultLiStack[-1]:
raise AimlParserError, "Unexpected default <li> element inside <condition> " + self._location()
else:
self._foundDefaultLiStack[-1] = True
elif len(attr) == 2 and attr.has_key("value") and attr.has_key("name"):
pass # this is the valid case
else:
raise AimlParserError, "Invalid <li> inside multi-predicate <condition> " + self._location()
# All is well!
return True
def create_parser():
"""Create and return an AIML parser object."""
parser = xml.sax.make_parser()
handler = AimlHandler("UTF-8")
parser.setContentHandler(handler)
#parser.setFeature(xml.sax.handler.feature_namespaces, True)
return parser
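# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; the file name below is
# a placeholder). Parsing fills the handler's `categories` dict, keyed by
# (pattern, that, topic) tuples and holding the parsed <template> element trees.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    parser = create_parser()
    handler = parser.getContentHandler()
    parser.parse("greetings.aiml")  # placeholder AIML file
    for (pattern, that, topic) in handler.categories:
        sys.stderr.write("%s / %s / %s\n" % (pattern, that, topic))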
|
mpetyx/pychatbot
|
AIML/PyAIML_CUSTOM_OVERLAY-SEMIOTICS/aiml/AimlParser.py
|
Python
|
apache-2.0
| 28,339
|
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.deprecated as kfp
from kfp.samples.test.utils import TestCase, relative_path, run_pipeline_func
run_pipeline_func([
TestCase(
pipeline_file=relative_path(__file__, 'lightweight_component.ipynb'),
mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
),
])
|
kubeflow/pipelines
|
samples/core/lightweight_component/lightweight_component_test.py
|
Python
|
apache-2.0
| 870
|
import logging
from org.openbaton.v2.cmd import BaseObCmd
from org.openbaton.v2.utils import get_result_to_list, get_result_to_show, parse_path_or_json, result_to_str
class Nsd(BaseObCmd):
"""Command to manage NSDs. It allows to:
* show details of a specific NSD passing an id
* list all saved NSDs
* delete a specific NSD passing an id
* create a specific NSD passing a path to a file or directly the json content
"""
log = logging.getLogger(__name__)
keys_to_list = ["id", "name", "vendor", "version"]
keys_to_exclude = []
def find(self, params):
if not params:
return "ERROR: missing <nsd-id>"
_id = params[0]
return result_to_str(get_result_to_show(self.app.ob_client.get_nsd(_id),
excluded_keys=self.keys_to_exclude,
_format=self.app.format))
def create(self, params):
if not params:
return "ERROR: missing <nsd> or <path-to-json>"
nsd = parse_path_or_json(params[0])
return result_to_str(get_result_to_show(self.app.ob_client.create_nsd(nsd),
excluded_keys=self.keys_to_exclude,
_format=self.app.format))
def delete(self, params):
if not params:
return "ERROR: missing <nsd-id>"
_id = params[0]
self.app.ob_client.delete_nsd(_id)
return "INFO: Deleted nsd with id %s" % _id
def list(self, params=None):
return result_to_str(
get_result_to_list(self.app.ob_client.list_nsds(), keys=self.keys_to_list, _format=self.app.format),
_format=self.app.format)
|
openbaton/openbaton-cli
|
org/openbaton/v2/nsd.py
|
Python
|
apache-2.0
| 1,773
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from heat.common import exception
from heat.engine import resource
from heat.engine import scheduler
from heat.engine.resources import resource_group
from heat.tests import common
from heat.tests import generic_resource
from heat.tests import utils
template = {
"heat_template_version": "2013-05-23",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "dummy.resource",
"properties": {
"Foo": "Bar"
}
}
}
}
}
}
template2 = {
"heat_template_version": "2013-05-23",
"resources": {
"dummy": {
"type": "dummy.resource",
"properties": {
"Foo": "baz"
}
},
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "dummy.resource",
"properties": {
"Foo": {"get_attr": ["dummy", "Foo"]}
}
}
}
}
}
}
class ResourceWithPropsAndId(generic_resource.ResourceWithProps):
def FnGetRefId(self):
return "ID-%s" % self.name
class ResourceGroupTest(common.HeatTestCase):
def setUp(self):
common.HeatTestCase.setUp(self)
resource._register_class("dummy.resource",
ResourceWithPropsAndId)
utils.setup_dummy_db()
def test_assemble_nested(self):
"""
Tests that the nested stack that implements the group is created
appropriately based on properties.
"""
stack = utils.parse_stack(template)
snip = stack.t['Resources']['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
templ = {
"heat_template_version": "2013-05-23",
"resources": {
"0": {
"type": "dummy.resource",
"properties": {
"Foo": "Bar"
}
},
"1": {
"type": "dummy.resource",
"properties": {
"Foo": "Bar"
}
},
"2": {
"type": "dummy.resource",
"properties": {
"Foo": "Bar"
}
}
}
}
self.assertEqual(templ, resg._assemble_nested(3))
def test_assemble_nested_include(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = None
stack = utils.parse_stack(templ)
snip = stack.t['Resources']['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2013-05-23",
"resources": {
"0": {
"type": "dummy.resource",
"properties": {}
}
}
}
self.assertEqual(expect, resg._assemble_nested(1))
expect['resources']["0"]['properties'] = {"Foo": None}
self.assertEqual(expect, resg._assemble_nested(1, include_all=True))
def test_invalid_res_type(self):
"""Test that error raised for unknown resource type."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['resource_def']['type'] = "idontexist"
stack = utils.parse_stack(tmp)
snip = stack.t['Resources']['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
self.assertIn('Unknown resource Type', str(exc))
def test_reference_attr(self):
stack = utils.parse_stack(template2)
snip = stack.t['Resources']['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
self.assertIsNone(resgrp.validate())
@utils.stack_delete_after
def test_delete(self):
"""Test basic delete."""
resg = self._create_dummy_stack()
self.assertIsNotNone(resg.nested())
scheduler.TaskRunner(resg.delete)()
self.assertEqual((resg.DELETE, resg.COMPLETE), resg.nested().state)
self.assertEqual((resg.DELETE, resg.COMPLETE), resg.state)
@utils.stack_delete_after
def test_update(self):
"""Test basic update."""
resg = self._create_dummy_stack()
new_snip = copy.deepcopy(resg.t)
new_snip['Properties']['count'] = 3
scheduler.TaskRunner(resg.update, new_snip)()
self.stack = resg.nested()
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.state)
self.assertEqual((resg.UPDATE, resg.COMPLETE), resg.nested().state)
self.assertEqual(3, len(resg.nested()))
@utils.stack_delete_after
def test_aggregate_attribs(self):
"""
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack()
expected = ['0', '1']
self.assertEqual(expected, resg.FnGetAtt('foo'))
self.assertEqual(expected, resg.FnGetAtt('Foo'))
@utils.stack_delete_after
def test_aggregate_refs(self):
"""
Test resource id aggregation
"""
resg = self._create_dummy_stack()
expected = ['ID-0', 'ID-1']
self.assertEqual(expected, resg.FnGetAtt("refs"))
@utils.stack_delete_after
def test_index_refs(self):
"""Tests getting ids of individual resources."""
resg = self._create_dummy_stack()
self.assertEqual("ID-0", resg.FnGetAtt('resource.0'))
self.assertEqual("ID-1", resg.FnGetAtt('resource.1'))
self.assertRaises(exception.InvalidTemplateAttribute, resg.FnGetAtt,
'resource.2')
def _create_dummy_stack(self):
stack = utils.parse_stack(template)
snip = stack.t['Resources']['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
scheduler.TaskRunner(resg.create)()
self.stack = resg.nested()
self.assertEqual(2, len(resg.nested()))
self.assertEqual((resg.CREATE, resg.COMPLETE), resg.state)
return resg
|
ntt-sic/heat
|
heat/tests/test_resource_group.py
|
Python
|
apache-2.0
| 7,257
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Login helper."""
import json
import random
import string
from flask import jsonify
from lib.adcase import db
import requests
def run(req, action):
"""frontend for login module.
Args:
req: flask request
action: action to run
Returns:
details from each function
"""
if action == "register_token":
out = register_token(req)
return jsonify(out)
def register_token(req):
"""Register new login token.
Args:
req: flask request
Returns:
object with logged user data
"""
import hashlib
user = do_login(req.args.get("token"))
if user["logged"]:
#user["email"] = str(hashlib.md5(user["email"].encode('utf-8')).hexdigest())
#user["name"] = "Logged"
#ser["full"] = ""
# save to DB
n = 256
user["hash"] = "".join(
random.choices(string.ascii_uppercase + string.digits, k=n))
user_id = "0"
# get user id
user_id = db.res("SELECT id from users where email=%s", (user["email"]))
if not user_id:
db.query(
"INSERT into users set email=%s,short_name=%s,name=%s,"
"valid_until='2100-01-01',status=1",
(user["email"], user["name"], user["full"]))
user_id = db.res("SELECT id from users where email=%s", (user["email"]))
# save session
db.query(
"INSERT into sessions set email=%s,name=%s,full=%s,"
"created_date=now(),enabled=1,hash=%s,user_id=%s",
(user["email"], user["name"], user["full"], user["hash"], user_id))
return {"user": user}
def do_login(token):
"""Validates login token with oauth server.
Args:
token: login sent from frontend
Returns:
object with logged user data
"""
r = requests.get("https://www.googleapis.com/oauth2/v3/tokeninfo?id_token=" +
token)
jdata = r.content
user = {}
user["email"] = json.loads(jdata)["email"]
user["name"] = json.loads(jdata)["given_name"]
user["full"] = json.loads(jdata)["name"]
user["logged"] = True
# except:
# user["logged"] = False
return user
|
google/adcase
|
src/lib/adcase/login.py
|
Python
|
apache-2.0
| 2,608
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from argparse import ArgumentParser
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookParser
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
app = Flask(__name__)
# get channel_secret and channel_access_token from your environment variable
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
parser = WebhookParser(channel_secret)
@app.route("/callback", methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# parse webhook body
try:
events = parser.parse(body, signature)
except InvalidSignatureError:
abort(400)
# if event is MessageEvent and message is TextMessage, then echo text
for event in events:
if not isinstance(event, MessageEvent):
continue
if not isinstance(event.message, TextMessage):
continue
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=event.message.text)
)
return 'OK'
if __name__ == "__main__":
arg_parser = ArgumentParser(
usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
)
arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port')
arg_parser.add_argument('-d', '--debug', default=False, help='debug')
options = arg_parser.parse_args()
app.run(debug=options.debug, port=options.port)
|
line/line-bot-sdk-python
|
examples/flask-echo/app.py
|
Python
|
apache-2.0
| 2,562
|
#!/usr/bin/env python
# Copyright 2015 Criteo. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
import logging
from ciscoconfparse import CiscoConfParse
import sys
import re
import os
from ansible.module_utils.basic import *
def clean_file(file, no_command):
with open(file) as file_opened:
list = file_opened.readlines()
list_clean = []
if no_command == 'undo':
line_parent_sharp = False
for line in list:
line_clean = line.rstrip(' \t\r\n\0')
if line_parent_sharp is False:
if line_clean == '#' or line_clean == '!':
line_parent_sharp = True
elif re.match('^!.*$', line_clean) or re.match(
'^#.*$', line_clean):
pass
elif line_clean == 'return' or line_clean == 'exit':
pass
elif line_clean == '':
pass
else:
list_clean.append(line_clean)
else:
if line_clean == '#' or line_clean == '!':
line_parent_sharp = True
elif re.match('^!.*$', line_clean) or re.match(
'^#.*$', line_clean):
pass
elif line_clean == 'return' or line_clean == 'exit':
pass
                elif re.match(r'^\s.*$', line_clean):
                    line_without_space = re.match(r'^\s(.*)$', line_clean)
list_clean.append(line_without_space.group(1))
elif line_clean == '':
pass
else:
list_clean.append(line_clean)
line_parent_sharp = False
else:
for line in list:
if 'Current Configuration ...' in line:
pass
else:
list_clean.append(line.rstrip(' \t\r\n\0'))
return list_clean
def compare_children_prefix_no(obj_origin, obj_target, no_command):
command_list = [obj_origin.text]
for child in obj_origin.children:
match = obj_target.find_objects_w_parents(
obj_origin.text,
'^'+re.escape(child.text)+'$')
        if len(match) == 0 and not re.match(
                r"\s*" + no_command + r"\s.*", child.text):
            line_text_without_indent = re.match(r"(\s*)(.*)", child.text)
command_list.append(
line_text_without_indent.group(1) +
no_command +
" " +
line_text_without_indent.group(2))
        elif len(match) == 0 and re.match(
                r"\s*" + no_command + r"\s.*", child.text):
            line_text_without_no = re.match(
                r"(\s*)" + no_command + r"\s(.*)", child.text)
command_list.append(
line_text_without_no.group(1) +
line_text_without_no.group(2))
elif len(child.children) > 0:
for result_recursive in compare_children_prefix_no(
child, obj_target, no_command):
command_list.append(result_recursive)
if len(command_list) > 1:
return command_list
else:
return []
def compare_children(obj_origin, obj_target):
command_list = [obj_origin.text]
for child in obj_origin.children:
match = obj_target.find_objects_w_parents(
obj_origin.text,
'^'+re.escape(child.text)+'$')
if len(match) == 0:
command_list.append(child.text)
elif len(child.children) > 0:
for result_recursive in compare_children(child, obj_target):
command_list.append(result_recursive)
if len(command_list) > 1:
return command_list
else:
return []
def merge_commands(list1, list2):
command_list = []
for line_list1 in list1:
if isinstance(line_list1, list) is True:
for line_children_list1 in line_list1:
command_list.append(line_children_list1)
for line_list2 in list2:
if isinstance(line_list2, list) is True:
if line_list1[0] == line_list2[0]:
for line_children_list2 in line_list2[1:]:
command_list.append(line_children_list2)
list2.remove(line_list2)
else:
command_list.append(line_list1)
for line_list2 in list2:
if isinstance(line_list2, list) is True:
for line_children_list2 in line_list2:
command_list.append(line_children_list2)
else:
command_list.append(line_list2)
return command_list
def netcompare(origin, target, no_command):
origin_file = CiscoConfParse(origin, syntax='ios', factory=False)
target_file = CiscoConfParse(target, syntax='ios', factory=False)
diff_add_no_commands = []
for line in origin_file.objs:
if line.parent.linenum == line.linenum and line.is_config_line is True:
parent_match = target_file.find_objects(
'^'+re.escape(line.text)+'$',
exactmatch=True, ignore_ws=False)
            if len(parent_match) == 0 and not re.match(
                    no_command + r"\s.*", line.text):
                diff_add_no_commands.append(no_command + ' ' + line.text)
            elif len(parent_match) == 0 and re.match(
                    no_command + r"\s.*", line.text):
                line_text_without_no = re.match(
                    no_command + r"\s(.*)", line.text)
diff_add_no_commands.append(line_text_without_no.group(1))
if len(line.children) > 0 and len(parent_match) != 0:
result_comparison = compare_children_prefix_no(
line, target_file, no_command)
if len(result_comparison) > 0:
diff_add_no_commands.append(result_comparison)
diff_add_commands = []
for line in target_file.objs:
if line.parent.linenum == line.linenum and line.is_config_line is True:
parent_match = origin_file.find_objects(
'^'+re.escape(line.text)+'$',
exactmatch=True, ignore_ws=False)
if len(parent_match) == 0:
parent_with_children = target_file.find_all_children(
'^'+re.escape(line.text)+'$')
for line_in_parent_with_children in parent_with_children:
diff_add_commands.append(line_in_parent_with_children)
if len(line.children) > 0 and len(parent_match) != 0:
result_comparison = compare_children(line, origin_file)
if len(result_comparison) > 0:
diff_add_commands.append(
compare_children(line, origin_file))
return merge_commands(diff_add_no_commands, diff_add_commands)
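
# Illustrative call pattern (not part of the original module): outside of Ansible the
# comparison can be driven directly from two saved configuration files. The file names
# below are hypothetical; no_command is typically 'no' for Cisco-style configurations
# and 'undo' for Comware/VRP-style ones.
#
#     running = clean_file('running.cfg', 'no')
#     intended = clean_file('intended.cfg', 'no')
#     for command in netcompare(running, intended, 'no'):
#         print(command)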
def main():
logging.basicConfig(
filename='/var/tmp/net_compare.log', level=logging.DEBUG)
module = AnsibleModule(
argument_spec=dict(
origin=dict(required=True, type='str'),
target=dict(required=True, type='str'),
no_command=dict(required=True, type='str'),
)
)
origin_list = clean_file(
module.params['origin'], module.params['no_command'])
target_list = clean_file(
module.params['target'], module.params['no_command'])
display_commands = netcompare(
origin_list, target_list, module.params['no_command'])
if display_commands:
results = json.dumps(display_commands)
module.exit_json(changed=True, text=results)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
cedric222/netcompare
|
ansible/library/netcompare.py
|
Python
|
apache-2.0
| 8,224
|
import gettext
gettext.install(
'example',
'locale',
names=['ngettext'],
)
print(_('This message is in the script.'))
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_internationlization_and_localization/gettext_app_builtin.py
|
Python
|
apache-2.0
| 132
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Antonin Bas (antonin@barefootnetworks.com)
#
#
# -*- coding: utf-8 -*-
class Node(object):
def __init__(self, n):
self.n = n
self.edges = set()
def add_edge_to(self, other):
assert(isinstance(other, Node))
self.edges.add(other)
def __str__(self):
return str(self.n)
class Graph(object):
def __init__(self):
self.nodes = {}
def add_node(self, node):
assert(node not in self.nodes)
self.nodes[node] = Node(node)
def __contains__(self, node):
return node in self.nodes
def get_node(self, node):
return self.nodes[node]
def produce_topo_sorting(self):
def visit(node, topo_sorting, sequence=None):
if sequence is not None:
sequence += [str(node)]
if node._behavioral_topo_sorting_mark == 1:
if sequence is not None:
print "cycle", sequence
return False
if node._behavioral_topo_sorting_mark != 2:
node._behavioral_topo_sorting_mark = 1
for next_node in node.edges:
res = visit(next_node, topo_sorting, sequence)
if not res:
return False
node._behavioral_topo_sorting_mark = 2
topo_sorting.insert(0, node.n)
return True
has_cycle = False
topo_sorting = []
for node in self.nodes.values():
# 0 is unmarked, 1 is temp, 2 is permanent
node._behavioral_topo_sorting_mark = 0
for node in self.nodes.values():
if node._behavioral_topo_sorting_mark == 0:
if not visit(node, topo_sorting, sequence=[]):
has_cycle = True
break
# removing mark
for node in self.nodes.values():
del node._behavioral_topo_sorting_mark
if has_cycle:
return None
return topo_sorting
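
# Minimal usage sketch (not part of the original module): build a three-node dependency
# graph and topologically sort it. An edge from a to b means a must appear before b in
# the result; produce_topo_sorting() returns None when the graph contains a cycle.
if __name__ == "__main__":
    g = Graph()
    for n in (1, 2, 3):
        g.add_node(n)
    g.get_node(1).add_edge_to(g.get_node(2))
    g.get_node(2).add_edge_to(g.get_node(3))
    print(g.produce_topo_sorting())  # expected: [1, 2, 3]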
|
p4lang/p4c-bm
|
p4c_bm/util/topo_sorting.py
|
Python
|
apache-2.0
| 2,606
|
#!/usr/bin/env python
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
import os
import argparse
import sys
parser = argparse.ArgumentParser(description="Validates directory containing pocolm-format "
"language model, as produced by make_lm_dir.py",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("lm_dir",
help="Directory to validate")
args = parser.parse_args()
os.environ['PATH'] = (os.environ['PATH'] + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])))
if not os.path.exists(args.lm_dir):
sys.exit("validate_lm_dir.py: Expected directory {0} to exist".format(args.lm_dir))
# the following code checks ngram_order
f = open("{0}/ngram_order".format(args.lm_dir))
line = f.readline()
try:
ngram_order = int(line)
assert ngram_order > 1 and len(line.split()) == 1
assert f.readline() == ''
except Exception as e:
sys.exit("validate_lm_dir.py: Expected file {0}/ngram_order to contain "
"an integer >1: {1}".format(args.lm_dir, str(e)))
f.close()
# the following code checks num_ngrams
try:
f = open("{0}/num_ngrams".format(args.lm_dir))
lines = f.readlines()
assert(len(lines) == ngram_order)
for order, line in enumerate(lines):
assert(len(line.split()) == 2)
assert(int(line.split()[0]) == order + 1)
assert(int(line.split()[1]) > 0)
except Exception as e:
sys.exit("validate_lm_dir.py: Expected file {0}/num_ngrams to contain "
"an integer for every order each line: {1}".format(args.lm_dir, str(e)))
f.close()
# call validate_vocab.py to check the vocab.
if os.system("validate_vocab.py {0}/words.txt".format(args.lm_dir)) != 0:
sys.exit("validate_lm_dir.py: failed to validate {0}/words.txt".format(args.lm_dir))
if os.system("echo true | cmp -s - {0}/was_pruned || "
"echo false | cmp -s - {0}/was_pruned".format(args.lm_dir)) != 0:
sys.exit("validate_lm_dir.py: {0}/was_pruned should contain "
"'true' or 'false'.".format(args.lm_dir))
# check the 'names' file; it should have lines like:
# 1 switchboard
# 2 fisher
# etc.
f = open("{0}/names".format(args.lm_dir))
num_train_sets = 0
while True:
line = f.readline()
if line == '':
break
num_train_sets += 1
try:
[m, name] = line.split()
assert int(m) == num_train_sets
except:
sys.exit("validate_lm_dir.py: bad {0}'th line of {1}/names: '{2}'".format(
num_train_sets, args.lm_dir, line[0:-1]))
f.close()
if os.path.exists(args.lm_dir + "/num_splits"):
# split LM dir, contains float.all.split{1,2,3..}
f = open(args.lm_dir + "/num_splits")
try:
num_splits = int(f.readline())
assert f.readline() == '' and num_splits > 1
f.close()
except:
sys.exit("validate_lm_dir.py: {0}/num_splits had unexpected contents.")
for i in range(1, num_splits + 1):
name = "{0}/float.all.{1}".format(args.lm_dir, i)
if not os.path.exists(name):
sys.exit("validate_lm_dir.py: expected file {0} to exist".format(name))
else:
# non-split LM dir, contains float.all
count_file = args.lm_dir + "/float.all"
if not os.path.exists(count_file):
sys.exit("validate_lm_dir.py: Expected file {0} to exist".format(count_file))
if not os.path.getsize(count_file) > 0:
sys.exit("validate_lm_dir.py: Expected file {0} to be nonempty".format(count_file))
if os.system("validate_metaparameters.py --ngram-order={0} --num-train-sets={1} "
"{2}/metaparameters".format(ngram_order,
num_train_sets, args.lm_dir)) != 0:
sys.exit("validate_lm_dir.py: failed to validate {0}/metaparameters".format(
args.lm_dir))
print("validate_lm_dir.py: validated LM directory " + args.lm_dir,
file=sys.stderr)
|
wantee/pocolm
|
scripts/validate_lm_dir.py
|
Python
|
apache-2.0
| 4,017
|
from __future__ import print_function
import os
import re
import ujson
from typing import Any, Dict, List, Text
from django.core.management.commands import compilemessages
from django.conf import settings
import polib
from zerver.lib.i18n import with_language
class Command(compilemessages.Command):
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if settings.PRODUCTION:
# HACK: When using upgrade-zulip-from-git, we're in a
# production environment where STATIC_ROOT will include
# past versions; this ensures we only process the current
# version
settings.STATIC_ROOT = os.path.join(settings.DEPLOY_ROOT, "static")
settings.LOCALE_PATHS = (os.path.join(settings.DEPLOY_ROOT, 'static/locale'),)
super(Command, self).handle(*args, **options)
self.extract_language_options()
self.create_language_name_map()
def create_language_name_map(self):
# type: () -> None
join = os.path.join
static_root = settings.STATIC_ROOT
path = join(static_root, 'locale', 'language_options.json')
output_path = join(static_root, 'locale', 'language_name_map.json')
with open(path, 'r') as reader:
languages = ujson.load(reader)
lang_list = []
for lang_info in languages['languages']:
name = lang_info['name']
lang_info['name'] = with_language(name, lang_info['code'])
lang_list.append(lang_info)
lang_list.sort(key=lambda lang: lang['name'])
with open(output_path, 'w') as output_file:
ujson.dump({'name_map': lang_list}, output_file, indent=4)
def get_po_filename(self, locale_path, locale):
# type: (Text, Text) -> Text
po_template = '{}/{}/LC_MESSAGES/django.po'
return po_template.format(locale_path, locale)
def get_json_filename(self, locale_path, locale):
# type: (Text, Text) -> Text
return "{}/{}/translations.json".format(locale_path, locale)
def extract_language_options(self):
# type: () -> None
locale_path = u"{}/locale".format(settings.STATIC_ROOT)
output_path = u"{}/language_options.json".format(locale_path)
data = {'languages': []} # type: Dict[str, List[Dict[str, Any]]]
        lang_name_re = re.compile(r'"Language-Team: (.*?) \(')
locales = os.listdir(locale_path)
locales.append(u'en')
locales = list(set(locales))
for locale in locales:
info = {} # type: Dict[str, Any]
if locale == u'en':
data['languages'].append({
'code': u'en',
'name': u'English',
})
continue
filename = self.get_po_filename(locale_path, locale)
if not os.path.exists(filename):
continue
with open(filename, 'r') as reader:
result = lang_name_re.search(reader.read())
if result:
try:
name = result.group(1)
except Exception:
print("Problem in parsing {}".format(filename))
raise
else:
raise Exception("Unknown language %s" % (locale,))
percentage = self.get_translation_percentage(locale_path, locale)
info['name'] = name
info['code'] = locale
info['percent_translated'] = percentage
if info:
data['languages'].append(info)
with open(output_path, 'w') as writer:
ujson.dump(data, writer, indent=2)
def get_translation_percentage(self, locale_path, locale):
# type: (Text, Text) -> int
# backend stats
po = polib.pofile(self.get_po_filename(locale_path, locale))
not_translated = len(po.untranslated_entries())
total = len(po.translated_entries()) + not_translated
# frontend stats
with open(self.get_json_filename(locale_path, locale)) as reader:
for key, value in ujson.load(reader).items():
total += 1
if key == value:
not_translated += 1
return (total - not_translated) * 100 // total
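
    # Worked example (illustrative, not part of the original command): with 40 backend
    # entries of which 4 are untranslated, plus 10 frontend strings of which 2 still
    # equal their English source, total = 50 and not_translated = 6, so the method
    # reports (50 - 6) * 100 // 50 == 88 percent translated.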
|
verma-varsha/zulip
|
zerver/management/commands/compilemessages.py
|
Python
|
apache-2.0
| 4,381
|
"""Bayesian p-value Posterior/Prior predictive plot."""
import numpy as np
from ..labels import BaseLabeller
from ..rcparams import rcParams
from ..utils import _var_names
from .plot_utils import default_grid, filter_plotters_list, get_plotting_function
from ..sel_utils import xarray_var_iter
def plot_bpv(
data,
kind="u_value",
t_stat="median",
bpv=True,
plot_mean=True,
reference="analytical",
mse=False,
n_ref=100,
hdi_prob=0.94,
color="C0",
grid=None,
figsize=None,
textsize=None,
labeller=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
ax=None,
backend=None,
plot_ref_kwargs=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Parameters
----------
data : az.InferenceData object
:class:`arviz.InferenceData` object containing the observed and
posterior/prior predictive data.
kind : str
Type of plot to display ("p_value", "u_value", "t_stat"). Defaults to u_value.
For "p_value" we compute p := p(y* ≤ y | y). This is the probability of the data y being
larger or equal than the predicted data y*. The ideal value is 0.5 (half the predictions
below and half above the data).
For "u_value" we compute pi := p(yi* ≤ yi | y). i.e. like a p_value but per observation yi.
This is also known as marginal p_value. The ideal distribution is uniform. This is similar
to the LOO-pit calculation/plot, the difference is than in LOO-pit plot we compute
pi = p(yi* r ≤ yi | y-i ), where y-i, is all other data except yi.
For "t_stat" we compute := p(T(y)* ≤ T(y) | y) where T is any test statistic. See t_stat
argument below for details of available options.
t_stat : str, float, or callable
Test statistics to compute from the observations and predictive distributions.
Allowed strings are "mean", "median" or "std". Defaults to "median".
        Alternatively, a quantile can be passed as a float (or str) in the
        interval (0, 1). Finally, a user-defined function is also
        accepted; see the examples section for details.
bpv : bool
If True (default) add the Bayesian p_value to the legend when ``kind = t_stat``.
plot_mean : bool
Whether or not to plot the mean test statistic. Defaults to True.
reference : str
        How to compute the distributions used as reference for u_values or p_values. Allowed values
        are "analytical" (default) and "samples". Use `None` to not plot any reference.
        Defaults to "analytical".
    mse : bool
        Show the scaled mean square error between the uniform distribution and the
        marginal p_value distribution. Defaults to False.
n_ref : int, optional
Number of reference distributions to sample when ``reference=samples``. Defaults to 100.
hdi_prob: float, optional
Probability for the highest density interval for the analytical reference distribution when
computing u_values. Should be in the interval (0, 1]. Defaults to
0.94.
color : str
Matplotlib color
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize : tuple
Figure size. If None it will be defined automatically.
textsize : float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on ``figsize``.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
var_names : list of variable names
        Variables to be plotted. If `None`, all variables are plotted. Prefix the variables with ``~``
when you want to exclude them from the plot.
filter_vars : {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then ``flatten_pp=flatten``.
ax : numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend : str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
plot_ref_kwargs : dict, optional
Extra keyword arguments to control how reference is represented.
Passed to :meth:`matplotlib.axes.Axes.plot` or
:meth:`matplotlib.axes.Axes.axhspan` (when ``kind=u_value``
and ``reference=analytical``).
backend_kwargs : bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_ppc : Plot for posterior/prior predictive checks.
plot_loo_pit : Plot Leave-One-Out probability integral transformation (PIT) predictive checks.
plot_dist_comparison : Plot to compare fitted and unfitted distributions.
References
----------
* Gelman et al. (2013) see http://www.stat.columbia.edu/~gelman/book/ pages 151-153 for details
Examples
--------
Plot Bayesian p_values.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="p_value")
Plot custom test statistic comparison.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data("regression1d")
>>> az.plot_bpv(data, kind="t_stat", t_stat=lambda x:np.percentile(x, q=50, axis=-1))
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}"')
if kind.lower() not in ("t_stat", "u_value", "p_value"):
raise TypeError("`kind` argument must be either `t_stat`, `u_value`, or `p_value`")
if reference is not None:
if reference.lower() not in ("analytical", "samples"):
raise TypeError(
"`reference` argument must be either `analytical`, `samples`, or `None`"
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
if data_pairs is None:
data_pairs = {}
if labeller is None:
labeller = BaseLabeller()
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_t_stats",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
bpvplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
total_pp_samples=total_pp_samples,
kind=kind,
bpv=bpv,
t_stat=t_stat,
reference=reference,
mse=mse,
n_ref=n_ref,
hdi_prob=hdi_prob,
plot_mean=plot_mean,
color=color,
figsize=figsize,
textsize=textsize,
labeller=labeller,
plot_ref_kwargs=plot_ref_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_bpv", "bpvplot", backend)
axes = plot(**bpvplot_kwargs)
return axes
|
arviz-devs/arviz
|
arviz/plots/bpvplot.py
|
Python
|
apache-2.0
| 11,291
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import windmill
import logging
from time import sleep
import os, sys
from datetime import datetime
from threading import Thread
import shutil
import socket
import functest
functest.configure()
def process_options(argv_list):
"""Process all the command line options"""
import admin_options
admin_options.process_module(admin_options)
argv_list.pop(0)
action = None
# This might be the hairiest code in windmill :)
# We have a very specific way we need to parse arguments
# because of the way different arguments interact with each other
# 8/27/2007 Gawd this is ugly, i would love to refactor this but I've
# forgotten what it does -Mikeal
# 12/15/2007 Oh man, I'm going to add a feature to this without refactoring it.
# The issue with this code remains the same and no standard arg parsing
# module can do what we need.
for arg in argv_list:
# Grab the test url if one is given
if arg.startswith('http://') or arg.startswith('https://'):
windmill.settings['TEST_URL'] = arg
functest.registry['url'] = arg
elif arg.startswith('-'):
# Take something like -efg and set the e, f, and g options
options = arg.replace('-', '')
for option in options:
admin_options.flags_dict[option]()
else:
# Any argument not starting with - is a regular named option
value = None
            if arg.find('=') != -1:
name, value = arg.split('=')
else:
name = arg
if name in admin_options.options_dict:
processor = admin_options.options_dict[name]
if value is None:
processor()
else:
processor(value)
elif name in action_mapping:
action = action_mapping[name]
else:
print name, 'is not a windmill argument. Sticking in functest registry.'
if value is None:
value = True
functest.registry[name] = value
if action is None:
# If an action is not defined we default to running the service in the foreground
return action_mapping['runserver']
else:
return action
def setup_servers(console_level=logging.INFO):
"""Setup the server and return httpd and loggers"""
windmill.is_active = True
windmill.ide_is_awake = False
if len(logging.getLogger().handlers) > 0:
console_handler = logging.getLogger().handlers[0]
console_handler.setLevel(console_level)
httpd = windmill.server.wsgi.make_windmill_server()
return httpd
def run_threaded(console_level=logging.INFO):
"""Run the server threaded."""
httpd = setup_servers(console_level)
httpd_thread = Thread(target=httpd.start)
getattr(httpd_thread, 'setDaemon', lambda x: x)(True)
httpd_thread.start()
while not httpd.ready:
sleep(.25)
return httpd, httpd_thread
def configure_global_settings(logging_on=True):
"""Configure that global settings for the current run"""
# This logging stuff probably shouldn't be here, it should probably be abstracted
if logging_on:
logging.getLogger().setLevel(0)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
if os.environ.has_key('WINDMILL_CONFIG_FILE'):
local_settings = os.environ['WINDMILL_CONFIG_FILE']
else:
local_settings = None
windmill.settings = windmill.conf.configure_settings(localSettings=local_settings)
port = windmill.settings['SERVER_HTTP_PORT']
while 1:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
s.close()
port += 1
except socket.error:
break
windmill.settings['SERVER_HTTP_PORT'] = port
return windmill.settings
on_ide_awake = []
def setup():
"""Setup server and shell objects"""
global shell_objects_dict
shell_objects_dict = {}
windmill.settings['shell_objects'] = shell_objects_dict
assert not windmill.settings.get('setup_has_run', False)
httpd, httpd_thread = run_threaded(windmill.settings['CONSOLE_LOG_LEVEL'])
shell_objects_dict['httpd'] = httpd
shell_objects_dict['httpd_thread'] = httpd_thread
from windmill.bin import shell_objects
if windmill.settings['CONTINUE_ON_FAILURE'] is not False:
shell_objects.jsonrpc_client.add_json_command('{"method": "commands.setOptions", "params": {"stopOnFailure" : false}}')
if windmill.settings['EXTENSIONS_DIR'] is not None:
shell_objects.load_extensions_dir(windmill.settings['EXTENSIONS_DIR'])
if windmill.settings['RUN_TEST'] is not None:
shell_objects.run_test(windmill.settings['RUN_TEST'])
if windmill.settings['LOAD_TEST'] is not None:
shell_objects.load_test(windmill.settings['LOAD_TEST'])
if windmill.settings['JAVASCRIPT_TEST_DIR']:
shell_objects.run_js_tests(windmill.settings['JAVASCRIPT_TEST_DIR'],
windmill.settings['JAVASCRIPT_TEST_FILTER'],
windmill.settings['JAVASCRIPT_TEST_PHASE'])
browser = [setting for setting in windmill.settings.keys() if setting.startswith('START_') and \
windmill.settings[setting] is True]
import shell_objects
    if len(browser) == 1:
shell_objects_dict['browser'] = getattr(shell_objects, browser[0].lower())()
for attribute in dir(shell_objects):
shell_objects_dict[attribute] = getattr(shell_objects, attribute)
shell_objects_dict['setup_has_run'] = True
return shell_objects_dict
def teardown(shell_objects):
"""Teardown the server, threads, and open browsers."""
if windmill.is_active:
windmill.is_active = False
shell_objects['clear_queue']()
for controller in windmill.settings['controllers']:
controller.stop()
del(controller)
if windmill.settings['START_FIREFOX'] and windmill.settings['MOZILLA_CREATE_NEW_PROFILE']:
shutil.rmtree(windmill.settings['MOZILLA_PROFILE'])
for directory in windmill.teardown_directories:
if os.path.isdir(directory):
shutil.rmtree(directory)
# while shell_objects['httpd_thread'].isAlive():
# try:
# shell_objects['httpd'].stop()
# except Exception, e:
# print "Exception occurred while shutting server down:"
# print e
#
# # Hacking workaround for port locking up on linux.
# if sys.platform == 'linux2':
# try:
# shell_objects['httpd'].socket.shutdown(socket.SHUT_RDWR)
# shell_objects['httpd'].socket.close()
# except: pass
shell_objects['httpd'].stop()
#shell_objects['httpd_thread'].join()
def runserver_action(shell_objects):
"""Run the server in the foreground with the options given to the command line"""
try:
        if 'runserver' in sys.argv or len(windmill.settings['controllers']) == 0:
print 'Server running...'
if windmill.settings['EXIT_ON_DONE'] and not windmill.settings['JAVASCRIPT_TEST_DIR']:
                while windmill.block_exit or (
                    len(shell_objects['httpd'].controller_queue.queue) != 0) or (
                    len(shell_objects['httpd'].test_resolution_suite.unresolved) != 0):
sleep(.25)
elif ( windmill.settings['RUN_TEST'] ):
windmill.runserver_running = True
while windmill.runserver_running:
sleep(.25)
else:
windmill.runserver_running = True
while windmill.runserver_running:
sleep(.25)
teardown(shell_objects)
if windmill.test_has_failed:
sys.exit(1)
except KeyboardInterrupt:
teardown(shell_objects)
sys.exit(1)
def shell_action(shell_objects):
"""Start the windmill shell environment"""
windmill.in_shell = True
# If ipython is installed and we weren't given the usecode option
try:
assert not windmill.settings['USECODE']
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
ipshell(local_ns=shell_objects)
except:
import code
code.interact(local=shell_objects)
teardown(shell_objects)
# def wxui_action(shell_objects):
# """Start the wxPython based service GUI"""
# try:
# import wxui
# app = wxui.App(shell_objects)
# shell_objects['wxui_app'] = app
# app.MainLoop()
# teardown(shell_objects)
# except ImportError:
# print 'Failed to import wx, defaulting to the shell'
# shell_action(shell_objects)
# def tinderbox_action(shell_objects):
# """Tinderbox action for continuous integration"""
# shell_objects['jsonrpc_client'].add_json_command('{"method": "commands.setOptions", "params": {"stopOnFailure" : false}}')
#
# class ResultsProcessor(object):
# passed = 0
# failed = 0
# def success(self, test, debug):
# self.passed += 1
# def failure(self, test, debug):
# self.failed += 1
#
# result_processor = ResultsProcessor()
# shell_objects['httpd'].test_resolution_suite.result_processor = result_processor
#
# starttime = datetime.now()
# result = None
#
# if windmill.settings['RUN_TEST']:
# try:
# while ( len(shell_objects['httpd'].controller_queue.queue) is not 0 ) or (
# len(shell_objects['httpd'].test_resolution_suite.unresolved) is not 0 ):
# sleep(1)
#
# print '#TINDERBOX# Testname = FullSuite'
# print '#TINDERBOX# Time elapsed = %s' % str (datetime.now() - starttime)
#
# if result_processor.failed > 0 or result_processor.passed is 0:
# result = "FAILED"
# else:
# result = "PASSED"
#
# print '#TINDERBOX# Status = %s' % result
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
#
# except KeyboardInterrupt:
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
# else:
# try:
# while not windmill.TESTS_COMPLETED:
# sleep(1)
# except KeyboardInterrupt:
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
#
# print '#TINDERBOX# Testname = FullSuite'
# print '#TINDERBOX# Time elapsed = %s' % str (datetime.now() - starttime)
# if windmill.RESULTS['fail'] > 0 or windmill.RESULTS['pass'] is 0:
# result = "FAILED"
# else:
# result = "PASSED"
#
# print '#TINDERBOX# Status = %s' % result
# teardown(shell_objects)
# if result == "FAILED":
# sys.exit(1)
def start_windmill():
"""Start windmill and return shell_objects"""
configure_global_settings()
shell_objects = setup()
return shell_objects
def command_line_startup():
"""Command line startup"""
windmill.stdout, windmill.stdin = sys.stdout, sys.stdin
configure_global_settings()
action = process_options(sys.argv)
shell_objects = setup()
action(shell_objects)
action_mapping = {'shell':shell_action, 'runserver':runserver_action,
'run_service':runserver_action}
|
ept/windmill
|
windmill/bin/admin_lib.py
|
Python
|
apache-2.0
| 12,984
|
from gasp import * # import everything from the gasp library
begin_graphics() # open the graphics canvas
Box((20, 20), 100, 100) # the house
Box((55, 20), 30, 50) # the door
Box((40, 80), 20, 20) # the left window
Box((80, 80), 20, 20) # the right window
Line((20, 120), (70, 160)) # the left roof
Line((70, 160), (120, 120)) # the right roof
update_when('key_pressed') # keep the canvas open until a key is pressed
end_graphics() # close the canvas (which would happen
# anyway, since the script ends here, but it
# is better to be explicit).
|
abhispra/python
|
hello_world/house_gasp.py
|
Python
|
apache-2.0
| 659
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import os
import shutil
import textwrap
import threading
from builtins import map
from collections import defaultdict
from textwrap import dedent
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.tasks.ivy_task_mixin import IvyResolveFingerprintStrategy, IvyTaskMixin
from pants.backend.jvm.tasks.jar_task import JarTask
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.target import Target
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.java import util
from pants.java.executor import Executor
from pants.util.dirutil import safe_mkdir_for
from pants.util.memo import memoized_property
class ShadedToolFingerprintStrategy(IvyResolveFingerprintStrategy):
def __init__(self, main, custom_rules=None):
# The bootstrapper uses no custom confs in its resolves.
super(ShadedToolFingerprintStrategy, self).__init__(confs=None)
self._main = main
self._custom_rules = custom_rules
def compute_fingerprint(self, target):
hasher = hashlib.sha1()
base_fingerprint = super(ShadedToolFingerprintStrategy, self).compute_fingerprint(target)
if base_fingerprint is None:
return None
hasher.update('version=2')
hasher.update(base_fingerprint)
# NB: this series of updates must always cover the same fields that populate `_tuple`'s slots
# to ensure proper invalidation.
hasher.update(self._main)
if self._custom_rules:
for rule in self._custom_rules:
hasher.update(rule.render())
return hasher.hexdigest()
def _tuple(self):
# NB: this tuple's slots - used for `==/hash()` - must be kept in agreement with the hashed
# fields in `compute_fingerprint` to ensure proper invalidation.
return self._main, tuple(self._custom_rules or ())
def __hash__(self):
return hash((type(self),) + self._tuple())
def __eq__(self, other):
return type(self) == type(other) and self._tuple() == other._tuple()
class BootstrapJvmTools(IvyTaskMixin, JarTask):
class ToolUnderspecified(Exception):
pass
@classmethod
def product_types(cls):
return ['jvm_build_tools_classpath_callbacks']
@classmethod
def register_options(cls, register):
super(BootstrapJvmTools, cls).register_options(register)
register('--eager', type=bool, default=False,
help='Eagerly bootstrap all known JVM tools, instead of fetching them on-demand. '
'Useful for creating a warm Pants workspace, e.g., for containerizing.')
# Must be registered with the shader- prefix, as JarTask already registers --jvm-options
# (indirectly, via NailgunTask).
register('--shader-jvm-options', type=list, metavar='<option>...',
help='Run the tool shader with these extra jvm options.')
@classmethod
def subsystem_dependencies(cls):
return super(BootstrapJvmTools, cls).subsystem_dependencies() + (IvySubsystem, Shader.Factory)
@classmethod
def prepare(cls, options, round_manager):
super(BootstrapJvmTools, cls).prepare(options, round_manager)
Shader.Factory.prepare_tools(round_manager)
class ToolResolveError(TaskError):
"""Indicates an error resolving a required JVM tool classpath."""
@classmethod
def _tool_resolve_error(cls, error, dep_spec, jvm_tool):
msg = dedent("""
Failed to resolve target for tool: {tool}. This target was obtained from
option {option} in scope {scope}. You probably need to add this target to your tools
BUILD file(s), usually located in BUILD.tools in the workspace root.
Exception {etype}: {error}
""".format(tool=dep_spec,
etype=type(error).__name__,
error=error,
scope=jvm_tool.scope,
option=jvm_tool.key))
return cls.ToolResolveError(msg)
@classmethod
def get_alternate_target_roots(cls, options, address_mapper, build_graph):
processed = set()
for jvm_tool in JvmToolMixin.get_registered_tools():
dep_spec = jvm_tool.dep_spec(options)
dep_address = Address.parse(dep_spec)
# Some JVM tools are requested multiple times, we only need to handle them once.
if dep_address not in processed:
processed.add(dep_address)
try:
if build_graph.resolve_address(dep_address):
# The user has defined a tool classpath override - we let that stand.
continue
except AddressLookupError as e:
if jvm_tool.classpath is None:
raise cls._tool_resolve_error(e, dep_spec, jvm_tool)
else:
if not jvm_tool.is_default(options):
# The user specified a target spec for this jvm tool that doesn't actually exist.
# We want to error out here instead of just silently using the default option while
# appearing to respect their config.
raise cls.ToolResolveError(dedent("""
Failed to resolve target for tool: {tool}. This target was obtained from
option {option} in scope {scope}.
Make sure you didn't make a typo in the tool's address. You specified that the
tool should use the target found at "{tool}".
This target has a default classpath configured, so you can simply remove:
[{scope}]
{option}: {tool}
from pants.ini (or any other config file) to use the default tool.
The default classpath is: {default_classpath}
Note that tool target addresses in pants.ini should be specified *without* quotes.
""").strip().format(tool=dep_spec,
option=jvm_tool.key,
scope=jvm_tool.scope,
default_classpath=':'.join(map(str, jvm_tool.classpath or ()))))
if jvm_tool.classpath:
tool_classpath_target = JarLibrary(name=dep_address.target_name,
address=dep_address,
build_graph=build_graph,
jars=jvm_tool.classpath)
else:
# The tool classpath is empty by default, so we just inject a dummy target that
# ivy resolves as the empty list classpath. JarLibrary won't do since it requires
# one or more jars, so we just pick a target type ivy has no resolve work to do for.
tool_classpath_target = Target(name=dep_address.target_name,
address=dep_address,
build_graph=build_graph)
build_graph.inject_target(tool_classpath_target, synthetic=True)
# We use the trick of not returning alternate roots, but instead just filling the dep_spec
# holes with a JarLibrary built from a tool's default classpath JarDependency list if there is
# no over-riding targets present. This means we do modify the build_graph, but we at least do
# it at a time in the engine lifecycle cut out for handling that.
return None
def __init__(self, *args, **kwargs):
super(BootstrapJvmTools, self).__init__(*args, **kwargs)
self._tool_cache_path = os.path.join(self.workdir, 'tool_cache')
def execute(self):
registered_tools = JvmToolMixin.get_registered_tools()
if registered_tools:
# Map of scope -> (map of key -> callback).
callback_product_map = self.context.products.get_data('jvm_build_tools_classpath_callbacks',
init_func=lambda: defaultdict(dict))
# We leave a callback in the products map because we want these Ivy calls
# to be done lazily (they might never actually get executed) and we want
# to hit Task.invalidated (called in Task._ivy_resolve) on the instance of
# BootstrapJvmTools rather than the instance of whatever class requires
# the bootstrap tools. It would be awkward and possibly incorrect to call
# self.invalidated twice on a Task that does meaningful invalidation on its
# targets. -pl
for jvm_tool in registered_tools:
dep_spec = jvm_tool.dep_spec(self.context.options)
callback = self.cached_bootstrap_classpath_callback(dep_spec, jvm_tool)
callback_product_map[jvm_tool.scope][jvm_tool.key] = callback
if self.get_options().eager:
with self.context.new_workunit('eager'):
for scope, callbacks_by_key in callback_product_map.items():
for key, callback in callbacks_by_key.items():
try:
callback()
except self.ToolUnderspecified:
pass # We don't want to fail for placeholder registrations
# (e.g., custom scala platform).
def _resolve_tool_targets(self, dep_spec, jvm_tool):
try:
targets = list(self.context.resolve(dep_spec))
if not targets:
raise KeyError
return targets
except (KeyError, AddressLookupError) as e:
raise self._tool_resolve_error(e, dep_spec, jvm_tool)
def _check_underspecified_tools(self, jvm_tool, targets):
# NOTE: ScalaPlatform allows a user to specify a custom configuration. When this is
# done all of the targets must be defined by the user and defaults are set as None.
# If we catch a case of a scala-platform tool being bootstrapped and we have no user
# specified target we need to throw an exception for the user.
# It is possible for tests to insert synthetic tool targets which we honor here.
# Bootstrapped tools are inserted as synthetic. If they exist on disk they are later
# updated as non synthetic targets. If it's a synthetic target make sure it has a rev.
synthetic_targets = [t.is_synthetic for t in targets]
empty_revs = [cp.rev is None for cp in jvm_tool.classpath or []]
if any(empty_revs) and any(synthetic_targets):
raise self.ToolUnderspecified(textwrap.dedent("""
Unable to bootstrap tool: '{}' because no rev was specified. This usually
means that the tool was not defined properly in your build files and no
default option was provided to use for bootstrap.
""".format(jvm_tool.key)))
def _bootstrap_classpath(self, jvm_tool, targets):
self._check_underspecified_tools(jvm_tool, targets)
workunit_name = 'bootstrap-{}'.format(jvm_tool.key)
return self.ivy_classpath(targets, silent=True, workunit_name=workunit_name)
@memoized_property
def shader(self):
return Shader.Factory.create(self.context)
def _bootstrap_shaded_jvm_tool(self, jvm_tool, targets):
fingerprint_strategy = ShadedToolFingerprintStrategy(jvm_tool.main,
custom_rules=jvm_tool.custom_rules)
with self.invalidated(targets,
# We're the only dependent in reality since we shade.
invalidate_dependents=False,
fingerprint_strategy=fingerprint_strategy) as invalidation_check:
# If there are no vts, then there are no resolvable targets, so we exit early with an empty
# classpath. This supports the optional tool classpath case.
if not invalidation_check.all_vts:
return []
tool_vts = self.tool_vts(invalidation_check)
jar_name = '{main}-{hash}.jar'.format(main=jvm_tool.main, hash=tool_vts.cache_key.hash)
shaded_jar = os.path.join(self._tool_cache_path, 'shaded_jars', jar_name)
if not invalidation_check.invalid_vts and os.path.exists(shaded_jar):
return [shaded_jar]
# Ensure we have a single binary jar we can shade.
binary_jar = os.path.join(self._tool_cache_path, 'binary_jars', jar_name)
safe_mkdir_for(binary_jar)
classpath = self._bootstrap_classpath(jvm_tool, targets)
if len(classpath) == 1:
shutil.copy(classpath[0], binary_jar)
else:
with self.open_jar(binary_jar) as jar:
for classpath_jar in classpath:
jar.writejar(classpath_jar)
jar.main(jvm_tool.main)
# Now shade the binary jar and return that single jar as the safe tool classpath.
safe_mkdir_for(shaded_jar)
with self.shader.binary_shader(shaded_jar,
jvm_tool.main,
binary_jar,
custom_rules=jvm_tool.custom_rules,
jvm_options=self.get_options().jvm_options) as shader:
try:
result = util.execute_runner(shader,
workunit_factory=self.context.new_workunit,
workunit_name='shade-{}'.format(jvm_tool.key))
if result != 0:
raise TaskError("Shading of tool '{key}' with main class {main} for {scope} failed "
"with exit code {result}, command run was:\n\t{cmd}"
.format(key=jvm_tool.key,
main=jvm_tool.main,
scope=jvm_tool.scope,
result=result,
cmd=shader.cmd))
except Executor.Error as e:
raise TaskError("Shading of tool '{key}' with main class {main} for {scope} failed "
"with: {exception}".format(key=jvm_tool.key,
main=jvm_tool.main,
scope=jvm_tool.scope,
exception=e))
if self.artifact_cache_writes_enabled():
self.update_artifact_cache([(tool_vts, [shaded_jar])])
return [shaded_jar]
def check_artifact_cache_for(self, invalidation_check):
tool_vts = self.tool_vts(invalidation_check)
return [tool_vts]
def tool_vts(self, invalidation_check):
# The monolithic shaded tool jar is a single output dependent on the entire target set, and is
# not divisible by target. So we can only cache it keyed by the entire target set.
return VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
def _bootstrap_jvm_tool(self, dep_spec, jvm_tool):
targets = self._resolve_tool_targets(dep_spec, jvm_tool)
if jvm_tool.main is None:
return self._bootstrap_classpath(jvm_tool, targets)
else:
return self._bootstrap_shaded_jvm_tool(jvm_tool, targets)
def cached_bootstrap_classpath_callback(self, dep_spec, jvm_tool):
cache = {}
cache_lock = threading.Lock()
def bootstrap_classpath():
with cache_lock:
if 'classpath' not in cache:
cache['classpath'] = self._bootstrap_jvm_tool(dep_spec, jvm_tool)
return cache['classpath']
return bootstrap_classpath
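
# The callback returned above is a small thread-safe memoizer: the first caller performs
# the expensive tool bootstrap while holding the lock, and later callers reuse the cached
# classpath. A generic, self-contained sketch of the same pattern (illustrative only, not
# part of pants):
#
#     import threading
#
#     def cached_callback(compute):
#         cache = {}
#         lock = threading.Lock()
#
#         def call():
#             with lock:
#                 if 'value' not in cache:
#                     cache['value'] = compute()
#             return cache['value']
#         return call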
|
foursquare/pants
|
src/python/pants/backend/jvm/tasks/bootstrap_jvm_tools.py
|
Python
|
apache-2.0
| 15,536
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS Athena hook"""
import warnings
from time import sleep
from typing import Any, Dict, Optional
from botocore.paginate import PageIterator
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class AthenaHook(AwsBaseHook):
"""
Interact with AWS Athena to run, poll queries and return query results
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
:param sleep_time: Time (in seconds) to wait between two consecutive calls to check query status on Athena
:type sleep_time: int
"""
INTERMEDIATE_STATES = (
'QUEUED',
'RUNNING',
)
FAILURE_STATES = (
'FAILED',
'CANCELLED',
)
SUCCESS_STATES = ('SUCCEEDED',)
def __init__(self, *args: Any, sleep_time: int = 30, **kwargs: Any) -> None:
super().__init__(client_type='athena', *args, **kwargs) # type: ignore
self.sleep_time = sleep_time
def run_query(
self,
query: str,
query_context: Dict[str, str],
result_configuration: Dict[str, Any],
client_request_token: Optional[str] = None,
workgroup: str = 'primary',
) -> str:
"""
Run Presto query on athena with provided config and return submitted query_execution_id
:param query: Presto query to run
:type query: str
:param query_context: Context in which query need to be run
:type query_context: dict
:param result_configuration: Dict with path to store results in and config related to encryption
:type result_configuration: dict
:param client_request_token: Unique token created by user to avoid multiple executions of same query
:type client_request_token: str
:param workgroup: Athena workgroup name, when not specified, will be 'primary'
:type workgroup: str
:return: str
"""
params = {
'QueryString': query,
'QueryExecutionContext': query_context,
'ResultConfiguration': result_configuration,
'WorkGroup': workgroup,
}
if client_request_token:
params['ClientRequestToken'] = client_request_token
response = self.get_conn().start_query_execution(**params)
query_execution_id = response['QueryExecutionId']
return query_execution_id
def check_query_status(self, query_execution_id: str) -> Optional[str]:
"""
Fetch the status of submitted athena query. Returns None or one of valid query states.
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:return: str
"""
response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
state = None
try:
state = response['QueryExecution']['Status']['State']
except Exception as ex:
self.log.error('Exception while getting query state %s', ex)
finally:
# The error is being absorbed here and is being handled by the caller.
# The error is being absorbed to implement retries.
return state
def get_state_change_reason(self, query_execution_id: str) -> Optional[str]:
"""
Fetch the reason for a state change (e.g. error message). Returns None or reason string.
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:return: str
"""
response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
reason = None
try:
reason = response['QueryExecution']['Status']['StateChangeReason']
except Exception as ex:
self.log.error('Exception while getting query state change reason: %s', ex)
finally:
# The error is being absorbed here and is being handled by the caller.
# The error is being absorbed to implement retries.
return reason
def get_query_results(
self, query_execution_id: str, next_token_id: Optional[str] = None, max_results: int = 1000
) -> Optional[dict]:
"""
        Fetch submitted athena query results. Returns None if the query is in an intermediate or
        failed/cancelled state, otherwise a dict of the query output.
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:param next_token_id: The token that specifies where to start pagination.
:type next_token_id: str
:param max_results: The maximum number of results (rows) to return in this request.
:type max_results: int
:return: dict
"""
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.error('Invalid Query state')
return None
elif query_state in self.INTERMEDIATE_STATES or query_state in self.FAILURE_STATES:
self.log.error('Query is in "%s" state. Cannot fetch results', query_state)
return None
result_params = {'QueryExecutionId': query_execution_id, 'MaxResults': max_results}
if next_token_id:
result_params['NextToken'] = next_token_id
return self.get_conn().get_query_results(**result_params)
def get_query_results_paginator(
self,
query_execution_id: str,
max_items: Optional[int] = None,
page_size: Optional[int] = None,
starting_token: Optional[str] = None,
) -> Optional[PageIterator]:
"""
        Fetch submitted athena query results. Returns None if the query is in an intermediate or
        failed/cancelled state, otherwise a paginator to iterate through pages of results. If you
        wish to get all results at once, call build_full_result() on the returned PageIterator.
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:param max_items: The total number of items to return.
:type max_items: int
:param page_size: The size of each page.
:type page_size: int
:param starting_token: A token to specify where to start paginating.
:type starting_token: str
:return: PageIterator
"""
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.error('Invalid Query state (null)')
return None
if query_state in self.INTERMEDIATE_STATES or query_state in self.FAILURE_STATES:
self.log.error('Query is in "%s" state. Cannot fetch results', query_state)
return None
result_params = {
'QueryExecutionId': query_execution_id,
'PaginationConfig': {
'MaxItems': max_items,
'PageSize': page_size,
'StartingToken': starting_token,
},
}
paginator = self.get_conn().get_paginator('get_query_results')
return paginator.paginate(**result_params)
def poll_query_status(self, query_execution_id: str, max_tries: Optional[int] = None) -> Optional[str]:
"""
Poll the status of submitted athena query until query state reaches final state.
Returns one of the final states
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:param max_tries: Number of times to poll for query state before function exits
:type max_tries: int
:return: str
"""
try_number = 1
final_query_state = None # Query state when query reaches final state or max_tries reached
while True:
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.info('Trial %s: Invalid query state. Retrying again', try_number)
elif query_state in self.INTERMEDIATE_STATES:
self.log.info(
'Trial %s: Query is still in an intermediate state - %s', try_number, query_state
)
else:
self.log.info(
                    'Trial %s: Query execution completed. Final state is %s', try_number, query_state
)
final_query_state = query_state
break
if max_tries and try_number >= max_tries: # Break loop if max_tries reached
final_query_state = query_state
break
try_number += 1
sleep(self.sleep_time)
return final_query_state
def get_output_location(self, query_execution_id: str) -> str:
"""
Function to get the output location of the query results
in s3 uri format.
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:return: str
"""
output_location = None
if query_execution_id:
response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
if response:
try:
output_location = response['QueryExecution']['ResultConfiguration']['OutputLocation']
except KeyError:
self.log.error("Error retrieving OutputLocation")
raise
            else:
                raise ValueError(f"Unable to get query execution details for query id: {query_execution_id}")
else:
raise ValueError("Invalid Query execution id")
return output_location
def stop_query(self, query_execution_id: str) -> Dict:
"""
Cancel the submitted athena query
:param query_execution_id: Id of submitted athena query
:type query_execution_id: str
:return: dict
"""
return self.get_conn().stop_query_execution(QueryExecutionId=query_execution_id)
class AWSAthenaHook(AthenaHook):
"""
This hook is deprecated.
Please use :class:`airflow.providers.amazon.aws.hooks.athena.AthenaHook`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"This hook is deprecated. Please use `airflow.providers.amazon.aws.hooks.athena.AthenaHook`.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
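# Illustrative usage sketch (not part of the hook): a typical flow combining the methods
# above -- submit a query, poll until it reaches a final state, then page through the
# results. run_query() is assumed to be defined earlier in this hook, and the connection id,
# database and S3 output location are placeholder values.
if __name__ == '__main__':
    hook = AthenaHook(aws_conn_id='aws_default', sleep_time=10)
    query_id = hook.run_query(
        query='SELECT 1',
        query_context={'Database': 'my_database'},
        result_configuration={'OutputLocation': 's3://my-bucket/athena-results/'},
    )
    final_state = hook.poll_query_status(query_id, max_tries=30)
    if final_state == 'SUCCEEDED':
        for page in hook.get_query_results_paginator(query_id, page_size=1000):
            print(page['ResultSet']['Rows'])
    else:
        print('Query finished in state %s' % final_state)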
|
mistercrunch/airflow
|
airflow/providers/amazon/aws/hooks/athena.py
|
Python
|
apache-2.0
| 11,213
|
from intelligine.synergy.object.ressource.Ressource import Resource
from intelligine.cst import TYPE, TYPE_RESOURCE_EXPLOITABLE, TYPE_RESOURCE_EATABLE, COL_EATABLE, COL_SMELL, SMELL_FOOD, \
TRANSPORTABLE
class Food(Resource):
def __init__(self, collection, context):
super().__init__(collection, context)
context.metas.list.add(TYPE, self.get_id(), TYPE_RESOURCE_EXPLOITABLE)
def get_what_carry(self):
return self # dev
clone = self.__class__(self._collection, self._context)
self._collection.add_object(clone)
return clone
def transform_to_stocked(self):
self._context.metas.list.remove(TYPE, self.get_id(), TYPE_RESOURCE_EXPLOITABLE)
self._context.metas.list.add(TYPE, self.get_id(), TYPE_RESOURCE_EATABLE)
self._add_col(COL_EATABLE)
self._add_col(COL_SMELL)
self._set_smell(SMELL_FOOD)
|
buxx/intelligine
|
intelligine/synergy/object/Food.py
|
Python
|
apache-2.0
| 899
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import re
from collections import OrderedDict
from datetime import datetime
from urlparse import urlparse
import json
from operator import itemgetter, attrgetter
import pkg_resources
from pylons import tmpl_context as c, app_globals as g, response
from pylons import request
from paste.deploy.converters import asbool, aslist
from tg import expose, redirect, flash, validate, config, jsonify
from tg.decorators import with_trailing_slash, without_trailing_slash
from webob import exc
from bson import ObjectId
from ming.orm.ormsession import ThreadLocalORMSession
from ming.odm import session
import PIL
from allura.app import Application, DefaultAdminController, SitemapEntry
from allura.lib import helpers as h
from allura import version
from allura import model as M
from allura.lib.security import has_access, require_access
from allura.lib.widgets import form_fields as ffw
from allura.lib import exceptions as forge_exc
from allura.lib import plugin
from allura.controllers import BaseController
from allura.lib.decorators import require_post
from allura.tasks import export_tasks
from allura.lib.widgets.project_list import ProjectScreenshots
from . import widgets as aw
log = logging.getLogger(__name__)
class W:
markdown_editor = ffw.MarkdownEdit()
label_edit = ffw.LabelEdit()
explain_export_modal = ffw.Lightbox(
name='explain_export', trigger='#why_export')
group_card = aw.GroupCard()
permission_card = aw.PermissionCard()
group_settings = aw.GroupSettings()
new_group_settings = aw.NewGroupSettings()
screenshot_admin = aw.ScreenshotAdmin()
screenshot_list = ProjectScreenshots(draggable=True)
metadata_admin = aw.MetadataAdmin()
audit = aw.AuditLog()
page_list = ffw.PageList()
class AdminApp(Application):
'''This is the admin app. It is pretty much required for
a functioning allura project.
'''
__version__ = version.__version__
_installable_tools = None
max_instances = 0
tool_label = 'admin'
icons = {
24: 'images/admin_24.png',
32: 'images/admin_32.png',
48: 'images/admin_48.png'
}
exportable = True
has_notifications = False
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ProjectAdminController()
self.api_root = ProjectAdminRestController()
self.admin = AdminAppAdminController(self)
self.templates = pkg_resources.resource_filename(
'allura.ext.admin', 'templates')
self.sitemap = [SitemapEntry('Admin', '.')]
def is_visible_to(self, user):
'''Whether the user can view the app.'''
return has_access(c.project, 'create')(user=user)
@staticmethod
def installable_tools_for(project):
tools = []
for name, App in g.entry_points['tool'].iteritems():
cfg = M.AppConfig(project_id=project._id, tool_name=name)
app = App(project, cfg)
if app.installable:
tools.append(dict(name=name, app=App))
# prevent from saving temporary config to db
session(cfg).expunge(cfg)
tools.sort(key=lambda t: (t['app'].status_int(), t['app'].ordinal))
return [t for t in tools
if t['app'].status in project.allowed_tool_status]
@staticmethod
def exportable_tools_for(project):
tools = []
for tool in project.app_configs:
if project.app_instance(tool).exportable:
tools.append(tool)
return sorted(tools, key=lambda t: t.options.mount_point)
def main_menu(self):
'''Apps should provide their entries to be added to the main nav
:return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
'''
return [SitemapEntry('Admin', '.')]
@h.exceptionless([], log)
def sidebar_menu(self):
links = []
admin_url = c.project.url() + 'admin/'
if c.project.is_nbhd_project:
links.append(SitemapEntry('Add Project', c.project.url()
+ 'add_project', ui_icon=g.icons['add']))
nbhd_admin_url = c.project.neighborhood.url() + '_admin/'
links = links + [
SitemapEntry('Neighborhood'),
SitemapEntry('Overview', nbhd_admin_url + 'overview'),
SitemapEntry('Awards', nbhd_admin_url + 'accolades')]
else:
links += [
SitemapEntry('Welcome', admin_url),
SitemapEntry('Metadata', admin_url + 'overview', className="admin-nav-metadata"),
]
if c.project.neighborhood.name != "Users":
links += [
SitemapEntry('Screenshots', admin_url + 'screenshots'),
SitemapEntry('Categorization', admin_url + 'trove')
]
if plugin.ProjectRegistrationProvider.get().registration_date(c.project) < datetime(2016, 6, 1):
# only show transitional Tools page to older projects that may be used to it
                # no point in showing it to new projects
links.append(SitemapEntry('Tools', admin_url + 'tools_moved'))
if asbool(config.get('bulk_export_enabled', True)):
links.append(SitemapEntry('Export', admin_url + 'export'))
if c.project.is_root and has_access(c.project, 'admin')():
links.append(
SitemapEntry('User Permissions', admin_url + 'groups/', className="admin-nav-user-perms"))
if not c.project.is_root and has_access(c.project, 'admin')():
links.append(
SitemapEntry('Permissions', admin_url + 'permissions/'))
if len(c.project.neighborhood_invitations):
links.append(
SitemapEntry('Invitation(s)', admin_url + 'invitations'))
links.append(SitemapEntry('Audit Trail', admin_url + 'audit/'))
if c.project.is_nbhd_project:
links.append(SitemapEntry('Statistics', nbhd_admin_url + 'stats/'))
links.append(None)
links.append(SitemapEntry('Help', nbhd_admin_url + 'help/'))
for ep_name in sorted(g.entry_points['admin'].keys()):
admin_extension = g.entry_points['admin'][ep_name]
admin_extension().update_project_sidebar_menu(links)
return links
def admin_menu(self):
return []
def install(self, project):
pass
def bulk_export(self, f, export_path='', with_attachments=False):
json.dump(self.project, f, cls=jsonify.GenericJSON, indent=2)
class AdminExtensionLookup(object):
@expose()
def _lookup(self, name, *remainder):
for ep_name in sorted(g.entry_points['admin'].keys()):
admin_extension = g.entry_points['admin'][ep_name]
controller = admin_extension().project_admin_controllers.get(name)
if controller:
return controller(), remainder
raise exc.HTTPNotFound, name
class ProjectAdminController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
def __init__(self):
self.permissions = PermissionsController()
self.groups = GroupsController()
self.audit = AuditController()
self.ext = AdminExtensionLookup()
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_admin.html')
def index(self, **kw):
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_invitations.html')
def invitations(self):
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_overview.html')
def overview(self, **kw):
c.markdown_editor = W.markdown_editor
c.metadata_admin = W.metadata_admin
# need this because features field expects data in specific format
metadata_admin_value = h.fixed_attrs_proxy(
c.project,
features=[{'feature': f} for f in c.project.features])
c.explain_export_modal = W.explain_export_modal
show_export_control = asbool(config.get('show_export_control', False))
allow_project_delete = asbool(config.get('allow_project_delete', True))
explain_export_text = '''The purpose of this section is to determine whether your project is subject to the
provisions of the US Export Administration Regulations. You should consult section 734.4 and Supplement 2
to Part 734 for information on such items and the calculation of U.S. controlled content.
<a href="https://www.bis.doc.gov/policy-guidance/encryption" target="_blank">
https://www.bis.doc.gov/policy-guidance/encryption</a>'''
if 'us_export_contact' in config:
explain_export_text += \
'If you have additional questions, ' \
'please contact <a href="mailto:{contact}">{contact}</a>.'.format(contact=config['us_export_contact'])
return dict(show_export_control=show_export_control,
allow_project_delete=allow_project_delete,
metadata_admin_value=metadata_admin_value,
explain_export_text=explain_export_text)
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_screenshots.html')
def screenshots(self, **kw):
c.screenshot_admin = W.screenshot_admin
c.screenshot_list = W.screenshot_list
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_trove.html')
def trove(self):
c.label_edit = W.label_edit
base_troves_by_name = {t.shortname: t
for t in M.TroveCategory.query.find(dict(trove_parent_id=0))}
first_troves = aslist(config.get('trovecategories.admin.order', 'topic,license,os'), ',')
base_troves = [
base_troves_by_name.pop(t) for t in first_troves
] + sorted(base_troves_by_name.values(), key=attrgetter('fullname'))
trove_recommendations = {}
for trove in base_troves:
config_name = 'trovecategories.admin.recommended.{}'.format(trove.shortname)
recommendation_pairs = aslist(config.get(config_name, []), ',')
trove_recommendations[trove.shortname] = OrderedDict()
for pair in recommendation_pairs:
trove_id, label = pair.split('=')
trove_recommendations[trove.shortname][trove_id] = label
return dict(base_troves=base_troves,
trove_recommendations=trove_recommendations)
@expose('jinja:allura.ext.admin:templates/project_tools_moved.html')
def tools_moved(self, **kw):
return {}
@expose()
@require_post()
def update_labels(self, labels=None, **kw):
require_access(c.project, 'admin')
c.project.labels = labels.split(',')
M.AuditLog.log('updated labels')
redirect('trove')
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_install_tool.html')
def install_tool(self, tool_name=None, **kw):
if tool_name == 'subproject':
tool = {
'tool_label': 'Sub Project',
'default_mount_label': 'SubProject',
'default_mount_point': 'subproject'
}
options = []
else:
tool = g.entry_points['tool'][tool_name]
options = tool.options_on_install()
return dict(
tool_name=tool_name,
tool=tool,
options=options,
existing_mount_points=c.project.mount_points()
)
@expose()
def _lookup(self, name, *remainder):
app = c.project.app_instance(name)
if app is None:
raise exc.HTTPNotFound, name
return app.admin, remainder
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_permissions.html')
def groups(self, **kw):
return dict()
@expose()
@require_post()
@validate(W.metadata_admin, error_handler=overview)
@h.vardec
def update(self, name=None,
short_description=None,
summary='',
icon=None,
category=None,
external_homepage='',
video_url='',
support_page='',
support_page_url='',
twitter_handle='',
facebook_page='',
removal='',
moved_to_url='',
export_controlled=False,
export_control_type=None,
tracking_id='',
features=None,
**kw):
require_access(c.project, 'update')
if removal != c.project.removal:
M.AuditLog.log('change project removal status to %s', removal)
h.log_action(log, 'change project removal status').info('')
c.project.removal = removal
c.project.removal_changed_date = datetime.utcnow()
if 'delete_icon' in kw:
M.ProjectFile.query.remove(dict(project_id=c.project._id, category=re.compile(r'^icon')))
c.project.set_tool_data('allura', icon_original_size=None)
M.AuditLog.log('remove project icon')
h.log_action(log, 'remove project icon').info('')
g.post_event('project_updated')
redirect('overview')
elif 'delete' in kw:
allow_project_delete = asbool(
config.get('allow_project_delete', True))
if allow_project_delete or not c.project.is_root:
M.AuditLog.log('delete project')
h.log_action(log, 'delete project').info('')
plugin.ProjectRegistrationProvider.get().delete_project(
c.project, c.user)
redirect('overview')
elif 'undelete' in kw:
h.log_action(log, 'undelete project').info('')
M.AuditLog.log('undelete project')
plugin.ProjectRegistrationProvider.get().undelete_project(
c.project, c.user)
redirect('overview')
if name and name != c.project.name:
h.log_action(log, 'change project name').info('')
M.AuditLog.log('change project name to %s', name)
c.project.name = name
if short_description != c.project.short_description:
h.log_action(log, 'change project short description').info('')
M.AuditLog.log('change short description to %s', short_description)
c.project.short_description = short_description
if summary != c.project.summary:
h.log_action(log, 'change project summary').info('')
M.AuditLog.log('change summary to %s', summary)
c.project.summary = summary
category = category and ObjectId(category) or None
if category != c.project.category_id:
h.log_action(log, 'change project category').info('')
M.AuditLog.log('change category to %s', category)
c.project.category_id = category
if external_homepage != c.project.external_homepage:
h.log_action(log, 'change external home page').info('')
M.AuditLog.log('change external home page to %s',
external_homepage)
c.project.external_homepage = external_homepage
if video_url != c.project.video_url:
h.log_action(log, 'change video url').info('')
M.AuditLog.log('change video url to %s', video_url)
c.project.video_url = video_url
if support_page != c.project.support_page:
h.log_action(log, 'change project support page').info('')
M.AuditLog.log('change project support page to %s', support_page)
c.project.support_page = support_page
old_twitter = c.project.social_account('Twitter')
if not old_twitter or twitter_handle != old_twitter.accounturl:
h.log_action(log, 'change project twitter handle').info('')
M.AuditLog.log('change project twitter handle to %s',
twitter_handle)
c.project.set_social_account('Twitter', twitter_handle)
old_facebook = c.project.social_account('Facebook')
if not old_facebook or facebook_page != old_facebook.accounturl:
if not facebook_page or 'facebook.com' in urlparse(facebook_page).netloc:
h.log_action(log, 'change project facebook page').info('')
M.AuditLog.log(
'change project facebook page to %s', facebook_page)
c.project.set_social_account('Facebook', facebook_page)
if support_page_url != c.project.support_page_url:
h.log_action(log, 'change project support page url').info('')
M.AuditLog.log('change project support page url to %s',
support_page_url)
c.project.support_page_url = support_page_url
if moved_to_url != c.project.moved_to_url:
h.log_action(log, 'change project moved to url').info('')
M.AuditLog.log('change project moved to url to %s', moved_to_url)
c.project.moved_to_url = moved_to_url
export_controlled = asbool(export_controlled)
if export_controlled != c.project.export_controlled:
h.log_action(
log, 'change project export controlled status').info('')
M.AuditLog.log(
'change project export controlled status to %s', export_controlled)
c.project.export_controlled = export_controlled
if not export_controlled:
export_control_type = None
if export_control_type != c.project.export_control_type:
h.log_action(log, 'change project export control type').info('')
M.AuditLog.log('change project export control type to %s',
export_control_type)
c.project.export_control_type = export_control_type
if tracking_id != c.project.tracking_id:
h.log_action(log, 'change project tracking ID').info('')
M.AuditLog.log('change project tracking ID to %s', tracking_id)
c.project.tracking_id = tracking_id
features = [f['feature'].strip() for f in features or []
if f.get('feature', '').strip()]
if features != c.project.features:
h.log_action(log, 'change project features').info('')
M.AuditLog.log('change project features to %s', features)
c.project.features = features
if icon is not None and icon != '':
if c.project.icon:
M.ProjectFile.query.remove(dict(project_id=c.project._id, category=re.compile(r'^icon')))
M.AuditLog.log('update project icon')
c.project.save_icon(icon.filename, icon.file, content_type=icon.type)
g.post_event('project_updated')
flash('Saved', 'success')
redirect('overview')
def _add_trove(self, type, new_trove):
current_troves = getattr(c.project, 'trove_%s' % type)
trove_obj = M.TroveCategory.query.get(trove_cat_id=int(new_trove))
error_msg = None
if type in ['license', 'audience', 'developmentstatus', 'language'] and len(current_troves) >= 6:
error_msg = 'You may not have more than 6 of this category.'
elif type in ['topic'] and len(current_troves) >= 3:
error_msg = 'You may not have more than 3 of this category.'
elif trove_obj is not None:
if trove_obj._id not in current_troves:
current_troves.append(trove_obj._id)
M.AuditLog.log('add trove %s: %s', type, trove_obj.fullpath)
# just in case the event handling is super fast
ThreadLocalORMSession.flush_all()
c.project.last_updated = datetime.utcnow()
g.post_event('project_updated')
else:
error_msg = 'This category has already been assigned to the project.'
return (trove_obj, error_msg)
@expose('json:')
@require_post()
def add_trove_js(self, type, new_trove, **kw):
require_access(c.project, 'update')
trove_obj, error_msg = self._add_trove(type, new_trove)
return dict(trove_full_path=trove_obj.fullpath_within_type, trove_cat_id=trove_obj.trove_cat_id, error_msg=error_msg)
@expose()
@require_post()
def add_trove(self, type, new_trove, **kw):
require_access(c.project, 'update')
trove_obj, error_msg = self._add_trove(type, new_trove)
if error_msg:
flash(error_msg, 'error')
redirect('trove')
@expose()
@require_post()
def delete_trove(self, type, trove, **kw):
require_access(c.project, 'update')
trove_obj = M.TroveCategory.query.get(trove_cat_id=int(trove))
current_troves = getattr(c.project, 'trove_%s' % type)
if trove_obj is not None and trove_obj._id in current_troves:
M.AuditLog.log('remove trove %s: %s', type, trove_obj.fullpath)
current_troves.remove(trove_obj._id)
# just in case the event handling is super fast
ThreadLocalORMSession.flush_all()
c.project.last_updated = datetime.utcnow()
g.post_event('project_updated')
redirect('trove')
@expose()
@require_post()
@validate(W.screenshot_admin)
def add_screenshot(self, screenshot=None, caption=None, **kw):
require_access(c.project, 'update')
screenshots = c.project.get_screenshots()
if len(screenshots) >= 6:
flash('You may not have more than 6 screenshots per project.',
'error')
elif screenshot is not None and screenshot != '':
M.AuditLog.log('add screenshot')
sort = 1 + max([ss.sort or 0 for ss in screenshots] or [0])
M.ProjectFile.save_image(
screenshot.filename, screenshot.file, content_type=screenshot.type,
save_original=True,
original_meta=dict(
project_id=c.project._id,
category='screenshot',
caption=caption,
sort=sort),
square=True, thumbnail_size=(150, 150),
thumbnail_meta=dict(project_id=c.project._id, category='screenshot_thumb'))
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def sort_screenshots(self, **kw):
"""Sort project screenshots.
Called via ajax when screenshots are reordered via drag/drop on
the Screenshots admin page.
``kw`` is a mapping of (screenshot._id, sort_order) pairs.
"""
for s in c.project.get_screenshots():
if str(s._id) in kw:
s.sort = int(kw[str(s._id)])
g.post_event('project_updated')
@expose()
@require_post()
def delete_screenshot(self, id=None, **kw):
require_access(c.project, 'update')
if id is not None and id != '':
M.AuditLog.log('remove screenshot')
M.ProjectFile.query.remove(
dict(project_id=c.project._id, _id=ObjectId(id)))
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def edit_screenshot(self, id=None, caption=None, **kw):
require_access(c.project, 'update')
if id is not None and id != '':
M.ProjectFile.query.get(
project_id=c.project._id, _id=ObjectId(id)).caption = caption
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def join_neighborhood(self, nid):
require_access(c.project, 'admin')
if not nid:
n = M.Neighborhood.query.get(name='Projects')
c.project.neighborhood_id = n._id
flash('Joined %s' % n.name)
redirect(c.project.url() + 'admin/')
nid = ObjectId(str(nid))
if nid not in c.project.neighborhood_invitations:
flash('No invitation to that neighborhood', 'error')
redirect('.')
c.project.neighborhood_id = nid
n = M.Neighborhood.query.get(_id=nid)
flash('Joined %s' % n.name)
redirect('invitations')
def _update_mounts(self, subproject=None, tool=None, new=None, **kw):
'''
Returns the new App or Subproject, if one was installed.
Returns None otherwise.
'''
if subproject is None:
subproject = []
if tool is None:
tool = []
new_app = None
for sp in subproject:
p = M.Project.query.get(shortname=sp['shortname'],
neighborhood_id=c.project.neighborhood_id)
if sp.get('delete'):
require_access(c.project, 'admin')
M.AuditLog.log('delete subproject %s', sp['shortname'])
h.log_action(log, 'delete subproject').info(
'delete subproject %s', sp['shortname'],
meta=dict(name=sp['shortname']))
p.removal = 'deleted'
plugin.ProjectRegistrationProvider.get().delete_project(
p, c.user)
elif not new:
M.AuditLog.log('update subproject %s', sp['shortname'])
p.name = sp['name']
p.ordinal = int(sp['ordinal'])
for p in tool:
if p.get('delete'):
require_access(c.project, 'admin')
M.AuditLog.log('uninstall tool %s', p['mount_point'])
h.log_action(log, 'uninstall tool').info(
'uninstall tool %s', p['mount_point'],
meta=dict(mount_point=p['mount_point']))
c.project.uninstall_app(p['mount_point'])
elif not new:
M.AuditLog.log('update tool %s', p['mount_point'])
options = c.project.app_config(p['mount_point']).options
options.mount_label = p['mount_label']
options.ordinal = int(p['ordinal'])
if new and new.get('install'):
ep_name = new.get('ep_name', None)
if not ep_name:
require_access(c.project, 'create')
mount_point = new['mount_point'].lower() or h.nonce()
M.AuditLog.log('create subproject %s', mount_point)
h.log_action(log, 'create subproject').info(
'create subproject %s', mount_point,
meta=dict(mount_point=mount_point, name=new['mount_label']))
sp = c.project.new_subproject(mount_point)
sp.name = new['mount_label']
if 'ordinal' in new:
sp.ordinal = int(new['ordinal'])
else:
sp.ordinal = c.project.last_ordinal_value() + 1
new_app = sp
else:
require_access(c.project, 'admin')
installable_tools = AdminApp.installable_tools_for(c.project)
if not ep_name.lower() in [t['name'].lower() for t in installable_tools]:
flash('Installation limit exceeded.', 'error')
return
mount_point = new['mount_point'] or ep_name
M.AuditLog.log('install tool %s', mount_point)
h.log_action(log, 'install tool').info(
'install tool %s', mount_point,
meta=dict(tool_type=ep_name, mount_point=mount_point, mount_label=new['mount_label']))
App = g.entry_points['tool'][ep_name]
# pass only options which app expects
config_on_install = {
k: v for (k, v) in kw.iteritems()
if k in [o.name for o in App.options_on_install()]
}
new_app = c.project.install_app(
ep_name,
mount_point,
mount_label=new['mount_label'],
ordinal=int(new['ordinal']) if 'ordinal' in new else None,
**config_on_install)
g.post_event('project_updated')
g.post_event('project_menu_updated')
return new_app
@h.vardec
@expose()
@require_post()
def update_mounts(self, subproject=None, tool=None, new=None, page=0, limit=200, **kw):
if new and new['ep_name'] == u'subproject':
new['ep_name'] = ""
try:
new_app = self._update_mounts(subproject, tool, new, **kw)
if new_app:
if getattr(new_app, 'tool_label', '') == 'External Link':
flash(u'{} installed successfully.'.format(new_app.tool_label))
else:
new_url = new_app.url
if callable(new_url): # subprojects have a method instead of property
new_url = new_url()
redirect(new_url)
except forge_exc.ForgeError, exc:
flash('%s: %s' % (exc.__class__.__name__, exc.args[0]),
'error')
redirect(request.referer)
@expose('jinja:allura.ext.admin:templates/export.html')
def export(self, tools=None, with_attachments=False):
if not asbool(config.get('bulk_export_enabled', True)):
raise exc.HTTPNotFound()
if request.method == 'POST':
try:
ProjectAdminRestController().export(tools, send_email=True, with_attachments=with_attachments)
except (exc.HTTPBadRequest, exc.HTTPServiceUnavailable) as e:
flash(str(e), 'error')
redirect('.')
else:
flash(
                    'Export scheduled. You will receive an email with download instructions when complete.', 'ok')
redirect('export')
exportable_tools = AdminApp.exportable_tools_for(c.project)
apps_id = [tool._id for tool in exportable_tools]
db = M.session.project_doc_session.db
files_id = db.attachment.find({"app_config_id": {"$in": apps_id}}).distinct("file_id")
try:
total_size = db.attachment.files.aggregate([
{
"$match": {"_id": {"$in": files_id}}
},
{
"$group": {"_id": "total", "total_size": {"$sum": "$length"}}
},
{
"$project": {"_id": 0, "total_size": {"$divide": ["$total_size", 1000000]}}
}
]).get('result')[0].get('total_size')
except IndexError:
total_size = 0
return {
'tools': exportable_tools,
'status': c.project.bulk_export_status(),
'total_size': round(total_size, 3)
}
class ProjectAdminRestController(BaseController):
"""
Exposes RESTful API for project admin actions.
"""
def _check_security(self):
require_access(c.project, 'admin')
@expose('json:')
@require_post()
def mount_order(self, **kw):
if not kw:
raise exc.HTTPBadRequest('Expected kw params in the form of "ordinal: mount_point"')
try:
sorted_tools = sorted(kw.items(), key=lambda x: int(x[0]))
except ValueError:
raise exc.HTTPBadRequest('Invalid kw: expected "ordinal: mount_point"')
for ordinal, mount_point in sorted_tools:
try:
c.project.app_config(mount_point).options.ordinal = int(ordinal)
except AttributeError as e:
# Handle sub project
p = M.Project.query.get(shortname="{}/{}".format(c.project.shortname, mount_point),
neighborhood_id=c.project.neighborhood_id)
if p:
p.ordinal = int(ordinal)
M.AuditLog.log('Updated tool order')
g.post_event('project_menu_updated')
return {'status': 'ok'}
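    # Illustrative request shape (not part of this controller): mount_order expects keyword
    # params that map an ordinal to a mount point, so reordering three hypothetical tools
    # would POST form data like {'0': 'wiki', '1': 'tickets', '2': 'discussion'}.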
@expose('json:')
@require_post()
def configure_tool_grouping(self, grouping_threshold='1', **kw):
try:
grouping_threshold = int(grouping_threshold)
if grouping_threshold < 1 or grouping_threshold > 10:
raise exc.HTTPBadRequest('Invalid threshold. Expected a value between 1 and 10')
c.project.set_tool_data(
'allura', grouping_threshold=grouping_threshold)
except ValueError:
raise exc.HTTPBadRequest('Invalid threshold. Expected a value between 1 and 10')
M.AuditLog.log('Updated tool grouping threshold')
g.post_event('project_menu_updated')
return {'status': 'ok'}
@expose('json:')
def installable_tools(self, **kw):
""" List of installable tools and their default options.
"""
tools = []
for tool in AdminApp.installable_tools_for(c.project):
tools.append({
'name': tool['name'],
'description': " ".join(tool['app'].tool_description.split()),
'icons': tool['app'].icons,
'tool_label': tool['app'].tool_label,
'defaults': {
'default_options': tool['app'].default_options(),
'default_mount_label': tool['app'].default_mount_label,
                    'default_mount_point': tool['app'].default_mount_point,
}
})
if c.project.is_root:
# subprojects only allowed on top-level projects (no nesting)
tools.append({
'name': 'subproject',
'description': "With a Sub Project you can add an entire project just like any other tool.",
'tool_label': 'Sub Project',
'defaults': {
'default_mount_label': 'Sub',
'default_mount_point': 'sub',
}
})
return {'tools': tools}
@expose('json:')
@require_post()
def export(self, tools=None, send_email=False, with_attachments=False, **kw):
"""
Initiate a bulk export of the project data.
Must be given a list of tool mount points to include in the export.
The list can either be comma-separated or a repeated param, e.g.,
`export?tools=tickets&tools=discussion`.
If the tools are not provided, an invalid mount point is listed, or
        there is some other problem with the arguments, a `400 Bad Request`
response will be returned.
If an export is already currently running for this project, a
`503 Unavailable` response will be returned.
Otherwise, a JSON object of the form
`{"status": "in progress", "filename": FILENAME}` will be returned,
where `FILENAME` is the filename of the export artifact relative to
        the user's shell account directory.
"""
if not asbool(config.get('bulk_export_enabled', True)):
raise exc.HTTPNotFound()
if not tools:
raise exc.HTTPBadRequest(
'Must give at least one tool mount point to export')
tools = aslist(tools, ',')
exportable_tools = AdminApp.exportable_tools_for(c.project)
allowed = set(t.options.mount_point for t in exportable_tools)
if not set(tools).issubset(allowed):
raise exc.HTTPBadRequest('Invalid tool')
if c.project.bulk_export_status() == 'busy':
raise exc.HTTPServiceUnavailable(
'Export for project %s already running' % c.project.shortname)
# filename (potentially) includes a timestamp, so we have
# to pre-generate to be able to return it to the user
filename = c.project.bulk_export_filename()
export_tasks.bulk_export.post(tools, filename, send_email=send_email, with_attachments=with_attachments)
return {
'status': 'in progress',
'filename': filename,
}
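    # Illustrative client-side sketch (not part of this controller): the docstring above
    # describes the REST contract, so a client with admin rights could trigger an export
    # roughly like this (host, project name, tool mount points and authentication are
    # placeholders / omitted):
    #
    #   import requests
    #   resp = requests.post('https://forge.example.com/rest/p/myproject/admin/export',
    #                        data={'tools': 'tickets,discussion'})
    #   print(resp.json())   # e.g. {"status": "in progress", "filename": "..."}
    #
    # The export_status endpoint below can then be polled until it reports {"status": "ready"}.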
@expose('json:')
def admin_options(self, mount_point=None, **kw):
"""
Returns the admin options for a given mount_point
"""
if not mount_point:
raise exc.HTTPBadRequest('Must provide a mount point')
tool = c.project.app_instance(mount_point)
if tool is None:
raise exc.HTTPBadRequest('The mount point you provided was invalid')
admin_menu = tool.admin_menu()
if tool.admin_menu_delete_button:
admin_menu.append(tool.admin_menu_delete_button)
return {
'options': [dict(text=m.label, href=m.url, className=m.className)
for m in admin_menu]
}
@expose('json:')
def export_status(self, **kw):
"""
Check the status of a bulk export.
Returns an object containing only one key, `status`, whose value is
either `'busy'` or `'ready'`.
"""
status = c.project.bulk_export_status()
return {'status': status or 'ready'}
@expose('json:')
@require_post()
def install_tool(self, tool=None, mount_point=None, mount_label=None, order=None, **kw):
"""API for installing tools in current project.
Requires a valid tool, mount point and mount label names.
(All arguments are required.)
Usage example::
POST to:
/rest/p/testproject/admin/install_tool/
with params:
{
'tool': 'tickets',
'mount_point': 'mountpoint',
'mount_label': 'mountlabel',
'order': 'first|last|alpha_tool'
}
Example output (in successful case)::
{
"info": "Tool tickets with mount_point mountpoint and mount_label mountlabel was created.",
"success": true
}
"""
controller = ProjectAdminController()
if not tool or not mount_point or not mount_label:
return {
'success': False,
'info': 'All arguments required.'
}
installable_tools = AdminApp.installable_tools_for(c.project)
tools_names = [t['name'] for t in installable_tools]
if not (tool in tools_names):
return {
'success': False,
'info': 'Incorrect tool name, or limit is reached.'
}
if c.project.app_instance(mount_point) is not None:
return {
'success': False,
'info': 'Mount point already exists.',
}
if order is None:
order = 'last'
mounts = [{'ordinal': ac.options.ordinal,
'label': ac.options.mount_label,
'mount': ac.options.mount_point,
'type': ac.tool_name.lower()}
for ac in c.project.app_configs]
subs = {p.shortname: p for p in M.Project.query.find({'parent_id': c.project._id})}
for sub in subs.values():
mounts.append({'ordinal': sub.ordinal,
'mount': sub.shortname,
'type': 'sub-project'})
mounts.sort(key=itemgetter('ordinal'))
if order == 'first':
ordinal = 0
elif order == 'last':
ordinal = len(mounts)
elif order == 'alpha_tool':
tool = tool.lower()
for i, mount in enumerate(mounts):
if mount['type'] == tool and mount['label'] > mount_label:
ordinal = i
break
else:
ordinal = len(mounts)
mounts.insert(ordinal, {'ordinal': ordinal, 'type': 'new'})
for i, mount in enumerate(mounts):
if mount['type'] == 'new':
pass
elif mount['type'] == 'sub-project':
subs[mount['mount']].ordinal = i
else:
c.project.app_config(mount['mount']).options.ordinal = i
data = {
'install': 'install',
'ep_name': tool,
'ordinal': ordinal,
'mount_point': mount_point,
'mount_label': mount_label
}
params = {
'new': data
}
if kw:
params.update(**kw)
try:
controller._update_mounts(**params)
except forge_exc.ForgeError as e:
return {
'success': False,
'info': str(e),
}
return {
'success': True,
'info': 'Tool %s with mount_point %s and mount_label %s was created.'
% (tool, mount_point, mount_label)
}
@expose()
def _lookup(self, *args):
if len(args) == 0:
raise exc.HTTPNotFound, args
name, remainder = args[0], args[1:]
app = c.project.app_instance(name)
if app is None or app.admin_api_root is None:
raise exc.HTTPNotFound, name
return app.admin_api_root, remainder
class PermissionsController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_permissions.html')
def index(self, **kw):
c.card = W.permission_card
return dict(permissions=self._index_permissions())
@without_trailing_slash
@expose()
@h.vardec
@require_post()
def update(self, card=None, **kw):
permissions = self._index_permissions()
old_permissions = dict(permissions)
for args in card:
perm = args['id']
new_group_ids = args.get('new', [])
group_ids = args.get('value', [])
if isinstance(new_group_ids, basestring):
new_group_ids = [new_group_ids]
if isinstance(group_ids, basestring):
group_ids = [group_ids]
# make sure the admin group has the admin permission
if perm == 'admin':
if c.project.is_root:
pid = c.project._id
else:
pid = c.project.parent_id
admin_group_id = str(
M.ProjectRole.query.get(project_id=pid, name='Admin')._id)
if admin_group_id not in group_ids + new_group_ids:
flash(
'You cannot remove the admin group from the admin permission.', 'warning')
group_ids.append(admin_group_id)
permissions[perm] = []
role_ids = map(ObjectId, group_ids + new_group_ids)
permissions[perm] = role_ids
c.project.acl = []
for perm, role_ids in permissions.iteritems():
role_names = lambda ids: ','.join(sorted(
pr.name for pr in M.ProjectRole.query.find(dict(_id={'$in': ids}))))
old_role_ids = old_permissions.get(perm, [])
if old_role_ids != role_ids:
M.AuditLog.log('updated "%s" permissions: "%s" => "%s"',
perm, role_names(old_role_ids), role_names(role_ids))
c.project.acl += [M.ACE.allow(rid, perm) for rid in role_ids]
g.post_event('project_updated')
redirect('.')
def _index_permissions(self):
permissions = dict(
(p, []) for p in c.project.permissions)
for ace in c.project.acl:
if ace.access == M.ACE.ALLOW:
permissions[ace.permission].append(ace.role_id)
return permissions
class GroupsController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
def _index_permissions(self):
permissions = dict(
(p, []) for p in c.project.permissions)
for ace in c.project.acl:
if ace.access == M.ACE.ALLOW:
permissions[ace.permission].append(ace.role_id)
return permissions
def _map_group_permissions(self):
roles = c.project.named_roles
permissions = self._index_permissions()
permissions_by_role = dict()
auth_role = M.ProjectRole.authenticated()
anon_role = M.ProjectRole.anonymous()
for role in roles + [auth_role, anon_role]:
permissions_by_role[str(role._id)] = []
for perm in permissions:
perm_info = dict(has="no", text="Does not have permission %s" %
perm, name=perm)
role_ids = permissions[perm]
if role._id in role_ids:
perm_info['text'] = "Has permission %s" % perm
perm_info['has'] = "yes"
else:
for r in role.child_roles():
if r._id in role_ids:
perm_info['text'] = "Inherited permission %s from %s" % (
perm, r.name)
perm_info['has'] = "inherit"
break
if perm_info['has'] == "no":
if anon_role._id in role_ids:
perm_info[
'text'] = "Inherited permission %s from Anonymous" % perm
perm_info['has'] = "inherit"
elif auth_role._id in role_ids and role != anon_role:
perm_info[
'text'] = "Inherited permission %s from Authenticated" % perm
perm_info['has'] = "inherit"
permissions_by_role[str(role._id)].append(perm_info)
return permissions_by_role
@without_trailing_slash
@expose()
@require_post()
@h.vardec
def delete_group(self, group_name, **kw):
role = M.ProjectRole.by_name(group_name)
if not role:
flash('Group "%s" does not exist.' % group_name, 'error')
else:
role.delete()
M.AuditLog.log('delete group %s', group_name)
flash('Group "%s" deleted successfully.' % group_name)
g.post_event('project_updated')
redirect('.')
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_groups.html')
def index(self, **kw):
c.card = W.group_card
permissions_by_role = self._map_group_permissions()
auth_role = M.ProjectRole.authenticated()
anon_role = M.ProjectRole.anonymous()
roles = c.project.named_roles
roles.append(None)
return dict(roles=roles, permissions_by_role=permissions_by_role,
auth_role=auth_role, anon_role=anon_role)
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def change_perm(self, role_id, permission, allow="true", **kw):
if allow == "true":
M.AuditLog.log('granted permission %s to group %s', permission,
M.ProjectRole.query.get(_id=ObjectId(role_id)).name)
c.project.acl.append(M.ACE.allow(ObjectId(role_id), permission))
else:
admin_group_id = str(M.ProjectRole.by_name('Admin')._id)
if admin_group_id == role_id and permission == 'admin':
return dict(error='You cannot remove the admin permission from the admin group.')
M.AuditLog.log('revoked permission %s from group %s', permission,
M.ProjectRole.query.get(_id=ObjectId(role_id)).name)
c.project.acl.remove(M.ACE.allow(ObjectId(role_id), permission))
g.post_event('project_updated')
return self._map_group_permissions()
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def add_user(self, role_id, username, **kw):
if not username or username == '*anonymous':
return dict(error='You must choose a user to add.')
group = M.ProjectRole.query.get(_id=ObjectId(role_id))
user = M.User.query.get(username=username.strip(), pending=False)
if not group:
return dict(error='Could not find group with id %s' % role_id)
if not user:
return dict(error='User %s not found' % username)
user_role = M.ProjectRole.by_user(user, upsert=True)
if group._id in user_role.roles:
return dict(error='%s (%s) is already in the group %s.' % (user.display_name, username, group.name))
M.AuditLog.log('add user %s to %s', username, group.name)
user_role.roles.append(group._id)
if group.name == 'Admin':
for ac in c.project.app_configs:
c.project.app_instance(ac).subscribe(user)
g.post_event('project_updated')
return dict(username=username, displayname=user.display_name)
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def remove_user(self, role_id, username, **kw):
group = M.ProjectRole.query.get(_id=ObjectId(role_id))
user = M.User.by_username(username.strip())
        if not group:
            return dict(error='Could not find group with id %s' % role_id)
        if group.name == 'Admin' and len(group.users_with_role()) == 1:
            return dict(error='You must have at least one user with the Admin role.')
if not user:
return dict(error='User %s not found' % username)
user_role = M.ProjectRole.by_user(user)
if not user_role or group._id not in user_role.roles:
return dict(error='%s (%s) is not in the group %s.' % (user.display_name, username, group.name))
M.AuditLog.log('remove user %s from %s', username, group.name)
user_role.roles.remove(group._id)
g.post_event('project_updated')
return dict()
@without_trailing_slash
@expose()
@require_post()
@h.vardec
def update(self, card=None, **kw):
for pr in card:
group = M.ProjectRole.query.get(_id=ObjectId(pr['id']))
assert group.project == c.project, 'Security violation'
user_ids = pr.get('value', [])
new_users = pr.get('new', [])
if isinstance(user_ids, basestring):
user_ids = [user_ids]
if isinstance(new_users, basestring):
new_users = [new_users]
# Handle new users in groups
user_added = False
for username in new_users:
user = M.User.by_username(username.strip())
if not user:
flash('User %s not found' % username, 'error')
redirect('.')
if not user._id:
continue # never add anon users to groups
M.AuditLog.log('add user %s to %s', username, group.name)
M.ProjectRole.by_user(
user, upsert=True).roles.append(group._id)
user_added = True
# Make sure we aren't removing all users from the Admin group
if group.name == u'Admin' and not (user_ids or user_added):
flash('You must have at least one user with the Admin role.',
'warning')
redirect('.')
# Handle users removed from groups
user_ids = set(
uid and ObjectId(uid)
for uid in user_ids)
for role in M.ProjectRole.query.find(dict(user_id={'$ne': None}, roles=group._id)):
if role.user_id and role.user_id not in user_ids:
role.roles = [
rid for rid in role.roles if rid != group._id]
M.AuditLog.log('remove user %s from %s',
role.user.username, group.name)
g.post_event('project_updated')
redirect('.')
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_group.html')
def new(self):
c.form = W.new_group_settings
return dict(
group=None,
show_settings=True,
action="create")
@expose()
@require_post()
@validate(W.new_group_settings)
@h.vardec
def create(self, name=None, **kw):
if M.ProjectRole.by_name(name):
flash('%s already exists' % name, 'error')
else:
M.ProjectRole(project_id=c.project._id, name=name)
M.AuditLog.log('create group %s', name)
g.post_event('project_updated')
redirect('.')
@expose()
def _lookup(self, name, *remainder):
return GroupController(name), remainder
class GroupController(BaseController):
def __init__(self, name):
self._group = M.ProjectRole.query.get(_id=ObjectId(name))
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_group.html')
def index(self, **kw):
if self._group.name in ('Admin', 'Developer', 'Member'):
show_settings = False
action = None
else:
show_settings = True
action = self._group.settings_href + 'update'
c.form = W.group_settings
return dict(
group=self._group,
show_settings=show_settings,
action=action)
@expose()
@h.vardec
@require_post()
@validate(W.group_settings)
def update(self, _id=None, delete=None, name=None, **kw):
pr = M.ProjectRole.by_name(name)
if pr and pr._id != _id._id:
flash('%s already exists' % name, 'error')
redirect('..')
if delete:
_id.delete()
M.AuditLog.log('delete group %s', _id.name)
flash('%s deleted' % name)
redirect('..')
M.AuditLog.log('update group name %s=>%s', _id.name, name)
_id.name = name
flash('%s updated' % name)
redirect('..')
class AuditController(BaseController):
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/audit.html')
def index(self, limit=25, page=0, **kwargs):
limit = int(limit)
page = int(page)
count = M.AuditLog.query.find(dict(project_id=c.project._id)).count()
q = M.AuditLog.query.find(dict(project_id=c.project._id))
q = q.sort('timestamp', -1)
q = q.skip(page * limit)
if count > limit:
q = q.limit(limit)
else:
limit = count
c.widget = W.audit
return dict(
entries=q.all(),
limit=limit,
page=page,
count=count)
class AdminAppAdminController(DefaultAdminController):
'''Administer the admin app'''
pass
|
heiths/allura
|
Allura/allura/ext/admin/admin_main.py
|
Python
|
apache-2.0
| 55,792
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import or_, and_
from flask import jsonify, make_response, Blueprint, abort
from app.auth.acls import skip_authorization
from data.database import DEFAULT_DATABASE as db
from data.models import Vulnerability, Nvd, Description, Cpe
bp = Blueprint("api_v1", __name__, url_prefix="/api/v1")
@bp.errorhandler(403)
def api_403(ex=None):
"""Return a 403 in JSON format."""
del ex
return make_response(jsonify({"error": "Forbidden", "code": 403}), 403)
@bp.errorhandler(404)
def api_404(ex=None):
"""Return a 404 in JSON format."""
del ex
return make_response(jsonify({"error": "Not found", "code": 404}), 404)
@bp.errorhandler(500)
def api_500(ex=None):
"""Return a 500 in JSON format."""
del ex
return make_response(jsonify({"error": "Internal server error", "code": 500}), 500)
@bp.route("/product/<vendor_id>/<product_id>")
@skip_authorization
def vulns_by_product(vendor_id=None, product_id=None):
"""View vulns associated to product."""
    if vendor_id is None or product_id is None:
return abort(404)
nvd_ids = (
db.session.query(Cpe.nvd_json_id)
.filter(and_(Cpe.vendor == vendor_id, Cpe.product == product_id))
.distinct()
.all()
)
count = len(nvd_ids)
cve = db.session.query(Nvd.cve_id).filter(Nvd.id.in_(nvd_ids)).all()
return jsonify({"count": count, "cve_ids": [x for x, in cve]})
def _cpes_to_json(products):
"""Jsonify Cpes for API routes."""
count = len(products)
return jsonify(
{
"count": count,
"products": [{"product": x, "vendor": y} for x, y, in products],
}
) # yapf: disable
@bp.route("/search/product:<name>")
@skip_authorization
def search_product(name=None):
"""Return list of products matching name."""
products = (
db.session.query(Cpe.product, Cpe.vendor)
.filter(Cpe.product.like(f"%{name}%"))
.distinct()
.all()
)
return _cpes_to_json(products)
@bp.route("/search/vendor:<name>")
@skip_authorization
def search_vendor(name=None):
"""Return list of vendors matching name."""
products = (
db.session.query(Cpe.product, Cpe.vendor)
.filter(Cpe.vendor.like(f"%{name}%"))
.distinct()
.all()
)
return _cpes_to_json(products)
@bp.route("/search/vendor_or_product:<name>")
@bp.route("/search/product_or_vendor:<name>")
@skip_authorization
def search_product_or_vendor(name=None):
"""Return list of products and vendor matching name."""
products = (
db.session.query(Cpe.product, Cpe.vendor)
.filter(or_(Cpe.product.like(f"%{name}%"), Cpe.vendor.like(f"%{name}%")))
.distinct()
.all()
)
return _cpes_to_json(products)
@bp.route("/search/vendor:<vendor>/product:<product>")
@bp.route("/search/product:<product>/vendor:<vendor>")
@skip_authorization
def search_product_vendor(vendor=None, product=None):
"""Return list of products matching product and vendors matching vendor."""
if product is None or vendor is None:
return abort(404)
products = (
db.session.query(Cpe.product, Cpe.vendor)
.filter(and_(Cpe.product.like(f"%{product}%"), Cpe.vendor.like(f"%{vendor}%")))
.distinct()
.all()
)
return _cpes_to_json(products)
@bp.route("/search/description:<description>")
@skip_authorization
def vulns_for_description(description=None):
"""View vulns associated to description."""
if description is None:
return abort(404)
nvd_ids = (
db.session.query(Description.nvd_json_id)
.filter(Description.value.like(f"%{description}%"))
.distinct()
.all()
)
count = len(nvd_ids)
cve = db.session.query(Nvd.cve_id).filter(Nvd.id.in_(nvd_ids)).all()
return jsonify({"count": count, "cve_ids": [x for x, in cve]})
@bp.route("/<cve_id>")
@skip_authorization
def vuln_view(cve_id=None):
if cve_id is None:
return abort(404)
vuln = Vulnerability.query.filter_by(cve_id=cve_id).first()
if vuln is None:
vuln = Nvd.query.filter_by(cve_id=cve_id).first()
if vuln is None:
return abort(404)
return jsonify(vuln.to_json())
@bp.route("/details/<cve_id>")
@skip_authorization
def vuln_view_detailed(cve_id=None):
if cve_id is None:
return abort(404)
vuln = Vulnerability.query.filter_by(cve_id=cve_id).first()
if vuln is None:
vuln = Nvd.query.filter_by(cve_id=cve_id).first()
if vuln is None:
return abort(404)
return jsonify(vuln.to_json_full())
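# Illustrative queries (not part of the blueprint): with these routes registered under the
# /api/v1 prefix above, a client could call, for example (vendor, product and CVE id are
# placeholder values):
#
#   GET /api/v1/search/product:openssl        -> {"count": N, "products": [...]}
#   GET /api/v1/product/openssl/openssl       -> {"count": N, "cve_ids": ["CVE-...", ...]}
#   GET /api/v1/CVE-2014-0160                 -> JSON summary of that vulnerability
#   GET /api/v1/details/CVE-2014-0160         -> full JSON details, if known to the database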
|
google/vulncode-db
|
app/api/v1/routes.py
|
Python
|
apache-2.0
| 5,192
|
__source__ = 'https://www.thumbtack.com/challenges/simple-database'
Material = '''
https://github.com/PramodhN/simple-database-challenge
'''
# Time: O()
# Space: O()
#
# Description:
# Simple Database Challenge
# In the Simple Database problem, you'll implement an in-memory database similar to Redis.
# For simplicity's sake, instead of dealing with multiple clients and communicating over the network,
# your program will receive commands via standard input (stdin), and should write appropriate responses
# to standard output (stdout).
# Guidelines
#
# We recommend that you use a high-level language, like Python, Go, Haskell, Ruby, or Java.
# We're much more interested in seeing clean code and good algorithmic performance than raw throughput.
# It is very helpful to the engineers who grade these challenges if you reduce external dependencies,
# make compiling your code as simple as possible, and include instructions for compiling and/or running
# your code directly from the command line, without the use of an IDE.
# Your submission must comply with the input/output formats and performance requirements specified below.
# Data Commands
#
# Your database should accept the following commands:
# SET name value - Set the variable name to the value value. Neither variable names nor values will contain spaces.
# GET name - Print out the value of the variable name, or NULL if that variable is not set.
# UNSET name - Unset the variable name, making it just like that variable was never set.
# NUMEQUALTO value - Print out the number of variables that are currently set to value.
# If no variables equal that value, print 0.
# END - Exit the program. Your program will always receive this as its last command.
# Commands will be fed to your program one at a time, with each command on its own line.
# Any output that your program generates should end with a newline character.
# Here are some example command sequences:
# #
Java = '''
#Thought:
'''
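# A minimal sketch of the data commands described above (SET/GET/UNSET/NUMEQUALTO/END),
# added here purely for illustration. It keeps one dict of variables and one dict of value
# counts so NUMEQUALTO is O(1); transactions and other extensions are out of scope.
import sys
from collections import defaultdict
def run(stream=sys.stdin, out=sys.stdout):
    values = {}                # name -> value
    counts = defaultdict(int)  # value -> number of names currently set to it
    for line in stream:
        parts = line.split()
        if not parts:
            continue
        cmd = parts[0].upper()
        if cmd == 'END':
            return
        if cmd == 'SET':
            name, value = parts[1], parts[2]
            if name in values:
                counts[values[name]] -= 1
            values[name] = value
            counts[value] += 1
        elif cmd == 'UNSET':
            name = parts[1]
            if name in values:
                counts[values.pop(name)] -= 1
        elif cmd == 'GET':
            out.write('%s\n' % values.get(parts[1], 'NULL'))
        elif cmd == 'NUMEQUALTO':
            out.write('%d\n' % counts[parts[1]])
if __name__ == '__main__':
    run()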
|
JulyKikuAkita/PythonPrac
|
newQ/SimpleDatabaseChallenge.py
|
Python
|
apache-2.0
| 1,961
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import os.path
import re
import tempfile
import kerberos
from datetime import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, BACKEND_SESSION_KEY, authenticate, load_backend, login
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core import exceptions, urlresolvers
import django.db
from django.http import HttpResponseNotAllowed
from django.core.urlresolvers import resolve
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.translation import ugettext as _
from django.utils.http import urlquote
from django.utils.encoding import iri_to_uri
import django.views.static
import django.views.generic.simple
import desktop.conf
from desktop.context_processors import get_app_name
from desktop.lib import apputil, i18n
from desktop.lib.django_util import render, render_json, is_jframe_request
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.log.access import access_log, log_page_hit
from desktop import appmanager
from hadoop import cluster
from desktop.log import get_audit_logger
LOG = logging.getLogger(__name__)
MIDDLEWARE_HEADER = "X-Hue-Middleware-Response"
# Views inside Django that don't require login
# (see LoginAndPermissionMiddleware)
DJANGO_VIEW_AUTH_WHITELIST = [
django.views.static.serve,
django.views.generic.simple.redirect_to,
]
class AjaxMiddleware(object):
"""
Middleware that augments request to set request.ajax
for either is_ajax() (looks at HTTP headers) or ?format=json
GET parameters.
"""
def process_request(self, request):
request.ajax = request.is_ajax() or request.REQUEST.get("format", "") == "json"
return None
class ExceptionMiddleware(object):
"""
If exceptions know how to render themselves, use that.
"""
def process_exception(self, request, exception):
import traceback
tb = traceback.format_exc()
logging.info("Processing exception: %s: %s" % (i18n.smart_unicode(exception),
i18n.smart_unicode(tb)))
if isinstance(exception, PopupException):
return exception.response(request)
if isinstance(exception, StructuredException):
if request.ajax:
response = render_json(exception.response_data)
response[MIDDLEWARE_HEADER] = 'EXCEPTION'
response.status_code = getattr(exception, 'error_code', 500)
return response
else:
response = render("error.mako", request,
dict(error=exception.response_data.get("message")))
response.status_code = getattr(exception, 'error_code', 500)
return response
return None
class JFrameMiddleware(object):
"""
Updates JFrame headers to update path and push flash messages into headers.
"""
def process_response(self, request, response):
path = request.path
if request.GET:
get_params = request.GET.copy()
if "noCache" in get_params:
del get_params["noCache"]
query_string = get_params.urlencode()
if query_string:
path = request.path + "?" + query_string
response['X-Hue-JFrame-Path'] = iri_to_uri(path)
if response.status_code == 200:
if is_jframe_request(request):
if hasattr(request, "flash"):
flashes = request.flash.get()
if flashes:
response['X-Hue-Flash-Messages'] = json.dumps(flashes)
return response
class ClusterMiddleware(object):
"""
Manages setting request.fs and request.jt
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""
Sets request.fs and request.jt on every request to point to the
configured filesystem.
"""
request.fs_ref = request.REQUEST.get('fs', view_kwargs.get('fs', 'default'))
if "fs" in view_kwargs:
del view_kwargs["fs"]
try:
request.fs = cluster.get_hdfs(request.fs_ref)
except KeyError:
raise KeyError(_('Cannot find HDFS called "%(fs_ref)s".') % {'fs_ref': request.fs_ref})
if request.user.is_authenticated():
if request.fs is not None:
request.fs.setuser(request.user.username)
request.jt = cluster.get_default_mrcluster()
if request.jt is not None:
request.jt.setuser(request.user.username)
else:
request.jt = None
class NotificationMiddleware(object):
"""
Manages setting request.info and request.error
"""
def process_view(self, request, view_func, view_args, view_kwargs):
def message(title, detail=None):
if detail is None:
detail = ''
else:
detail = '<br/>%s' % detail
return '%s %s' % (title, detail)
def info(title, detail=None):
messages.info(request, message(title, detail))
def error(title, detail=None):
messages.error(request, message(title, detail))
def warn(title, detail=None):
messages.warning(request, message(title, detail))
request.info = info
request.error = error
request.warn = warn
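# Illustrative sketch, not part of the original module: once NotificationMiddleware has
# run, a view can surface user-facing messages through the helpers installed above. The
# view name and template below are hypothetical; request.info/error/warn come straight
# from process_view().
def _example_notification_view(request):
  if request.REQUEST.get('fail'):
    request.error('Operation failed', detail='Details appear after a line break.')
  else:
    request.info('Operation finished')
  return render("example.mako", request, dict())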
class AppSpecificMiddleware(object):
@classmethod
def augment_request_with_app(cls, request, view_func):
""" Stuff the app into the request for use in later-stage middleware """
if not hasattr(request, "_desktop_app"):
module = apputil.getmodule_wrapper(view_func)
request._desktop_app = apputil.get_app_for_module(module)
if not request._desktop_app and not module.__name__.startswith('django.'):
logging.debug("no app for view func: %s in %s" % (view_func, module))
def __init__(self):
self.middlewares_by_app = {}
for app in appmanager.DESKTOP_APPS:
self.middlewares_by_app[app.name] = self._load_app_middleware(app)
def _get_middlewares(self, app, type):
return self.middlewares_by_app.get(app, {}).get(type, [])
def process_view(self, request, view_func, view_args, view_kwargs):
"""View middleware"""
self.augment_request_with_app(request, view_func)
if not request._desktop_app:
return None
# Run the middlewares
ret = None
for middleware in self._get_middlewares(request._desktop_app, 'view'):
ret = middleware(request, view_func, view_args, view_kwargs)
if ret: return ret # short circuit
return ret
def process_response(self, request, response):
"""Response middleware"""
# We have the app that we stuffed in there
if not hasattr(request, '_desktop_app'):
logging.debug("No desktop_app known for request.")
return response
for middleware in reversed(self._get_middlewares(request._desktop_app, 'response')):
response = middleware(request, response)
return response
def process_exception(self, request, exception):
"""Exception middleware"""
# We have the app that we stuffed in there
if not hasattr(request, '_desktop_app'):
logging.debug("No desktop_app known for exception.")
return None
# Run the middlewares
ret = None
for middleware in self._get_middlewares(request._desktop_app, 'exception'):
ret = middleware(request, exception)
if ret: return ret # short circuit
return ret
def _load_app_middleware(cls, app):
app_settings = app.settings
if not app_settings:
return
mw_classes = app_settings.__dict__.get('MIDDLEWARE_CLASSES', [])
result = {'view': [], 'response': [], 'exception': []}
for middleware_path in mw_classes:
# This code brutally lifted from django.core.handlers
try:
dot = middleware_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured, _('%(module)s isn\'t a middleware module.') % {'module': middleware_path}
mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
try:
mod = __import__(mw_module, {}, {}, [''])
except ImportError, e:
raise exceptions.ImproperlyConfigured, _('Error importing middleware %(module)s: "%(error)s".') % {'module': mw_module, 'error': e}
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured, _('Middleware module "%(module)s" does not define a "%(class)s" class.') % {'module': mw_module, 'class':mw_classname}
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
# End brutal code lift
# We need to make sure we don't have a process_request function because we don't know what
# application will handle the request at the point process_request is called
if hasattr(mw_instance, 'process_request'):
raise exceptions.ImproperlyConfigured, \
        _('AppSpecificMiddleware module "%(module)s" has a process_request function,' + \
          ' which is not supported.') % {'module': middleware_path}
if hasattr(mw_instance, 'process_view'):
result['view'].append(mw_instance.process_view)
if hasattr(mw_instance, 'process_response'):
result['response'].insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
result['exception'].insert(0, mw_instance.process_exception)
return result
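# Illustrative sketch, not part of the original module: AppSpecificMiddleware loads
# middleware listed in each Desktop app's settings module, e.g. a hypothetical app
# settings.py containing
#     MIDDLEWARE_CLASSES = ['myapp.middleware.ExampleAppMiddleware']
# Such a class may only define process_view/process_response/process_exception; a
# process_request hook is rejected by the check above. All names here are assumptions.
class ExampleAppMiddleware(object):
  def process_view(self, request, view_func, view_args, view_kwargs):
    # Returning None lets the request continue on to the view.
    return None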
class LoginAndPermissionMiddleware(object):
"""
Middleware that forces all views (except those that opt out) through authentication.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""
    We also perform access logging in ``process_view()`` because that is where we have
    the view function, which tells us the log level. The downside is that we do not
    have the response status code, but that is not needed for this logging anyway.
"""
access_log_level = getattr(view_func, 'access_log_level', None)
# First, skip views not requiring login
# If the view has "opted out" of login required, skip
if hasattr(view_func, "login_notrequired"):
log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
return None
# There are certain django views which are also opt-out, but
# it would be evil to go add attributes to them
if view_func in DJANGO_VIEW_AUTH_WHITELIST:
log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
return None
    # If the user is logged in, check that they have permission to access the app.
if request.user.is_active and request.user.is_authenticated():
AppSpecificMiddleware.augment_request_with_app(request, view_func)
# Until we get Django 1.3 and resolve returning the URL name, we just do a match of the name of the view
try:
access_view = 'access_view:%s:%s' % (request._desktop_app, resolve(request.path)[0].__name__)
except Exception, e:
access_log(request, 'error checking view perm: %s', e, level=access_log_level)
        access_view = ''
      # Accessing one app can exercise code from another underlying app,
      # e.g. impala and spark use code from beeswax, so accessing impala shows up as beeswax here.
      # In that case we trust the URL to identify the real app whose permissions must be checked.
app_accessed = request._desktop_app
ui_app_accessed = get_app_name(request)
if app_accessed != ui_app_accessed and ui_app_accessed not in ('logs', 'accounts', 'login'):
app_accessed = ui_app_accessed
if app_accessed and \
app_accessed not in ("desktop", "home", "about") and \
not (request.user.has_hue_permission(action="access", app=app_accessed) or
request.user.has_hue_permission(action=access_view, app=app_accessed)):
access_log(request, 'permission denied', level=access_log_level)
return PopupException(
_("You do not have permission to access the %(app_name)s application.") % {'app_name': app_accessed.capitalize()}, error_code=401).response(request)
else:
log_page_hit(request, view_func, level=access_log_level)
return None
logging.info("Redirecting to login page: %s", request.get_full_path())
access_log(request, 'login redirection', level=access_log_level)
if request.ajax:
# Send back a magic header which causes Hue.Request to interpose itself
# in the ajax request and make the user login before resubmitting the
# request.
response = HttpResponse("/* login required */", content_type="text/javascript")
response[MIDDLEWARE_HEADER] = 'LOGIN_REQUIRED'
return response
else:
return HttpResponseRedirect("%s?%s=%s" % (settings.LOGIN_URL, REDIRECT_FIELD_NAME, urlquote(request.get_full_path())))
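# Illustrative sketch, not part of the original module: LoginAndPermissionMiddleware
# skips authentication for any view function carrying a "login_notrequired" attribute
# (Hue normally sets it through a decorator). The minimal equivalent below sets the
# attribute by hand; the view itself is hypothetical.
def _example_public_view(request):
  return HttpResponse("no login needed", content_type="text/plain")
_example_public_view.login_notrequired = True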
class JsonMessage(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __str__(self):
return json.dumps(self.kwargs)
class AuditLoggingMiddleware(object):
def __init__(self):
from desktop.conf import AUDIT_EVENT_LOG_DIR
if not AUDIT_EVENT_LOG_DIR.get():
LOG.info('Unloading AuditLoggingMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
try:
audit_logger = get_audit_logger()
audit_logger.debug(JsonMessage(**{
datetime.utcnow().strftime('%s'): {
'user': request.user.username if hasattr(request, 'user') else 'anonymous',
"status": response.status_code,
"impersonator": None,
"ip_address": request.META.get('REMOTE_ADDR'),
"authorization_failure": response.status_code == 401,
"service": get_app_name(request),
"url": request.path,
}
}))
response['audited'] = True
except Exception, e:
LOG.error('Could not audit the request: %s' % e)
return response
class SessionOverPostMiddleware(object):
"""
  Django puts session info in cookies, which is reasonable.
  Unfortunately, the plugin we use for file uploading
  doesn't forward the cookies, though it can send them over
  POST. So we copy the session key from the POST data back
  into the cookies.
This is the issue discussed at
http://www.stereoplex.com/two-voices/cookieless-django-sessions-and-authentication-without-cookies
and
http://digitarald.de/forums/topic.php?id=20
The author of fancyupload says (http://digitarald.de/project/fancyupload/):
Flash-request forgets cookies and session ID
See option appendCookieData. Flash FileReference is not an intelligent
upload class, the request will not have the browser cookies, Flash saves
    his own cookies. When you have sessions, append them as get-data to the
URL (e.g. "upload.php?SESSID=123456789abcdef"). Of course your session-name
can be different.
and, indeed, folks are whining about it: http://bugs.adobe.com/jira/browse/FP-78
There seem to be some other solutions:
http://robrosenbaum.com/flash/using-flash-upload-with-php-symfony/
and it may or may not be browser and plugin-dependent.
In the meanwhile, this is pretty straight-forward.
"""
def process_request(self, request):
cookie_key = settings.SESSION_COOKIE_NAME
if cookie_key not in request.COOKIES and cookie_key in request.POST:
request.COOKIES[cookie_key] = request.POST[cookie_key]
del request.POST[cookie_key]
class DatabaseLoggingMiddleware(object):
"""
If configured, logs database queries for every request.
"""
DATABASE_LOG = logging.getLogger("desktop.middleware.DatabaseLoggingMiddleware")
def process_response(self, request, response):
if desktop.conf.DATABASE_LOGGING.get():
if self.DATABASE_LOG.isEnabledFor(logging.INFO):
# This only exists if desktop.settings.DEBUG is true, hence the use of getattr
for query in getattr(django.db.connection, "queries", []):
self.DATABASE_LOG.info("(%s) %s" % (query["time"], query["sql"]))
return response
try:
import tidylib
_has_tidylib = True
except Exception, ex:
# The exception type is not ImportError. It's actually an OSError.
logging.warn("Failed to import tidylib (for debugging). Is libtidy installed?")
_has_tidylib = False
class HtmlValidationMiddleware(object):
"""
If configured, validate output html for every response.
"""
def __init__(self):
self._logger = logging.getLogger('HtmlValidationMiddleware')
if not _has_tidylib:
logging.error("HtmlValidationMiddleware not activatived: "
"Failed to import tidylib.")
return
# Things that we don't care about
self._to_ignore = (
re.compile('- Warning: <.*> proprietary attribute "data-'),
re.compile('- Warning: trimming empty'),
re.compile('- Info:'),
)
# Find the directory to write tidy html output
try:
self._outdir = os.path.join(tempfile.gettempdir(), 'hue_html_validation')
if not os.path.isdir(self._outdir):
os.mkdir(self._outdir, 0755)
except Exception, ex:
self._logger.exception('Failed to get temp directory: %s', (ex,))
self._outdir = tempfile.mkdtemp(prefix='hue_html_validation-')
# Options to pass to libtidy. See
# http://tidy.sourceforge.net/docs/quickref.html
self._options = {
'show-warnings': 1,
'output-html': 0,
'output-xhtml': 1,
'char-encoding': 'utf8',
'output-encoding': 'utf8',
'indent': 1,
'wrap': 0,
}
def process_response(self, request, response):
if not _has_tidylib or not self._is_html(request, response):
return response
html, errors = tidylib.tidy_document(response.content,
self._options,
keep_doc=True)
if not errors:
return response
# Filter out what we care about
err_list = errors.rstrip().split('\n')
err_list = self._filter_warnings(err_list)
if not err_list:
return response
try:
fn = urlresolvers.resolve(request.path)[0]
fn_name = '%s.%s' % (fn.__module__, fn.__name__)
except:
fn_name = '<unresolved_url>'
# Write the two versions of html out for offline debugging
filename = os.path.join(self._outdir, fn_name)
result = "HTML tidy result: %s [%s]:" \
"\n\t%s" \
"\nPlease see %s.orig %s.tidy\n-------" % \
(request.path, fn_name, '\n\t'.join(err_list), filename, filename)
file(filename + '.orig', 'w').write(i18n.smart_str(response.content))
file(filename + '.tidy', 'w').write(i18n.smart_str(html))
file(filename + '.info', 'w').write(i18n.smart_str(result))
self._logger.error(result)
return response
def _filter_warnings(self, err_list):
"""A hacky way to filter out things that we don't care about."""
res = [ ]
for err in err_list:
for ignore in self._to_ignore:
if ignore.search(err):
break
else:
res.append(err)
return res
def _is_html(self, request, response):
return not request.is_ajax() and \
'html' in response['Content-Type'] and \
200 <= response.status_code < 300
class SpnegoMiddleware(object):
"""
  Based on the WSGI SPNEGO middleware class posted here:
http://code.activestate.com/recipes/576992/
"""
def __init__(self):
if not 'SpnegoDjangoBackend' in desktop.conf.AUTH.BACKEND.get():
LOG.info('Unloading SpnegoMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
if 'GSS-String' in request.META:
response['WWW-Authenticate'] = request.META['GSS-String']
elif 'Return-401' in request.META:
response = HttpResponse("401 Unauthorized", content_type="text/plain",
status=401)
response['WWW-Authenticate'] = 'Negotiate'
response.status = 401
return response
def process_request(self, request):
"""
The process_request() method needs to communicate some state to the
process_response() method. The two options for this are to return an
HttpResponse object or to modify the META headers in the request object. In
order to ensure that all of the middleware is properly invoked, this code
    currently uses the latter approach. The following headers are currently used:
GSS-String:
This means that GSS authentication was successful and that we need to pass
this value for the WWW-Authenticate header in the response.
Return-401:
This means that the SPNEGO backend is in use, but we didn't get an
AUTHORIZATION header from the client. The way that the protocol works
(http://tools.ietf.org/html/rfc4559) is by having the first response to an
un-authenticated request be a 401 with the WWW-Authenticate header set to
Negotiate. This will cause the browser to re-try the request with the
AUTHORIZATION header set.
"""
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the SpnegoUserMiddleware class.")
if 'HTTP_AUTHORIZATION' in request.META:
type, authstr = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if type == 'Negotiate':
try:
result, context = kerberos.authGSSServerInit('HTTP')
if result != 1:
return
          gssstring = ''
          r = kerberos.authGSSServerStep(context, authstr)
          if r == 1:
            gssstring = kerberos.authGSSServerResponse(context)
request.META['GSS-String'] = 'Negotiate %s' % gssstring
else:
kerberos.authGSSServerClean(context)
return
username = kerberos.authGSSServerUserName(context)
kerberos.authGSSServerClean(context)
if request.user.is_authenticated():
if request.user.username == self.clean_username(username, request):
return
user = authenticate(username=username)
if user:
request.user = user
login(request, user)
return
except:
LOG.exception('Unexpected error when authenticating against KDC')
return
else:
request.META['Return-401'] = ''
return
else:
if not request.user.is_authenticated():
request.META['Return-401'] = ''
return
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[BACKEND_SESSION_KEY]
backend = load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError:
pass
return username
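# Illustrative summary, not part of the original module: the SPNEGO exchange handled by
# the methods above follows RFC 4559 roughly like this:
#   1. The client sends a request without an Authorization header.
#   2. process_request() sets Return-401, and process_response() replies with
#      HTTP 401 and "WWW-Authenticate: Negotiate".
#   3. The client retries with "Authorization: Negotiate <base64 GSSAPI token>".
#   4. process_request() runs the GSSAPI server step, stores the reply token under
#      GSS-String, and process_response() echoes it back in WWW-Authenticate.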
class HueRemoteUserMiddleware(RemoteUserMiddleware):
"""
Middleware to delegate authentication to a proxy server. The proxy server
will set an HTTP header (defaults to Remote-User) with the name of the
authenticated user. This class extends the RemoteUserMiddleware class
built into Django with the ability to configure the HTTP header and to
unload the middleware if the RemoteUserDjangoBackend is not currently
in use.
"""
def __init__(self):
if not 'RemoteUserDjangoBackend' in desktop.conf.AUTH.BACKEND.get():
LOG.info('Unloading HueRemoteUserMiddleware')
raise exceptions.MiddlewareNotUsed
self.header = desktop.conf.AUTH.REMOTE_USER_HEADER.get()
class EnsureSafeMethodMiddleware(object):
"""
  Middleware to whitelist configured HTTP request methods.
"""
def process_request(self, request):
if request.method not in desktop.conf.HTTP_ALLOWED_METHODS.get():
return HttpResponseNotAllowed(desktop.conf.HTTP_ALLOWED_METHODS.get())
class EnsureSafeRedirectURLMiddleware(object):
"""
  Middleware to whitelist configured redirect URLs.
"""
def process_response(self, request, response):
if response.status_code == 302:
if any([regexp.match(response['Location']) for regexp in desktop.conf.REDIRECT_WHITELIST.get()]):
return response
response = render("error.mako", request, dict(error=_('Redirect to %s is not allowed.') % response['Location']))
response.status_code = 403
return response
else:
return response
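# Illustrative sketch, not part of the original module: the 302 check above expects
# desktop.conf.REDIRECT_WHITELIST to yield compiled regular expressions. A stand-alone
# version of the same test, with hypothetical patterns, would be:
def _example_is_redirect_allowed(location):
  whitelist = [re.compile(r'^/.*$'), re.compile(r'^https://example\.com/.*$')]
  return any(regexp.match(location) for regexp in whitelist)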
| 2013Commons/hue | desktop/core/src/desktop/middleware.py | Python | apache-2.0 | 25,143 |
'''
Integration test for the maximum number of vscsi data volumes that can be attached on mini.
@author: zhaohao.chen
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_volume as test_volume_header
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.operations.volume_operations as vol_ops
import time
import os
import random
PROVISION = ["volumeProvisioningStrategy::ThinProvisioning","volumeProvisioningStrategy::ThickProvisioning"]
VIRTIOSCSI = "capability::virtio-scsi"
round_num = 30
volume = None
vm = None
test_obj_dict = test_state.TestStateDict()
def test():
global test_obj_dict
global round_num
global volume
global vm
VM_CPU = 2
VM_MEM = 2147483648
volume_creation_option = test_util.VolumeOption()
ps_uuid = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0].uuid
volume_creation_option.set_primary_storage_uuid(ps_uuid)
#1.create vm
vm_creation_option = test_util.VmOption()
image_name = os.environ.get('imageName_s')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_name('Mini_vm_datavolume_test')
vm_creation_option.set_cpu_num(VM_CPU)
vm_creation_option.set_memory_size(VM_MEM)
vm = test_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
vm.check()
test_obj_dict.add_vm(vm)
vm_uuid = vm.get_vm().uuid
vm_cluster_uuid = vm.get_vm().clusterUuid
mini_cluster = "miniStorage::clusterUuid::%s" % vm_cluster_uuid
#create thin/thick data volume with random disksize and random provision type
#and attach to vm
for i in range(round_num):
volume_name = "volume_%s" % i
volume_creation_option.set_name(volume_name)
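        # Size the volume in 512-byte sectors: hold back roughly 1 MiB of the primary
        # storage's available capacity, split the remainder into 20 shares (assumption:
        # headroom so repeated rounds do not exhaust the pool), then pick a random
        # sector count of at least 2048 (1 MiB) and convert it back to bytes.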
max_size = (res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0].availableCapacity - 1048576)/(20 * 512)
disk_size = random.randint(2048, max_size) * 512
volume_creation_option.set_diskSize(disk_size)
volume_creation_option.set_system_tags([random.choice(PROVISION), VIRTIOSCSI, mini_cluster])
volume = test_volume_header.ZstackTestVolume()
volume.set_volume(vol_ops.create_volume_from_diskSize(volume_creation_option))
volume.check()
test_obj_dict.add_volume(volume)
try:
volume.attach(vm)
except Exception as e:
#test_util.test_logger(e)
test_util.test_pass('Allowed max num of attached vscsi is %s' % i)
    test_util.test_fail("Allowed max num of attached vscsi is not %s" % round_num)
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
def env_recover():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
| zstackio/zstack-woodpecker | integrationtest/vm/mini/volume/test_max_allowed_attached_vscsi_volume.py | Python | apache-2.0 | 3,208 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""
import netaddr
import nova.context
from nova import db
from nova import exception
from nova import flags
from nova import log
from nova import network
from nova.network import model as network_model
from nova import notifications
from nova.notifier import api as notifier_api
from nova import utils
FLAGS = flags.FLAGS
LOG = log.getLogger(__name__)
def notify_usage_exists(context, instance_ref, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
"""Generates 'exists' notification for an instance for usage auditing
purposes.
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
:param ignore_missing_network_data: if True, log any exceptions generated
while getting network info; if False, raise the exception.
:param system_metadata: system_metadata DB entries for the instance,
if not None. *NOTE*: Currently unused here in trunk, but needed for
potential custom modifications.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification if not None.
"""
audit_start, audit_end = notifications.audit_period_bounds(current_period)
bw = notifications.bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data)
if system_metadata is None:
try:
system_metadata = db.instance_system_metadata_get(
context, instance_ref.uuid)
except exception.NotFound:
system_metadata = {}
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
extra_info = dict(audit_period_beginning=str(audit_start),
audit_period_ending=str(audit_end),
bandwidth=bw, image_meta=image_meta)
if extra_usage_info:
extra_info.update(extra_usage_info)
notify_about_instance_usage(context, instance_ref, 'exists',
system_metadata=system_metadata, extra_usage_info=extra_info)
def legacy_network_info(network_model):
"""
Return the legacy network_info representation of the network_model
"""
def get_ip(ip):
if not ip:
return None
return ip['address']
def fixed_ip_dict(ip, subnet):
if ip['version'] == 4:
netmask = str(subnet.as_netaddr().netmask)
else:
netmask = subnet.as_netaddr()._prefixlen
return {'ip': ip['address'],
'enabled': '1',
'netmask': netmask,
'gateway': get_ip(subnet['gateway'])}
def get_meta(model, key, default=None):
if 'meta' in model and key in model['meta']:
return model['meta'][key]
return default
def convert_routes(routes):
routes_list = []
for route in routes:
r = {'route': str(netaddr.IPNetwork(route['cidr']).network),
'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
'gateway': get_ip(route['gateway'])}
routes_list.append(r)
return routes_list
network_info = []
for vif in network_model:
if not vif['network'] or not vif['network']['subnets']:
continue
network = vif['network']
# NOTE(jkoelker) The legacy format only supports one subnet per
# network, so we only use the 1st one of each type
# NOTE(tr3buchet): o.O
v4_subnets = []
v6_subnets = []
for subnet in vif['network']['subnets']:
if subnet['version'] == 4:
v4_subnets.append(subnet)
else:
v6_subnets.append(subnet)
subnet_v4 = None
subnet_v6 = None
if v4_subnets:
subnet_v4 = v4_subnets[0]
if v6_subnets:
subnet_v6 = v6_subnets[0]
if not subnet_v4:
raise exception.NovaException(
message=_('v4 subnets are required for legacy nw_info'))
routes = convert_routes(subnet_v4['routes'])
should_create_bridge = get_meta(network, 'should_create_bridge',
False)
should_create_vlan = get_meta(network, 'should_create_vlan', False)
gateway = get_ip(subnet_v4['gateway'])
dhcp_server = get_meta(subnet_v4, 'dhcp_server')
network_dict = dict(bridge=network['bridge'],
id=network['id'],
cidr=subnet_v4['cidr'],
cidr_v6=subnet_v6['cidr'] if subnet_v6 else None,
vlan=get_meta(network, 'vlan'),
injected=get_meta(network, 'injected', False),
multi_host=get_meta(network, 'multi_host',
False),
bridge_interface=get_meta(network,
'bridge_interface'))
# NOTE(tr3buchet): the 'ips' bit here is tricky, we support a single
# subnet but we want all the IPs to be there
# so we use the v4_subnets[0] and its IPs are first
# so that eth0 will be from subnet_v4, the rest of the
# IPs will be aliased eth0:1 etc and the gateways from
# their subnets will not be used
info_dict = dict(label=network['label'],
broadcast=str(subnet_v4.as_netaddr().broadcast),
mac=vif['address'],
vif_uuid=vif['id'],
rxtx_cap=get_meta(network, 'rxtx_cap', 0),
dns=[get_ip(ip) for ip in subnet_v4['dns']],
ips=[fixed_ip_dict(ip, subnet)
for subnet in v4_subnets
for ip in subnet['ips']],
should_create_bridge=should_create_bridge,
should_create_vlan=should_create_vlan,
dhcp_server=dhcp_server)
if routes:
info_dict['routes'] = routes
if gateway:
info_dict['gateway'] = gateway
if v6_subnets:
if subnet_v6['gateway']:
info_dict['gateway_v6'] = get_ip(subnet_v6['gateway'])
info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6)
for ip in subnet_v6['ips']]
network_info.append((network_dict, info_dict))
return network_info
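# Illustrative sketch, not part of the original module: each element appended above is a
# (network_dict, info_dict) pair. With made-up values, a minimal entry looks like:
#     ({'bridge': 'br100', 'id': 1, 'cidr': '10.0.0.0/24', 'cidr_v6': None,
#       'vlan': None, 'injected': False, 'multi_host': False,
#       'bridge_interface': 'eth0'},
#      {'label': 'private', 'broadcast': '10.0.0.255', 'mac': 'aa:bb:cc:dd:ee:ff',
#       'vif_uuid': 'vif-uuid', 'rxtx_cap': 0, 'dns': ['8.8.8.8'],
#       'ips': [{'ip': '10.0.0.2', 'enabled': '1', 'netmask': '255.255.255.0',
#                'gateway': '10.0.0.1'}],
#       'should_create_bridge': False, 'should_create_vlan': False,
#       'dhcp_server': None, 'gateway': '10.0.0.1'})
# The 'routes', 'gateway_v6' and 'ip6s' keys are added only when present in the model.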
def notify_about_instance_usage(context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, host=None):
"""
Send a notification about an instance.
:param event_suffix: Event type like "delete.start" or "exists"
:param network_info: Networking information, if provided.
:param system_metadata: system_metadata DB entries for the instance,
if provided.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification.
:param host: Compute host for the instance, if specified. Default is
FLAGS.host
"""
if not host:
host = FLAGS.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = notifications.usage_from_instance(context, instance,
network_info, system_metadata, **extra_usage_info)
notifier_api.notify(context, 'compute.%s' % host,
'compute.instance.%s' % event_suffix,
notifier_api.INFO, usage_info)
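# Illustrative usage, not part of the original module: a compute manager would typically
# wrap an operation with start/end notifications, e.g.
#     notify_about_instance_usage(context, instance, 'delete.start')
#     ... perform the deletion ...
#     notify_about_instance_usage(context, instance, 'delete.end',
#                                 extra_usage_info={'message': 'success'})
# The 'message' key is a hypothetical extra value; entries in extra_usage_info simply
# extend or override the generated usage payload.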
| josephsuh/extra-specs | nova/compute/utils.py | Python | apache-2.0 | 8,578 |
import sys
def dft(dihedrals):
"""Writes dft calculations for nwchem. This is wrong. Uses dihedrals. Runs
Keyword Arguments:
dihedrals - The list of dihedrals to generate a nwchem output from. If the dihedral is marked as dft it will run
"""
dftfile = open('outputs/dft.nw','w')
sys.stdout = dftfile
print 'title "SMDPPEH with displacement: 04.00 Angstroms"\n'
counter = "A"
for i in range(len(dihedrals)):
if dihedrals[i].dft:
print 'geometry mol%s units angstroms noautoz noautosym' % counter
print "%s %s %s %s" % (dihedrals[i].dihedral_master1.atom_type,dihedrals[i].dihedral_master1.x_pos,dihedrals[i].dihedral_master1.y_pos,dihedrals[i].dihedral_master1.z_pos)
print "%s %s %s %s" % (dihedrals[i].dihedral_master2.atom_type,dihedrals[i].dihedral_master2.x_pos,dihedrals[i].dihedral_master2.y_pos,dihedrals[i].dihedral_master2.z_pos)
print "%s %s %s %s" % (dihedrals[i].dihedral_slave1.atom_type,dihedrals[i].dihedral_slave1.x_pos,dihedrals[i].dihedral_slave1.y_pos,dihedrals[i].dihedral_slave1.z_pos)
print "%s %s %s %s" % (dihedrals[i].dihedral_slave2.atom_type,dihedrals[i].dihedral_slave2.x_pos,dihedrals[i].dihedral_slave2.y_pos,dihedrals[i].dihedral_slave2.z_pos)
print "end\n"
counter = chr(ord(counter)+1)
print "basis"
print " * library 6-31++G*"
print "end\n"
counter = "A"
for i in range(len(dihedrals)):
if dihedrals[i].dft:
print "set geometry mol%s" % counter
print "dft"
print " cdft 1 60 charge -1"
print "end\n"
print "task dft ignore\n"
counter = chr(ord(counter)+1)
dftfile.close()
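# Illustrative sketch, not part of the original file: dft() only reads a few attributes
# from each dihedral (the four atom objects and the dft flag). A hypothetical minimal
# stand-in satisfying that interface, for experimentation, could be:
import collections
Atom = collections.namedtuple('Atom', ['atom_type', 'x_pos', 'y_pos', 'z_pos'])
class ExampleDihedral(object):
    def __init__(self, master1, master2, slave1, slave2, dft=True):
        self.dihedral_master1 = master1
        self.dihedral_master2 = master2
        self.dihedral_slave1 = slave1
        self.dihedral_slave2 = slave2
        self.dft = dft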
| sipjca/cmlparser_py | write_nwchem.py | Python | apache-2.0 | 1,749 |
"""
rhn_schema_version - Command ``/usr/bin/rhn-schema-version``
============================================================
Parse the output of command ``/usr/bin/rhn-schema-version``.
"""
from .. import parser
from insights.specs import rhn_schema_version
@parser(rhn_schema_version)
def rhn_schema_version(context):
"""
Function to parse the output of command ``/usr/bin/rhn-schema-version``.
Sample input::
5.6.0.10-2.el6sat
Examples:
>>> db_ver = shared[rhn_schema_version]
>>> db_ver
'5.6.0.10-2.el6sat'
"""
if context.content:
content = context.content
if len(content) == 1 and 'No such' not in content[0]:
ver = content[0].strip()
if ver:
return ver
| wcmitchell/insights-core | insights/parsers/rhn_schema_version.py | Python | apache-2.0 | 778 |
# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fibre Channel Driver for EMC VNX array based on CLI.
"""
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
class EMCCLIFCDriver(driver.FibreChannelDriver):
"""EMC FC Driver for VNX using CLI.
Version history:
1.0.0 - Initial driver
2.0.0 - Thick/thin provisioning, robust enhancement
3.0.0 - Array-based Backend Support, FC Basic Support,
Target Port Selection for MPIO,
Initiator Auto Registration,
Storage Group Auto Deletion,
Multiple Authentication Type Support,
Storage-Assisted Volume Migration,
SP Toggle for HA
3.0.1 - Security File Support
4.0.0 - Advance LUN Features (Compression Support,
Deduplication Support, FAST VP Support,
FAST Cache Support), Storage-assisted Retype,
External Volume Management, Read-only Volume,
FC Auto Zoning
4.1.0 - Consistency group support
5.0.0 - Performance enhancement, LUN Number Threshold Support,
Initiator Auto Deregistration,
Force Deleting LUN in Storage Groups,
robust enhancement
5.1.0 - iSCSI multipath enhancement
5.2.0 - Pool-aware scheduler support
5.3.0 - Consistency group modification support
"""
def __init__(self, *args, **kwargs):
super(EMCCLIFCDriver, self).__init__(*args, **kwargs)
self.cli = emc_vnx_cli.getEMCVnxCli(
'FC',
configuration=self.configuration)
self.VERSION = self.cli.VERSION
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a volume."""
return self.cli.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self.cli.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
return self.cli.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
self.cli.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
self.cli.delete_volume(volume)
def migrate_volume(self, ctxt, volume, host):
"""Migrate volume via EMC migration functionality."""
return self.cli.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.cli.retype(ctxt, volume, new_type, diff, host)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.cli.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.cli.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
@zm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
The initiator_target_map is a map that represents the remote wwn(s)
and a list of wwns which are visible to the remote wwn(s).
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
'access_mode': 'rw'
'initiator_target_map': {
'1122334455667788': ['1234567890123']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'access_mode': 'rw'
'initiator_target_map': {
'1122334455667788': ['1234567890123',
'0987654321321']
}
}
}
"""
conn_info = self.cli.initialize_connection(volume,
connector)
LOG.debug("Exit initialize_connection"
" - Returning FC connection info: %(conn_info)s."
% {'conn_info': conn_info})
return conn_info
@zm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
conn_info = self.cli.terminate_connection(volume, connector)
LOG.debug("Exit terminate_connection"
" - Returning FC connection info: %(conn_info)s."
% {'conn_info': conn_info})
return conn_info
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
data = self.cli.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'EMCCLIFCDriver'
data['storage_protocol'] = 'FC'
self._stats = data
def manage_existing(self, volume, existing_ref):
"""Manage an existing lun in the array.
        The LUN should be in a manageable pool backend, otherwise an
        error is returned.
        Rename the backend storage object so that it matches
        volume['name'], which is how drivers traditionally map between a
        cinder volume and the associated backend storage object.
existing_ref:{
'id':lun_id
}
"""
LOG.debug("Reference lun id %s." % existing_ref['id'])
self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
"""
return self.cli.manage_existing_get_size(volume, existing_ref)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
return self.cli.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group):
"""Deletes a consistency group."""
return self.cli.delete_consistencygroup(
self, context, group)
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates a cgsnapshot."""
return self.cli.create_cgsnapshot(
self, context, cgsnapshot)
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot."""
return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
def get_pool(self, volume):
"""Returns the pool name of a volume."""
return self.cli.get_pool(volume)
def update_consistencygroup(self, context, group,
add_volumes,
remove_volumes):
"""Updates LUNs in consistency group."""
return self.cli.update_consistencygroup(context, group,
add_volumes,
remove_volumes)
| Akrog/cinder | cinder/volume/drivers/emc/emc_cli_fc.py | Python | apache-2.0 | 9,214 |