code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from clip2zeus.common import Clip2ZeusApp, Clip2ZeusCtl, DEFAULT_PORT
def main():
    """Parse command-line arguments and dispatch a command to Clip2ZeusCtl.

    Exits with an error message listing the exposed commands when no
    command is supplied on the command line.
    """
    from optparse import OptionParser
    import sys

    parser = OptionParser()
    parser.add_option('-p', '--port', dest='port', default=DEFAULT_PORT,
                      help='The port for the daemon to listen on')
    options, args = parser.parse_args()

    if not args:
        sys.exit('Please specify a command: %s' % ', '.join(Clip2ZeusApp.EXPOSED))

    # first positional argument is the command; the rest are its arguments
    command = args.pop(0)
    controller = Clip2ZeusCtl(port=options.port)
    controller.execute_command(command, args)
# Standard script entry point.
if __name__ == '__main__':
    main()
| codekoala/clip2zeus | clip2zeus/clip2zeus_ctl.py | Python | mit | 632 |
"""A module for handle .ndata files for Swift."""
from __future__ import annotations
# standard libraries
import binascii
import calendar
import datetime
import io
import logging
import json
import numpy
import numpy.typing
import os
import pathlib
import struct
import threading
import time
import typing
# local libraries
from nion.swift.model import StorageHandler
from nion.swift.model import Utility
from nion.utils import Geometry
PersistentDictType = typing.Dict[str, typing.Any]
_NDArray = numpy.typing.NDArray[typing.Any]
# http://en.wikipedia.org/wiki/Zip_(file_format)
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# https://issues.apache.org/jira/browse/COMPRESS-210
# http://proger.i-forge.net/MS-DOS_date_and_time_format/OFz
def make_directory_if_needed(directory_path: str) -> None:
    """Create directory_path (including parents) if it does not already exist.

    Raises OSError if the path exists but is not a directory.
    """
    if not os.path.exists(directory_path):
        os.makedirs(directory_path)
    elif not os.path.isdir(directory_path):
        # path exists but is a regular file (or other non-directory)
        raise OSError("Path is not a directory:", directory_path)
def write_local_file(fp: typing.BinaryIO, name_bytes: bytes, writer: typing.Callable[[typing.BinaryIO], int], dt: datetime.datetime) -> typing.Tuple[int, int]:
    """
    Writes a zip file local file header structure at the current file position.

    Returns data_len, crc32 for the data.

    The crc32 and length fields are written as zero placeholders first, then
    back-patched once the writer callback has produced the data.

    :param fp: the file pointer to which to write the header
    :param name_bytes: the name of the file, as bytes
    :param writer: a function taking an fp parameter to do the writing, returns crc32
    :param dt: the datetime to write to the archive
    """
    fp.write(struct.pack('I', 0x04034b50))  # local file header signature
    fp.write(struct.pack('H', 10))  # extract version (default)
    fp.write(struct.pack('H', 0))  # general purpose bits
    fp.write(struct.pack('H', 0))  # compression method (0 = stored)
    # encode the datetime in MS-DOS format: date is years-since-1980/month/day;
    # time has two-second resolution, so the seconds value is halved (bug fix:
    # storing raw seconds overflowed the 5-bit field into the minutes bits).
    msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
    msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second) // 2
    fp.write(struct.pack('H', msdos_time))  # last mod file time
    fp.write(struct.pack('H', msdos_date))  # last mod file date
    crc32_pos = fp.tell()
    fp.write(struct.pack('I', 0))  # crc32 placeholder
    data_len_pos = fp.tell()
    fp.write(struct.pack('I', 0))  # compressed length placeholder
    fp.write(struct.pack('I', 0))  # uncompressed length placeholder
    fp.write(struct.pack('H', len(name_bytes)))  # name length
    fp.write(struct.pack('H', 0))  # extra length
    fp.write(name_bytes)
    data_start_pos = fp.tell()
    crc32 = writer(fp)  # delegate the data write; writer returns the crc32
    data_end_pos = fp.tell()
    data_len = data_end_pos - data_start_pos
    # back-patch crc32 and both length fields now that the data size is known
    fp.seek(crc32_pos)
    fp.write(struct.pack('I', crc32))  # crc32
    fp.seek(data_len_pos)
    fp.write(struct.pack('I', data_len))  # compressed length (stored: equals uncompressed)
    fp.write(struct.pack('I', data_len))  # uncompressed length
    fp.seek(data_end_pos)
    return data_len, crc32
def write_directory_data(fp: typing.BinaryIO, offset: int, name_bytes: bytes, data_len: int, crc32: int, dt: datetime.datetime) -> None:
    """
    Write a zip file directory entry at the current file position

    :param fp: the file pointer to which to write the header
    :param offset: the offset of the associated local file header
    :param name_bytes: the name of the file, as bytes
    :param data_len: the length of data that will be written to the archive
    :param crc32: the crc32 of the data to be written
    :param dt: the datetime to write to the archive
    """
    fp.write(struct.pack('I', 0x02014b50))  # central directory header signature
    fp.write(struct.pack('H', 10))  # made by version (default)
    fp.write(struct.pack('H', 10))  # extract version (default)
    fp.write(struct.pack('H', 0))  # general purpose bits
    fp.write(struct.pack('H', 0))  # compression method (0 = stored)
    # encode the datetime in MS-DOS format: date is years-since-1980/month/day;
    # time has two-second resolution, so the seconds value is halved (bug fix:
    # storing raw seconds overflowed the 5-bit field into the minutes bits).
    msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
    msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second) // 2
    fp.write(struct.pack('H', msdos_time))  # last mod file time
    fp.write(struct.pack('H', msdos_date))  # last mod file date
    fp.write(struct.pack('I', crc32))  # crc32
    fp.write(struct.pack('I', data_len))  # compressed length
    fp.write(struct.pack('I', data_len))  # uncompressed length
    fp.write(struct.pack('H', len(name_bytes)))  # name length
    fp.write(struct.pack('H', 0))  # extra length
    fp.write(struct.pack('H', 0))  # comments length
    fp.write(struct.pack('H', 0))  # disk number
    fp.write(struct.pack('H', 0))  # internal file attributes
    fp.write(struct.pack('I', 0))  # external file attributes
    fp.write(struct.pack('I', offset))  # relative offset of file header
    fp.write(name_bytes)
def write_end_of_directory(fp: typing.BinaryIO, dir_size: int, dir_offset: int, count: int) -> None:
    """
    Write the zip end-of-central-directory record at the current file position.

    :param fp: the file pointer to which to write the record
    :param dir_size: the total size in bytes of the central directory
    :param dir_offset: the file offset of the first central directory header
    :param count: the number of files in the archive
    """
    # field layout of the end-of-central-directory record, written in order
    record = (
        ('I', 0x06054b50),  # end of central directory signature
        ('H', 0),           # number of this disk
        ('H', 0),           # disk where central directory starts
        ('H', count),       # central directory records on this disk
        ('H', count),       # total central directory records
        ('I', dir_size),    # central directory size in bytes
        ('I', dir_offset),  # offset of start of central directory
        ('H', 0),           # comment length
    )
    for fmt, value in record:
        fp.write(struct.pack(fmt, value))
def write_zip_fp(fp: typing.BinaryIO, data: typing.Optional[_NDArray], properties: PersistentDictType,
                 dir_data_list: typing.Optional[typing.List[typing.Tuple[int, bytes, int, int]]] = None) -> None:
    """
    Write custom zip file of data and properties to fp

    :param fp: the file pointer to which to write the archive
    :param data: the data to write to the file; may be None
    :param properties: the properties to write to the file; may be None
    :param dir_data_list: optional list of directory header information structures

    If dir_data_list is specified, data should be None and properties should
    be specified. Then the existing data structure will be left alone and only
    the directory headers and end of directory header will be written.

    Otherwise, if both data and properties are specified, both are written
    out in full.

    The properties param must not change during this method. Callers should
    take care to ensure this does not happen.
    """
    assert data is not None or properties is not None
    # dir_data_list has the format: local file record offset, name, data length, crc32
    dir_data_list = list() if dir_data_list is None else dir_data_list
    dt = datetime.datetime.now()
    if data is not None:
        offset_data = fp.tell()
        def write_data(fp: typing.BinaryIO) -> int:
            # save the array, then re-read the .npy header bytes so the crc32
            # can be computed over header + raw array bytes without serializing twice
            numpy_start_pos = fp.tell()
            numpy.save(fp, data)  # type: ignore
            numpy_end_pos = fp.tell()
            fp.seek(numpy_start_pos)
            assert data is not None
            data_c = numpy.require(data, dtype=data.dtype, requirements=["C_CONTIGUOUS"])
            header_data = fp.read((numpy_end_pos - numpy_start_pos) - data_c.nbytes)  # read the header
            data_crc32 = binascii.crc32(data_c.data, binascii.crc32(header_data)) & 0xFFFFFFFF
            fp.seek(numpy_end_pos)
            return data_crc32
        data_len, crc32 = write_local_file(fp, b"data.npy", write_data, dt)
        dir_data_list.append((offset_data, b"data.npy", data_len, crc32))
    if properties is not None:
        json_str = str()
        try:
            class JSONEncoder(json.JSONEncoder):
                # serialize Geometry point/size/rect values as plain tuples
                def default(self, obj: typing.Any) -> typing.Any:
                    if isinstance(obj, Geometry.IntPoint) or isinstance(obj, Geometry.IntSize) or isinstance(obj, Geometry.IntRect) or isinstance(obj, Geometry.FloatPoint) or isinstance(obj, Geometry.FloatSize) or isinstance(obj, Geometry.FloatRect):
                        return tuple(obj)
                    else:
                        return json.JSONEncoder.default(self, obj)
            json_io = io.StringIO()
            json.dump(properties, json_io, cls=JSONEncoder)
            json_str = json_io.getvalue()
        except Exception as e:
            # catch exceptions to avoid corrupt zip files
            import traceback
            # bug fix: the original concatenated str(e) onto the format string
            # ("...%s" + str(e)), leaving the %s placeholder unfilled in the log;
            # pass the exception as a lazy logging argument instead.
            logging.error("Exception writing zip file: %s", e)
            traceback.print_exc()
            traceback.print_stack()
        def write_json(fp: typing.BinaryIO) -> int:
            json_bytes = bytes(json_str, 'ISO-8859-1')
            fp.write(json_bytes)
            return binascii.crc32(json_bytes) & 0xFFFFFFFF
        offset_json = fp.tell()
        json_len, json_crc32 = write_local_file(fp, b"metadata.json", write_json, dt)
        dir_data_list.append((offset_json, b"metadata.json", json_len, json_crc32))
    # write the central directory records followed by the end-of-directory record
    dir_offset = fp.tell()
    for offset, name_bytes, data_len, crc32 in dir_data_list:
        write_directory_data(fp, offset, name_bytes, data_len, crc32, dt)
    dir_size = fp.tell() - dir_offset
    write_end_of_directory(fp, dir_size, dir_offset, len(dir_data_list))
    # drop any stale bytes beyond the new end of the archive
    fp.truncate()
def write_zip(file_path: str, data: typing.Optional[_NDArray], properties: PersistentDictType) -> None:
    """
    Write a custom (uncompressed) zip file containing data and properties.

    :param file_path: the path of the zip file to write
    :param data: the data to write to the file; may be None
    :param properties: the properties to write to the file; may be None

    The properties param must not change during this method. Callers should
    take care to ensure this does not happen.

    See write_zip_fp.
    """
    # open for read/write so write_zip_fp can seek and truncate as needed
    with open(file_path, "w+b") as zip_fp:
        write_zip_fp(zip_fp, data, properties)
def parse_zip(fp: typing.BinaryIO) -> typing.Tuple[typing.Dict[int, typing.Tuple[bytes, int, int, int]], typing.Dict[bytes, typing.Tuple[int, int]], typing.Optional[typing.Tuple[int, int]]]:
    """
    Parse the zip file headers at fp

    :param fp: the file pointer from which to parse the zip file
    :return: A tuple of local files, directory headers, and end of central directory

    The local files are a dictionary where the keys are the local file offset and the
    values are each a tuple consisting of the name, data position, data length, and crc32.

    The directory headers are a dictionary where the keys are the names of the files
    and the values are a tuple consisting of the directory header position, and the
    associated local file position.

    The end of central directory is a tuple consisting of the location of the end of
    central directory header and the location of the first directory header.

    This method will seek to location 0 of fp and leave fp at end of file.

    Raises IOError if an unrecognized record signature is encountered.
    Assumes entries are stored uncompressed (the recorded data length is used
    to skip over the data).
    """
    local_files: typing.Dict[int, typing.Tuple[bytes, int, int, int]] = dict()
    dir_files: typing.Dict[bytes, typing.Tuple[int, int]] = dict()
    eocd: typing.Optional[typing.Tuple[int, int]] = None
    fp.seek(0)
    # scan records sequentially; each record type is identified by its signature
    while True:
        pos = fp.tell()
        signature = struct.unpack('I', fp.read(4))[0]
        if signature == 0x04034b50:
            # local file header: crc32 at offset 14, data length at 18,
            # name length at 26, extra length at 28
            fp.seek(pos + 14)
            crc32 = struct.unpack('I', fp.read(4))[0]
            fp.seek(pos + 18)
            data_len = struct.unpack('I', fp.read(4))[0]
            fp.seek(pos + 26)
            name_len = struct.unpack('H', fp.read(2))[0]
            extra_len = struct.unpack('H', fp.read(2))[0]
            name_bytes = fp.read(name_len)
            fp.seek(extra_len, os.SEEK_CUR)
            data_pos = fp.tell()
            fp.seek(data_len, os.SEEK_CUR)  # skip past the stored data
            local_files[pos] = (name_bytes, data_pos, data_len, crc32)
        elif signature == 0x02014b50:
            # central directory header: name/extra/comment lengths start at
            # offset 28; relative offset of the local file header is at offset 42
            fp.seek(pos + 28)
            name_len = struct.unpack('H', fp.read(2))[0]
            extra_len = struct.unpack('H', fp.read(2))[0]
            comment_len = struct.unpack('H', fp.read(2))[0]
            fp.seek(pos + 42)
            pos2 = struct.unpack('I', fp.read(4))[0]
            name_bytes = fp.read(name_len)
            # 46 is the fixed size of the central directory header
            fp.seek(pos + 46 + name_len + extra_len + comment_len)
            dir_files[name_bytes] = (pos, pos2)
        elif signature == 0x06054b50:
            # end of central directory: central directory offset at offset 16
            fp.seek(pos + 16)
            pos2 = struct.unpack('I', fp.read(4))[0]
            eocd = (pos, pos2)
            break
        else:
            raise IOError()
    return local_files, dir_files, eocd
def read_data(fp: typing.BinaryIO, local_files: typing.Dict[int, typing.Tuple[bytes, int, int, int]], dir_files: typing.Dict[bytes, typing.Tuple[int, int]], name_bytes: bytes) -> typing.Optional[_NDArray]:
    """
    Read a numpy data array from the zip file, or return None if absent.

    :param fp: a file pointer
    :param local_files: the local files structure
    :param dir_files: the directory headers
    :param name_bytes: the name of the data file to read, as bytes
    :return: the numpy data array, if found

    The file pointer will be at a location following the
    local file entry after this method.

    The local_files and dir_files should be passed from
    the results of parse_zip.
    """
    dir_entry = dir_files.get(name_bytes)
    if dir_entry is None:
        return None
    # dir_entry[1] is the local file header offset; local_files[...][1] is the
    # position of the stored .npy data within the archive
    fp.seek(local_files[dir_entry[1]][1])
    return numpy.load(fp)  # type: ignore
def read_json(fp: typing.BinaryIO, local_files: typing.Dict[int, typing.Tuple[bytes, int, int, int]], dir_files: typing.Dict[bytes, typing.Tuple[int, int]], name_bytes: bytes) -> PersistentDictType:
    """
    Read json properties from the zip file; return an empty dict if absent.

    :param fp: a file pointer
    :param local_files: the local files structure
    :param dir_files: the directory headers
    :param name_bytes: the name of the json file to read, as bytes
    :return: the json properties as a dictionary, if found

    The file pointer will be at a location following the
    local file entry after this method.

    The local_files and dir_files should be passed from
    the results of parse_zip.
    """
    dir_entry = dir_files.get(name_bytes)
    if dir_entry is None:
        return dict()
    # dir_entry[1] is the local file header offset; the local file record holds
    # (name, data position, data length, crc32)
    _, json_pos, json_len, _ = local_files[dir_entry[1]]
    fp.seek(json_pos)
    raw_json = fp.read(json_len)
    return typing.cast("PersistentDictType", json.loads(raw_json.decode("utf-8")))
def rewrite_zip(file_path: str, properties: PersistentDictType) -> None:
    """
    Rewrite the json properties within an existing zip file.

    :param file_path: the file path to the zip file
    :param properties: the updated properties to write to the zip file

    This method will attempt to keep the data file within the zip
    file intact without rewriting it. However, if the data file is not the
    first item in the zip file, this method will rewrite it.

    The properties param must not change during this method. Callers should
    take care to ensure this does not happen.
    """
    with open(file_path, "r+b") as fp:
        local_files, dir_files, eocd = parse_zip(fp)
        # the data file can be left in place only if the archive holds exactly
        # data.npy and metadata.json, with data.npy as the first local record
        # TODO: check compression, etc.
        keep_data_in_place = (
            len(dir_files) == 2
            and b"data.npy" in dir_files
            and b"metadata.json" in dir_files
            and dir_files[b"data.npy"][1] == 0
        )
        if keep_data_in_place:
            # overwrite everything starting at the metadata local record,
            # re-emitting a directory entry for the untouched data record
            fp.seek(dir_files[b"metadata.json"][1])
            data_local_pos = dir_files[b"data.npy"][1]
            _, _, data_len, data_crc32 = local_files[data_local_pos]
            headers = [(data_local_pos, b"data.npy", data_len, data_crc32)]
            write_zip_fp(fp, None, properties, headers)
        else:
            # fall back to rewriting the whole archive, preserving any data
            existing_data = None
            if b"data.npy" in dir_files:
                fp.seek(local_files[dir_files[b"data.npy"][1]][1])
                existing_data = numpy.load(fp)  # type: ignore
            fp.seek(0)
            write_zip_fp(fp, existing_data, properties)
class NDataHandler(StorageHandler.StorageHandler):
    """
    A handler object for ndata files.

    ndata files are a zip file consisting of a data.npy file and a metadata.json file.
    Both files must be uncompressed.

    The handler will read zip files where the metadata.json file is the first of the
    two files; however it will always make sure data is the first file upon writing.

    The handler is meant to be fully independent so that it can easily be plugged into
    earlier versions of Swift as it evolves.

    :param file_path: the path of the ndata file managed by this handler

    TODO: Move NDataHandler into a plug-in
    """

    count = 0  # useful for detecting leaks in tests

    def __init__(self, file_path: typing.Union[str, pathlib.Path]) -> None:
        self.__file_path = str(file_path)
        # all file access is serialized through this reentrant lock
        self.__lock = threading.RLock()
        NDataHandler.count += 1

    def close(self) -> None:
        NDataHandler.count -= 1

    # called before the file is moved; close but don't count.
    def prepare_move(self) -> None:
        pass

    @property
    def reference(self) -> str:
        # the reference is the absolute file path
        return self.__file_path

    @property
    def is_valid(self) -> bool:
        return True

    @classmethod
    def is_matching(cls, file_path: str) -> bool:
        """
        Return whether the given absolute file path is an ndata file.
        """
        if file_path.endswith(".ndata") and os.path.exists(file_path):
            try:
                # bug fix: open read-only ("rb" instead of "r+b"); this method
                # never writes, and "r+b" fails on files without write permission.
                with open(file_path, "rb") as fp:
                    local_files, dir_files, eocd = parse_zip(fp)
                    contains_data = b"data.npy" in dir_files
                    contains_metadata = b"metadata.json" in dir_files
                    file_count = contains_data + contains_metadata  # use fact that True is 1, False is 0
                    # TODO: make sure ndata isn't compressed, or handle it
                    if len(dir_files) != file_count or file_count == 0:
                        return False
                    return True
            except Exception as e:
                logging.error("Exception parsing ndata file: %s", file_path)
                logging.error(str(e))
        return False

    @classmethod
    def make(cls, file_path: pathlib.Path) -> StorageHandler.StorageHandler:
        """Return a new handler for file_path (suffix forced to .ndata)."""
        return NDataHandler(NDataHandler.make_path(file_path))

    @classmethod
    def make_path(cls, file_path: pathlib.Path) -> str:
        """Return the string path for file_path with the .ndata suffix applied."""
        return str(file_path.with_suffix(cls.get_extension()))

    @classmethod
    def get_extension(cls) -> str:
        # fix: classmethod first parameter is the class; previously misnamed 'self'
        return ".ndata"

    def write_data(self, data: _NDArray, file_datetime: datetime.datetime) -> None:
        """
        Write data to the ndata file specified by reference.

        Existing properties in the file, if any, are preserved.

        :param data: the numpy array data to write
        :param file_datetime: the datetime for the file
        """
        with self.__lock:
            assert data is not None
            absolute_file_path = self.__file_path
            #logging.debug("WRITE data file %s for %s", absolute_file_path, key)
            make_directory_if_needed(os.path.dirname(absolute_file_path))
            properties = self.read_properties() if os.path.exists(absolute_file_path) else dict()
            if properties is not None:
                write_zip(absolute_file_path, data, properties)
            # convert to utc time and stamp the file modification time with it
            tz_minutes = Utility.local_utcoffset_minutes(file_datetime)
            timestamp = calendar.timegm(file_datetime.timetuple()) - tz_minutes * 60
            os.utime(absolute_file_path, (time.time(), timestamp))

    def reserve_data(self, data_shape: typing.Tuple[int, ...], data_dtype: numpy.typing.DTypeLike, file_datetime: datetime.datetime) -> None:
        # reserve space by writing a zero-filled array of the requested shape/dtype
        self.write_data(numpy.zeros(data_shape, data_dtype), file_datetime)

    def write_properties(self, properties: PersistentDictType, file_datetime: datetime.datetime) -> None:
        """
        Write properties to the ndata file specified by reference.

        :param properties: the dict to write to the file
        :param file_datetime: the datetime for the file

        The properties param must not change during this method. Callers should
        take care to ensure this does not happen.
        """
        with self.__lock:
            absolute_file_path = self.__file_path
            #logging.debug("WRITE properties %s for %s", absolute_file_path, key)
            make_directory_if_needed(os.path.dirname(absolute_file_path))
            exists = os.path.exists(absolute_file_path)
            if exists:
                # rewrite in place, keeping the data record intact when possible
                rewrite_zip(absolute_file_path, Utility.clean_dict(properties))
            else:
                write_zip(absolute_file_path, None, Utility.clean_dict(properties))
            # convert to utc time and stamp the file modification time with it
            tz_minutes = Utility.local_utcoffset_minutes(file_datetime)
            timestamp = calendar.timegm(file_datetime.timetuple()) - tz_minutes * 60
            os.utime(absolute_file_path, (time.time(), timestamp))

    def read_properties(self) -> PersistentDictType:
        """
        Read and return the properties (metadata.json) from the ndata file.
        """
        with self.__lock:
            absolute_file_path = self.__file_path
            with open(absolute_file_path, "rb") as fp:
                local_files, dir_files, eocd = parse_zip(fp)
                properties = read_json(fp, local_files, dir_files, b"metadata.json")
            return properties

    def read_data(self) -> typing.Optional[_NDArray]:
        """
        Read and return the data array (data.npy) from the ndata file, or None.
        """
        with self.__lock:
            absolute_file_path = self.__file_path
            #logging.debug("READ data file %s", absolute_file_path)
            with open(absolute_file_path, "rb") as fp:
                local_files, dir_files, eocd = parse_zip(fp)
                return read_data(fp, local_files, dir_files, b"data.npy")

    def remove(self) -> None:
        """
        Remove the ndata file, if it exists.
        """
        with self.__lock:
            absolute_file_path = self.__file_path
            #logging.debug("DELETE data file %s", absolute_file_path)
            if os.path.isfile(absolute_file_path):
                os.remove(absolute_file_path)
| nion-software/nionswift | nion/swift/model/NDataHandler.py | Python | gpl-3.0 | 23,221 |
"""
Copyright (C) 2014, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Maintainer: Rudolf Streif (rstreif@jaguarlandrover.com)
"""
"""
Backend daemon that connects to RVI, receives, dispatches and processes
incoming messages from vehicles.
"""
import sys, os, logging, jsonrpclib
import time
from signal import *
from urlparse import urlparse
import Queue
import django
from django.conf import settings
from django.db import connection
import __init__
from util.daemon import Daemon
from server.sotaserver import SOTACallbackServer, SOTATransmissionServer
from server.trackingserver import TrackingCallbackServer
from server.mqsinkserver import MQSinkServer
from server.hbaseserver import HBaseServer
from server.utils import get_settings
from __init__ import __RVI_LOGGER__ as rvi_logger
from __init__ import __SOTA_LOGGER__ as sota_logger
import sota.models
class RVIServer(Daemon):
    """
    Daemon that connects to RVI Service Edge, starts the configured
    sub-servers (SOTA callback/transmission, tracking callback, message
    queue sink, HBase consumer) and keeps the database connection fresh
    while running.
    """
    # handles to the sub-server threads; populated in run(), shut down in cleanup()
    rvi_service_edge = None
    sota_cb_server = None
    sota_tx_server = None
    tracking_cb_server = None
    mq_sink_server = None
    hbase_server = None

    def cleanup(self, *args):
        # signal handler: args[0] is the signal number; shut down every
        # sub-server that was started, then exit
        rvi_logger.info('RVI Server: Caught signal: %d. Shutting down...', args[0])
        if self.sota_cb_server:
            self.sota_cb_server.shutdown()
        if self.sota_tx_server:
            self.sota_tx_server.shutdown()
        if self.tracking_cb_server:
            self.tracking_cb_server.shutdown()
        if self.mq_sink_server:
            self.mq_sink_server.shutdown()
        if self.hbase_server:
            self.hbase_server.shutdown()
        sys.exit(0)

    def run(self):
        # Execution starts here
        rvi_logger.info('RVI Server: Starting...')
        conf = get_settings()
        rvi_logger.info('RVI Server: General Configuration: ' +
            'RVI_SERVICE_EDGE_URL: ' + conf['SERVICE_EDGE_URL'] + ', ' +
            'MEDIA_ROOT: ' + conf['MEDIA_ROOT']
        )
        # setup RVI Service Edge
        rvi_logger.info('RVI Server: Setting up outbound connection to RVI Service Edge at %s', conf['SERVICE_EDGE_URL'])
        self.rvi_service_edge = jsonrpclib.Server(conf['SERVICE_EDGE_URL'])
        # SOTA Startup
        if conf['SOTA_ENABLE'] == True:
            # log SOTA configuration
            rvi_logger.info('RVI Server: SOTA Configuration: ' +
                'RVI_SOTA_CALLBACK_URL: ' + conf['SOTA_CALLBACK_URL'] + ', ' +
                'RVI_SOTA_SERVICE_ID: ' + conf['SOTA_SERVICE_ID'] + ', ' +
                'RVI_SOTA_CHUNK_SIZE: ' + str(conf['SOTA_CHUNK_SIZE'])
            )
            # start the SOTA callback server
            try:
                rvi_logger.info('RVI Server: Starting SOTA Callback Server on %s with service id %s.', conf['SOTA_CALLBACK_URL'], conf['SOTA_SERVICE_ID'])
                self.sota_cb_server = SOTACallbackServer(self.rvi_service_edge, conf['SOTA_CALLBACK_URL'], conf['SOTA_SERVICE_ID'])
                self.sota_cb_server.start()
                rvi_logger.info('RVI Server: SOTA Callback Server started.')
            except Exception as e:
                # a sub-server failure is fatal for the daemon
                rvi_logger.error('RVI Server: Cannot start SOTA Callback Server: %s', e)
                sys.exit(1)
            # wait for SOTA callback server to come up
            time.sleep(0.5)
            # start SOTA Transmission Server
            try:
                rvi_logger.info('RVI Server: Starting SOTA Transmission Server.')
                self.sota_tx_server = SOTATransmissionServer(self.rvi_service_edge, conf['SOTA_SERVICE_ID'], conf['SOTA_CHUNK_SIZE'])
                self.sota_tx_server.start()
                rvi_logger.info('RVI Server: SOTA Transmission Server started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start SOTA Transmission Server: %s', e)
                sys.exit(1)
            # wait for SOTA transmission server to come up
            time.sleep(0.5)
        # Tracking Startup
        if conf['TRACKING_ENABLE'] == True:
            # log Tracking configuration
            rvi_logger.info('RVI Server: Tracking Configuration: ' +
                'RVI_TRACKING_CALLBACK_URL: ' + conf['TRACKING_CALLBACK_URL'] + ', ' +
                'RVI_TRACKING_SERVICE_ID: ' + conf['TRACKING_SERVICE_ID']
            )
            # start the Tracking callback server
            try:
                rvi_logger.info('RVI Server: Starting Tracking Callback Server on %s with service id %s.', conf['TRACKING_CALLBACK_URL'], conf['TRACKING_SERVICE_ID'])
                self.tracking_cb_server = TrackingCallbackServer(self.rvi_service_edge, conf['TRACKING_CALLBACK_URL'], conf['TRACKING_SERVICE_ID'])
                self.tracking_cb_server.start()
                rvi_logger.info('RVI Server: Tracking Callback Server started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start Tracking Callback Server: %s', e)
                sys.exit(1)
            # wait for SOTA callback server to come up
            time.sleep(0.5)
        else:
            rvi_logger.info('RVI Server: Tracking not enabled')
        # Publish to Kafka Message Queue
        if conf['TRACKING_MQ_PUBLISH'] == True:
            #log kafka configuration
            rvi_logger.info('RVI Server: Publishing to Kafka Message Queue: ' + conf['TRACKING_MQ_URL'] + ' , with topic: ' + conf['TRACKING_MQ_TOPIC'])
            #Start the Kafka message queue forwarding server
            try:
                rvi_logger.info('%s: Publishing to message queue enabled.', self.__class__.__name__)
                self.mq_sink_server = MQSinkServer(self.rvi_service_edge, conf['TRACKING_CALLBACK_URL'], conf['TRACKING_SERVICE_ID'])
                self.mq_sink_server.start()
                rvi_logger.info('RVI Server: Message Queue Server started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start Message Queue Server: %s', e)
                sys.exit(1)
        else:
            rvi_logger.info('RVI Server: MQ Publish not enabled')
        # Save message Queue contents into HBase
        if conf['TRACKING_MQ_HBASE'] == True:
            rvi_logger.info('RVI Server: Saving to HBase: ' + conf['TRACKING_MQ_HBASE_URL'])
            #Start HBase Server thread
            try:
                rvi_logger.info('%s: Saving messages to HBase enabled.', self.__class__.__name__)
                self.hbase_server = HBaseServer(conf['TRACKING_MQ_URL'],conf['TRACKING_MQ_TOPIC'],conf['TRACKING_MQ_HBASE_URL'], conf['TRACKING_MQ_HBASE_PORT'], conf['TRACKING_MQ_HBASE_TABLE'])
                self.hbase_server.start()
                rvi_logger.info('RVI Server: Kafka -> HBase consumer started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start HBase Server: %s', e)
                sys.exit(1)
        else:
            rvi_logger.info('RVI Server: HBase server storage not enabled')
        # catch signals for proper shutdown
        for sig in (SIGABRT, SIGTERM, SIGINT):
            signal(sig, self.cleanup)
        # main execution loop: periodically ping the database and close the
        # connection when it has been idle for longer than DB_CLOSE_TIMEOUT
        timeout = conf['DB_CLOSE_TIMEOUT']
        while True:
            try:
                time.sleep(conf['DB_PING_INTERVAL'])
                # If we are idle too long the database server may
                # close the connection on us, ping the server to check if
                # the connection is still up.
                if (connection.connection is not None):
                    if (connection.is_usable() == True):
                        rvi_logger.debug('RVI Server: Database connection is up.')
                        # Close connection if open longer than the timeout
                        timeout -= conf['DB_PING_INTERVAL']
                        if (timeout <= 0):
                            connection.close()
                            timeout = conf['DB_CLOSE_TIMEOUT']
                            rvi_logger.info('RVI Server: Idle Timeout: closed database connection.')
                    else:
                        rvi_logger.error('RVI Server: Database connection is down.')
                        connection.close()
                elif (conf['TRACKING_MQ_PUBLISH'] == True and conf['TRACKING_ENABLE'] == False):
                    # MQ publishing without tracking does not need the database
                    pass
                else:
                    rvi_logger.error('RVI Server: Database connection is closed.')
                    # As long as the connection is closed reset the timeout
                    timeout = conf['DB_CLOSE_TIMEOUT']
            except KeyboardInterrupt:
                print ('\n')
                break
def usage():
print "RVI Server: Usage: %s foreground|start|stop|restart" % sys.argv[0]
if __name__ == "__main__":
pid_file = '/var/run/' + os.path.splitext(__file__)[0] + '.pid'
rvi_server = None
if len(sys.argv) == 3:
pid_file = sys.argv[2]
rvi_server = RVIServer(pid_file, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null')
if len(sys.argv) >= 2:
if sys.argv[1] in ('foreground', 'fg'):
# in foreground we also log to the console
rvi_logger.addHandler(logging._handlers['console'])
rvi_server.run()
elif sys.argv[1] in ('start', 'st'):
rvi_server.start()
elif sys.argv[1] in ('stop', 'sp'):
rvi_server.stop()
elif sys.argv[1] in ('restart', 're'):
rvi_server.restart()
else:
print "RVI Server: Unknown command."
usage()
sys.exit(2)
else:
usage()
sys.exit(2)
| rstreif/rvi_backend | server/rviserver.py | Python | mpl-2.0 | 9,882 |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from ansible import errors
from ansible import utils
class Task(object):
    """A single task or handler loaded from a playbook datastructure.

    Parses the 'action' string into module_name/module_args, normalizes
    the notify list, and templates the name and action strings using the
    task-specific module_vars.
    """

    __slots__ = [
        'name', 'action', 'only_if', 'async_seconds', 'async_poll_interval',
        'notify', 'module_name', 'module_args', 'module_vars', 'play', 'notified_by',
    ]

    def __init__(self, play, ds, module_vars=None):
        ''' constructor loads from a task or handler datastructure '''

        # TODO: more error handling

        # include task specific vars
        self.module_vars = module_vars
        self.play = play

        self.name = ds.get('name', None)
        self.action = ds.get('action', '')
        self.notified_by = []

        # a task without an explicit name is displayed by its action
        if self.name is None:
            self.name = self.action

        self.only_if = ds.get('only_if', 'True')
        self.async_seconds = int(ds.get('async', 0))  # not async by default
        self.async_poll_interval = int(ds.get('poll', 10))  # default poll = 10 seconds
        self.notify = ds.get('notify', [])

        # allow a single notify target to be given as a plain string
        if isinstance(self.notify, basestring):
            self.notify = [ self.notify ]

        # the action string is "<module_name> [<module_args>]"
        tokens = self.action.split(None, 1)
        if len(tokens) < 1:
            raise errors.AnsibleError("invalid/missing action in task")

        self.module_name = tokens[0]
        self.module_args = ''
        if len(tokens) > 1:
            self.module_args = tokens[1]

        self.name = utils.template(self.name, self.module_vars)
        # bug fix: template the action from self.action; the original passed
        # self.name here, overwriting the action with the templated name.
        self.action = utils.template(self.action, self.module_vars)

        if 'first_available_file' in ds:
            self.module_vars['first_available_file'] = ds.get('first_available_file')
| tbielawa/ansible | lib/ansible/playbook/task.py | Python | gpl-3.0 | 2,376 |
# Copyright 2013 Pawel Daniluk, Bartek Wilczynski
#
# This file is part of WeBIAS.
#
# WeBIAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# WeBIAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with WeBIAS. If not, see
# <http://www.gnu.org/licenses/>.
import webias.gnosis.xml.objectify as objectify
from genshi.template import NewTextTemplate
class XMLTree():
    """Thin wrapper around a gnosis.xml.objectify DOM tree.

    Provides slash-separated path lookup ('a/b:IDX/c'), depth-annotated
    walking, and a stack of search prefixes that is prepended to every
    get() path.
    """
    def __init__(self, xml_string):
        # Parse into an objectify DOM; ensure every node has a 'type'
        # attribute (defaulting to 'value') so later code can rely on it.
        self.tree=objectify.make_instance(xml_string, p=objectify.DOM)
        for e in objectify.walk_xo(self.tree):
            try:
                e.type
            except:
                e.type='value'
    def path(self, o):
        """Return the slash-separated tag path of node *o* from the root.

        Raises Exception when *o* does not belong to this tree.
        """
        all=list(objectify.walk_xo(self.tree))
        if o not in all:
            raise Exception('Object not in query tree')
        res=''
        par=objectify.parent(o)
        if(par != self.tree):
            # recurse towards the root, accumulating 'parent/child' segments
            res=self.path(par) + '/'
        res+=objectify.tagname(o)
        return res
    def _get(self, tree, idx, *kwds):
        """Recursive helper for get().

        *kwds* are the remaining path components; a component of the form
        'name:IDX' selects, one level down, the child whose ``.index``
        equals IDX.  *idx* is the index requirement carried over from the
        previous component.
        """
        if kwds==():
            return tree
        else:
            l=kwds[0]
            try:
                (name, nidx)=l.split(':')
            except:
                (name, nidx)=(l,None)
            subtree=tree.__dict__[name]
            if idx!=None:
                # subtree may be a single node or a list of siblings;
                # normalise to an iterator and pick the matching index
                try:
                    i=iter(subtree)
                except:
                    i=iter([subtree])
                subtree=None
                for t in i:
                    if t.index==idx:
                        subtree=t
                        break
            return self._get(subtree, nidx, *kwds[1:])
    def index_set(self, path):
        """Sorted list of distinct child ``.index`` values under *path*.

        Returns [] when the path does not resolve.
        """
        el=self.get(path)
        if el==None:
            return []
        res=list(set([t.index for t in objectify.children(el)]))
        res.sort()
        return res
    def get_search_prefix(self):
        # Top of the prefix stack, or '' when the stack is empty or unset.
        try:
            return self.search_prefixes[-1]
        except:
            return ''
    def push_search_prefix(self, pref):
        """Push current-prefix + *pref*; the stack is created lazily."""
        sp=self.get_search_prefix()
        try:
            self.search_prefixes.append(sp+pref)
        except:
            self.search_prefixes=[sp+pref]
    def pop_search_prefix(self):
        # Silently ignores an empty or unset stack.
        try:
            self.search_prefixes.pop()
        except:
            pass
    def clear_search_prefix(self):
        self.search_prefixes=[]
    def get(self, path):
        """Resolve *path* (with the active search prefix) to a node.

        Returns None when any component fails to resolve.
        """
        try:
            prefix=self.get_search_prefix()
        except:
            prefix=''
        try:
            return self._get(self.tree, None, *(prefix+path).split('/'))
        except:
            return None
    def walk(self):
        """Yield (depth, node) for every node below the root, in document order."""
        for c in objectify.children(self.tree):
#            if objectify.tagname(c)=="BIAS_email":
#                continue
            prev=c
            par=c
            depth=1
            for o in objectify.walk_xo(c):
                if o!=c:
                    # track depth by watching parent transitions between
                    # consecutive nodes of the pre-order traversal
                    if objectify.parent(o) == prev:
                        depth+=1
                        par=prev
                    elif objectify.parent(o) != par:
                        depth-=1
                        par=objectify.parent(o)
                    prev=o
                yield (depth, o)
class Query(XMLTree):
    """Query tree: renders command templates and hides BIAS_email nodes."""
    def make_command(self, template):
        """Render *template* (a Genshi NewTextTemplate) against the tree root's fields."""
        stream = NewTextTemplate(template).generate(**self.tree.__dict__)
        return stream.render('text').strip()
    def walk(self):
        """Depth-annotated walk that skips every BIAS_email subtree node."""
        for depth, node in XMLTree.walk(self):
            if objectify.tagname(node) == "BIAS_email":
                continue
            yield (depth, node)
class Result(XMLTree):
    """Result tree; plain XMLTree behaviour with no node filtering."""
    pass
| pawelld/webias | webias/query.py | Python | agpl-3.0 | 3,961 |
# Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''Spying on real objects.'''
import inspect
from .mockito import when2
from .invocation import RememberedProxyInvocation
from .mocking import Mock, _Dummy, mock_registry
from .utils import get_obj
__all__ = ['spy']
def spy(object):
    """Spy an object.

    Spying means that all functions will behave as before, so they will
    be side effects, but the interactions can be verified afterwards.

    Returns Dummy-like, almost empty object as proxy to `object`.

    The *returned* object must be injected and used by the code under test;
    after that all interactions can be verified as usual.
    T.i. the original object **will not be patched**, and has no further
    knowledge as before.

    E.g.::

        import time
        time = spy(time)
        # inject time
        do_work(..., time)
        verify(time).time()

    """
    # Classes and modules get no __class__ masquerade; instances adopt
    # the spied object's class so isinstance() checks still pass.
    if inspect.isclass(object) or inspect.ismodule(object):
        class_ = None
    else:
        class_ = object.__class__
    class Spy(_Dummy):
        if class_:
            __class__ = class_
        def __getattr__(self, method_name):
            # Every attribute access becomes a proxy invocation that is
            # recorded on the backing mock and forwarded to the real object.
            return RememberedProxyInvocation(theMock, method_name)
        def __repr__(self):
            name = 'Spied'
            if class_:
                name += class_.__name__
            return "<%s id=%s>" % (name, id(self))
    obj = Spy()
    # Strict mock with the real object as spec backs the proxy; the
    # registry lets verify() find the mock from the proxy instance.
    theMock = Mock(obj, strict=True, spec=object)
    mock_registry.register(obj, theMock)
    return obj
def spy2(fn):  # type: (...) -> None
    """Spy usage of given `fn`.

    Patches the module, class or object `fn` lives in, so that all
    interactions can be recorded; otherwise executes `fn` as before, so
    that all side effects happen as before.

    E.g.::

        import time
        spy2(time.time)
        do_work(...)  # nothing injected, uses global patched `time` module
        verify(time).time()

    Note that builtins often cannot be patched because they're read-only.

    """
    # A dotted-path string is resolved to the real object; a callable is
    # used directly as the pass-through answer.
    answer = get_obj(fn) if isinstance(fn, str) else fn
    when2(fn, Ellipsis).thenAnswer(answer)
| kaste/mockito-python | mockito/spying.py | Python | mit | 3,217 |
"""Decorators marks that a doctest should be skipped, for both python 2 and 3.
The IPython.testing.decorators module triggers various extra imports, including
numpy and sympy if they're present. Since this decorator is used in core parts
of IPython, it's in a separate module so that running IPython doesn't trigger
those imports."""
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
#-----------------------------------------------------------------------------
# Decorators
#-----------------------------------------------------------------------------
def skip_doctest(f):
    """Decorator - mark a function or method for skipping its doctest.

    This decorator allows you to mark a function whose docstring you wish to
    omit from testing, while preserving the docstring for introspection, help,
    etc."""
    setattr(f, 'skip_doctest', True)
    return f
def skip_doctest_py3(f):
    """Decorator - skip the doctest under Python 3."""
    running_py3 = sys.version_info[0] >= 3
    f.skip_doctest = running_py3
    return f
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/IPython/testing/skipdoctest.py | Python | artistic-2.0 | 1,484 |
import numpy as np
import pickle
import os
# Axis-name -> coordinate-index lookup used to select which dimensions a
# perturbation acts in (0=x, 1=y, 2=z).
dim = {
    'xyz': [0, 1, 2],
    'xy':  [0, 1],
    'xz':  [0, 2],
    'yz':  [1, 2],
    'x':   [0],
    'y':   [1],
    'z':   [2],
}
def user_material():
    """Hook for user-supplied material definitions (intentionally a no-op)."""
    pass
class Material(object):
def save(self):
print(self._outdir)
"""save class as self.name.txt"""
try:
os.makedirs(self._outdir)
except:
pass
d = self
d.function = None
d.delta_function = None
d.delta_smooth_function = None
f = open(os.path.join(self._outdir,self.name + '.saved'),'w')
pickle.dump(d, f)
f.close()
def _set_vacuum(self):
if self.normalized_vacuum:
self.eo = 1.0
self.mo = 1.0
else:
self.eo = 8.854187817e-12
self.mo = 4e-7*np.pi
self.co = 1.0/np.sqrt(self.eo*self.mo)
self.zo = np.sqrt(self.mo/self.eo)
return self.eo,self.mo
def _unpack_options(self,options={}):
# first add the options passed to the function
for key in options:
setattr(self,key,options[key])
# unpack options from self.options{}
for key in self.options:
setattr(self,key,self.options[key])
def dump(self):
for attr in sorted(dir(self)):
if not attr.startswith('_'):
print("%s = %s" % (attr, getattr(self, attr)))
    def _dump_to_latex(self):
        """Append all public attributes as a LaTeX table to a uniquely
        named ``_material_<uuid>.tex`` file inside ``self._outdir``."""
        from tabulate import tabulate  # NOTE(review): imported but never used -- confirm it can be dropped
        strt = r'\begin{table}[h!]' + '\n' + r'\centering' + '\n' + r'\begin{tabular}[cl]' + '\n' + r'\hline' + '\n'
        strt = strt + r'variable & value(s) \\' + '\n' + r'\hline' +'\n'
        for attr in sorted(dir(self)):
            if not attr.startswith('_'):
                s = getattr(self,attr)
                # scalars get one row each
                if isinstance(s, str):
                    strt = strt + '\t' + r'\verb+' + attr + '+ \t' + r'&' + '\t' + s + r' \\' + '\n'
                elif isinstance(s,float):
                    strt = strt + '\t' + r'\verb+' + attr + '+ \t' + r'&' + '\t' + str(s) + r' \\' + '\n'
                elif isinstance(s,bool):
                    strt = strt + '\t' + r'\verb+' + attr + '+ \t' + r'&' + '\t' + str(s) + r' \\' + '\n'
                else:
                    # sized values get a multirow cell; unsized values fall
                    # through to the callable/name branch below
                    try:
                        len(s)
                        strt = strt + '\t' + r'\multicolumn{1}{c}\multirow{'+str(np.shape(s)[0])+r'}{*}{\verb+' + attr + r'+}' + '\t' + r'&' + '\t'
                        for k in range(np.shape(s)[0]):
                            strt = strt + str(s[k]) + r' \\'
                        strt = strt + '\n'
                    except:
                        # strip function/method/ufunc reprs down to a bare name
                        if ('function' in str(s)): s=str(s).split('function ')[1].split('at')[0]
                        if ('method' in str(s)): s=str(s).split('method')[1].split('.')[1].split('of')[0]
                        if ('ufunc' in str(s)): s=str(s).split('ufunc ')[1].split('>')[0]
                        strt = strt + '\t' + r'\verb+' + attr + '+ \t' + r'&' + '\t' + str(s) + r' \\' + '\n'
        strt = strt + r'\end{tabular}' + '\n' + r'\end{table}' + '\n'
        import uuid
        try:
            os.makedirs(self._outdir)
        except:
            pass
        f = open(os.path.join(self._outdir,'_material_'+str(uuid.uuid1())+'.tex'),'a')
        f.write(strt)
        f.close()
def _dump(self,obj):
for attr in sorted(dir(obj)):
try:
print("%s = %s" % (attr, getattr(obj, attr)))
except:
pass
    def plot(self,eta):
        """Debug helper: pcolormesh plot of *eta* saved to ./debug.png."""
        import matplotlib
        # set matplotlib to work over X-forwarding
        matplotlib.use('Agg')
        from matplotlib import pylab as plt
        plt.figure()
        plt.pcolormesh(eta)
        plt.draw()
        plt.savefig('./debug.png',dpi=320)
    def setaux_lower(self,state,dim,t,qbc,auxbc,num_ghost):
        """Fill the lower ghost-cell strip of *auxbc* along boundary *dim*
        by evaluating the material function at ghost-cell centers.

        Returns the filled *auxbc* (clawpack boundary-condition hook).
        """
        grid = state.grid
        grid.compute_c_centers_with_ghost(num_ghost,recompute=True)
        if state.num_dim==1:
            x = grid.x.centers_with_ghost[:num_ghost]
            auxbc[:,:num_ghost] = self.function(x,t)
        elif state.num_dim==2:
            x = grid._c_centers_with_ghost[0]
            y = grid._c_centers_with_ghost[1]
            # slice the ghost strip along whichever dimension this
            # boundary belongs to
            if dim.name==state.grid.dimensions[0].name:
                x = x[:num_ghost,:]
                y = y[:num_ghost,:]
                auxbc[:,:num_ghost,:] = self.function(x,y,t)
            else:
                x = x[:,:num_ghost]
                y = y[:,:num_ghost]
                auxbc[:,:,:num_ghost] = self.function(x,y,t)
        elif state.num_dim==3:
            x = grid._c_centers_with_ghost[0]
            y = grid._c_centers_with_ghost[1]
            z = grid._c_centers_with_ghost[2]
            if dim.name==state.grid.dimensions[0].name:
                x = x[:num_ghost,:,:]
                y = y[:num_ghost,:,:]
                z = z[:num_ghost,:,:]
                auxbc[:,:num_ghost,:,:] = self.function(x,y,z,t)
            elif dim.name==state.grid.dimensions[1].name:
                x = x[:,:num_ghost,:]
                y = y[:,:num_ghost,:]
                z = z[:,:num_ghost,:]
                auxbc[:,:,:num_ghost,:] = self.function(x,y,z,t)
            elif dim.name==state.grid.dimensions[2].name:
                x = x[:,:,:num_ghost]
                y = y[:,:,:num_ghost]
                z = z[:,:,:num_ghost]
                auxbc[:,:,:,:num_ghost] = self.function(x,y,z,t)
        return auxbc
    def setaux_upper(self,state,dim,t,qbc,auxbc,num_ghost):
        """Fill the upper ghost-cell strip of *auxbc* along boundary *dim*
        (mirror of :meth:`setaux_lower` using trailing slices).

        Returns the filled *auxbc* (clawpack boundary-condition hook).
        """
        grid = state.grid
        grid.compute_c_centers_with_ghost(num_ghost,recompute=True)
        if state.num_dim==1:
            x = grid.x.centers_with_ghost[-num_ghost:]
            auxbc[:,-num_ghost:] = self.function(x,t)
        elif state.num_dim==2:
            x = grid._c_centers_with_ghost[0]
            y = grid._c_centers_with_ghost[1]
            # slice the ghost strip along whichever dimension this
            # boundary belongs to
            if dim.name==state.grid.dimensions[0].name:
                x = x[-num_ghost:,:]
                y = y[-num_ghost:,:]
                auxbc[:,-num_ghost:,:] = self.function(x,y,t)
            else:
                x = x[:,-num_ghost:]
                y = y[:,-num_ghost:]
                auxbc[:,:,-num_ghost:] = self.function(x,y,t)
        elif state.num_dim==3:
            x = grid._c_centers_with_ghost[0]
            y = grid._c_centers_with_ghost[1]
            z = grid._c_centers_with_ghost[2]
            if dim.name==state.grid.dimensions[0].name:
                x = x[-num_ghost:,:,:]
                y = y[-num_ghost:,:,:]
                z = z[-num_ghost:,:,:]
                auxbc[:,-num_ghost:,:,:] = self.function(x,y,z,t)
            elif dim.name==state.grid.dimensions[1].name:
                x = x[:,-num_ghost:,:]
                y = y[:,-num_ghost:,:]
                z = z[:,-num_ghost:,:]
                auxbc[:,:,-num_ghost:,:] = self.function(x,y,z,t)
            elif dim.name==state.grid.dimensions[2].name:
                x = x[:,:,-num_ghost:]
                y = y[:,:,-num_ghost:]
                z = z[:,:,-num_ghost:]
                auxbc[:,:,:,-num_ghost:] = self.function(x,y,z,t)
        return auxbc
def update_aux(self,solver,state):
grid = state.grid
grid.compute_c_centers()
t = state.t
if state.num_dim==1:
x = grid.x.centers
state.aux = self.function(x,t)
if state.num_dim==2:
x,y = grid._c_centers
state.aux = self.function(x,y,t)
if state.num_dim==3:
x,y,z = grid._c_centers
state.aux = self.function(x,y,z,t)
return state
    def impose_metal(self,solver,state):
        """Zero the first two field components of ``state.q`` inside every
        metal box (2-D only); optionally refresh aux first.

        Returns *state*.
        """
        if self.update_at_each_stage:
            self.update_aux(solver,state)
        if state.num_dim==2:
            grid = state.grid
            x,y = grid.c_centers
            for k in range(0,len(self.metal_corners)):
                # each entry holds the (x1,x2)/(y1,y2) corners of one box
                xi1,xi2 = self.metal_corners[k,:,0]
                yi1,yi2 = self.metal_corners[k,:,1]
                state.q[0:2,(x>=xi1)*(x<=xi2)*(y>=yi1)*(y<=yi2)] = 0.0
        return state
    def init(self,state):
        """Initial fill of ``state.aux`` from the material function.

        Same logic as :meth:`update_aux` but uses the private
        ``_compute_c_centers`` -- TODO confirm the two are interchangeable.
        """
        grid = state.grid
        grid._compute_c_centers()
        t = state.t
        if state.num_dim==1:
            x = grid.x.centers
            state.aux = self.function(x,t)
        if state.num_dim==2:
            x,y = grid._c_centers
            state.aux = self.function(x,y,t)
        if state.num_dim==3:
            x,y,z = grid._c_centers
            state.aux = self.function(x,y,z,t)
        return state
def _get_vibrate(self,x=0.0,y=0.0,span=None,t=0.0):
w = self.delta_function(self.delta_omega*t)
dw = self.delta_sign_dt*self.delta_omega*self.delta_function_dt(self.delta_omega*t)
if self.delta_smooth:
s = self.delta_smooth_function(x,y)
w = s*w
dw = s*dw
if span is not None:
w = w*span
dw = dw*span
return w,dw
def general_setup(self,options={}):
self._unpack_options(options=options)
self._set_vacuum()
temp_flag = False
if self.custom:
self.custom_function = user_material
if self.shape=='homogeneous':
self.function = self._homogeneous
if self.shape.startswith('moving'):
self.offset = np.zeros([self.num_dim,self.num_aux/2])
self.delta_velocity = np.zeros([self.num_dim,self.num_aux/2])
self._moving = True
self.update_at_each_stage = True
if 'gauss' in self.shape:
self.function = self._gaussian_rip
if 'tanh' in self.shape:
self.function = self._tanh_rip
self.offset[0,:].fill(10.0)
self.delta_velocity[0,:].fill(0.59)
if 'gauss' in self.shape:
temp_flag = True
if 'tanh' in self.shape:
temp_flag = True
self.temp_flag = temp_flag
if temp_flag:
self.delta_sigma = 5.0*np.ones([self.num_dim,self.num_aux//2])
self.relative_amplitude = 0.1*np.ones([self.num_aux//2])
self.delta_eta = self.relative_amplitude*self.bkg_eta
self.em_equal = True
self.delta_sign = 1.0
if not self._moving:
self.function = self._gaussian
self.delta_sign = 1.0
self._rip_precalc = False
if self.shape.startswith('fiber'):
self.fiber_eta = np.ones([self.num_aux//2])
self.fiber_corner = np.zeros([self.num_dim])
self.fiber_corner[0] = -5.0
self.fiber_width = 5.0
self.fiber_length = 100.0
if self.shape=='fiber single':
self.function = self._single_fiber
if self.shape=='fiber double':
self.fiber_eta = np.ones([2,self.num_aux//2])
self.fiber_corner = np.zeros([2,self.num_dim])
self.fiber_width = 5.0*np.ones([self.num_aux//2])
self.fiber_length = 100.0*np.ones([self.num_aux//2])
self.function = self._double_fiber
if self.shape=='fiber vibrate':
self.delta_width = 5.0
self.delta_length = 5.0
self.delta_corner = np.zeros([self.num_dim])
self.delta_corner[0] = 5.0
self.delta_eta = np.ones([self.num_aux//2])
self.delta_smooth = False
self.delta_omega = 2.0*np.pi
self.delta_function = np.cos
self.delta_function_dt = np.sin
self.delta_sign_dt = -1.0
self.delta_angular_velocity = None
self.delta_smooth_function = self._gaussianf
self.delta_smooth_width = 5.0
self.delta_smooth_length = 5.0
self.delta_smooth_np = 2.0*np.ones([self.num_dim])
self.function = self._oscillate
self.update_at_each_stage = True
if self.shape=='expansion':
self.delta_radii = 5.0*np.ones([self.num_aux//2])
self.delta_expansion_rate = np.zeros([self.num_aux//2])
self.delta_velocity = np.zeros([self.num_dim,self.num_aux//2])
self.offset = np.zeros([self.num_dim,self.num_aux//2])
self.delta_sigma = np.ones([self.num_dim,self.num_aux//2])
self.delta_eta = 0.1*np.ones([self.num_aux//2])
self.update_at_each_stage = True
self.delta_sign = 1.0
self.offset[0,:].fill(10.0)
self.dim = 'xy'
self.function = self._expanding
if self.nonlinear:
self.chi2 = np.zeros( [self.num_aux//2], order='F')
self.chi3 = np.zeros( [self.num_aux//2], order='F')
if self.metal:
self.metal_corners = []
return
def __init__(self,normalized=True,shape='homogeneous'):
self.normalized_vacuum = normalized
self.shape = shape
self.custom = False
self.averaged = False
self.nonlinear = True
self.bkg_eta = np.ones([self.num_aux//2])
self.delta = np.ones([self.num_aux//2])
self.bkg_n = np.ones([self.num_dim])
self.n_max = np.ones([self.num_dim])
self._outdir = './_output'
self.name = 'material'
self._moving = False
self.v = 1.0
self.update_at_each_stage = False
self.function = None
class Material1D(Material):
    def setup(self,options={}):
        """Configure the 1-D material for the selected ``self.shape``.

        Unpacks *options* onto the instance, sets vacuum constants and
        installs the sampling function plus its default parameters.
        """
        self._unpack_options(options=options)
        self._set_vacuum()
        self.bkg_n = np.sqrt(self.bkg_e*self.bkg_h)
        self.n_max = self.bkg_n
        temp_flag = False
        if self.custom:
            self.custom_function = user_material
        if self.shape=='homogeneous':
            self.function = self._homogeneous
        if self.shape.startswith('moving'):
            # moving RIP defaults: start at x=10 with velocity 0.59
            self.delta_velocity_e = 0.59
            self.delta_velocity_h = self.delta_velocity_e
            self.offset_e = 10.0
            self.offset_h = self.offset_e
            self._moving = True
            if 'gauss' in self.shape:
                self.function = self._gaussian_rip
            if 'tanh' in self.shape:
                self.function = self._tanh_rip
        if 'gauss' in self.shape:
            temp_flag = True
        if 'tanh' in self.shape:
            temp_flag = True
        self.temp_flag = temp_flag
        if temp_flag:
            # parameters shared by the gaussian/tanh perturbations
            self.sigma_e = 5.0
            self.sigma_h = self.sigma_e
            self.relative_amplitude = 0.1
            self.delta_n = self.relative_amplitude*(self.bkg_n)
            self.delta_e = self.delta_n
            self.delta_h = self.delta_n
            self.em_equal = True
            if not self._moving:
                self.function = self._gaussian
                self._rip_precalc = False
        if self.shape=='vibrate':
            # oscillating slab defaults
            self.delta_length = 5.0
            self.delta_corner = 5.0
            self.delta_e = 1.0
            self.delta_h = 1.0
            self.delta_smooth = False
            self.delta_omega = 2.0*np.pi
            self.delta_function = np.cos
            self.delta_function_dt = np.sin
            self.delta_sign_dt = -1.0
            self.delta_smooth_function = self._gaussianf
            self.function = self._oscillate
        if self.shape=='farago':
            self.function = self._farago
        if self.nonlinear:
            # nonlinear susceptibilities default to zero
            self.chi2_e = 0.0
            self.chi3_e = 0.0
            self.chi2_m = 0.0
            self.chi3_m = 0.0
        return
def _farago(self,x,t):
eta = np.zeros( [4,len(x)], order='F')
eta[0,:].fill((1.0+t)**2)
eta[1,:].fill(1.0)
eta[2,:].fill(2.0*(1.0+t))
eta[3,:].fill(0.0)
return eta
def _x_w_offset(self,x,v=[0.0,0.0],t=0.0):
u_x_e = x - v[0]*t - self.offset_e
u_x_m = x - v[1]*t - self.offset_h
return u_x_e,u_x_m
    def _gaussian_rip(self,x,t):
        """Moving 1-D Gaussian refractive-index perturbation (RIP).

        Rows 0/1 are the e/h profiles, rows 2/3 their time derivatives.
        When ``self.averaged`` is set the profiles are cell averages (via
        erf) but rows 2/3 still use the pointwise derivative -- TODO
        confirm that mix is intended.
        """
        eta = np.zeros( [4,len(x)], order='F')
        u_x_e,u_x_m = self._x_w_offset(x,v=[self.delta_velocity_e,self.delta_velocity_h],t=t)
        # chain-rule factors for the time derivatives
        u_e_t = 2.0*((self.delta_velocity_e*u_x_e)/(self.sigma_e**2))
        u_m_t = 2.0*((self.delta_velocity_h*u_x_m)/(self.sigma_h**2))
        u_e = (u_x_e/self.sigma_e)**2
        u_m = (u_x_m/self.sigma_h)**2
        if self.averaged:
            # integrate the Gaussian over each cell of width self._dx
            from scipy.special import erf
            ddx = self._dx/2.0
            arg1_e = (ddx - u_x_e)/self.sigma_e
            arg2_e = (ddx + u_x_e)/self.sigma_e
            arg1_m = (ddx - u_x_m)/self.sigma_h
            arg2_m = (ddx + u_x_m)/self.sigma_h
            eta[0,:] = (1/self._dx)*(2.0*ddx*self.bkg_e + 0.5*self.delta_e*np.sqrt(np.pi)*self.sigma_e*(erf(arg1_e)+erf(arg2_e)))
            eta[1,:] = (1/self._dx)*(2.0*ddx*self.bkg_h + 0.5*self.delta_h*np.sqrt(np.pi)*self.sigma_h*(erf(arg1_m)+erf(arg2_m)))
        else:
            eta[0,:] = self.delta_e*np.exp(-u_e) + self.bkg_e
            eta[1,:] = self.delta_h*np.exp(-u_m) + self.bkg_h
        eta[2,:] = u_e_t*self.delta_e*np.exp(-u_e)
        eta[3,:] = u_m_t*self.delta_h*np.exp(-u_m)
        return eta
def _gaussian(self,x,t=0):
eta = np.zeros( [4,len(x)], order='F')
u_x_e,u_x_m = self._x_w_offset(x,)
u_e = (u_x_e/selfsigma_e)**2
u_m = (u_x_m/selfsigma_h)**2
eta[0,:] = self.delta_e*np.exp(-u_e) + self.bkg_e
eta[1,:] = self.delta_h*np.exp(-u_m) + self.bkg_h
return eta
def _gaussianf(self,x):
u = x - self.delta_corner + self.delta_length/2.0
r2 = u**2/self.delta_length**2
g = np.exp(-r2)
return g
    def _oscillate(self,x,t=0):
        """Material with a slab [corner, corner+length] vibrating in time;
        rows 2/3 carry the time derivative of the perturbation."""
        eta = np.zeros( [4,x.shape[0]], order='F')
        xid = self.delta_corner
        # boolean mask of the vibrating slab
        spand = ((x>=xid)*(x<=(xid+self.delta_length)))
        w,dw = self._get_vibrate(span=spand,t=t)
        eta[0,:] = self.bkg_e + self.delta_e*w
        eta[1,:] = self.bkg_h + self.delta_h*w
        eta[2,:] = self.delta_e*dw
        eta[3,:] = self.delta_h*dw
        return eta
    def _tanh_rip(self,x,t):
        """Moving 1-D tanh front; rows 2/3 hold the (negative) time rate.

        NOTE(review): the tanh argument is not divided by sigma while the
        derivative rows do divide by sigma -- confirm that is intended.
        """
        eta = np.zeros( [4,len(x)], order='F')
        u_x_e,u_x_m = self._x_w_offset(x,v=[self.delta_velocity_e,self.delta_velocity_h],t=t)
        eta[0,:] = (self.delta_e/2.0)*(1.0 + np.tanh(u_x_e)) + self.bkg_e
        eta[1,:] = (self.delta_h/2.0)*(1.0 + np.tanh(u_x_m)) + self.bkg_h
        eta[2,:] = -(self.delta_e*self.delta_velocity_e/(2.0*self.sigma_e))/(np.cosh(u_x_e)**2)
        eta[3,:] = -(self.delta_h*self.delta_velocity_h/(2.0*self.sigma_h))/(np.cosh(u_x_m)**2)
        return eta
def _homogeneous(self,x,t=0):
eta = np.zeros( [4,len(x)], order='F')
eta[0,:] = self.bkg_e
eta[1,:] = self.bkg_h
return eta
    def _calculate_n(self):
        """Refresh ``bkg_n``/``n_max`` from the eta arrays.

        NOTE(review): this indexes ``bkg_eta[2]``, but the base __init__
        sizes ``bkg_eta`` as ``num_aux//2 == 2`` entries for the 1-D
        material -- looks out of range; confirm bkg_eta is resized
        elsewhere before this is called.
        """
        eta = self.bkg_eta
        if hasattr(self,'fiber_eta'):
            eta = eta + self.fiber_eta
        if hasattr(self,'delta_eta'):
            eta = eta + self.delta_eta
        self.bkg_n = np.sqrt(self.bkg_eta[0]*self.bkg_eta[2])
        self.n_max = np.sqrt(eta[0]*eta[2])
        return
def __init__(self,normalized=True,shape='homogeneous'):
self.num_aux = 4
self.num_dim = 1
self.options = {}
super(Material1D,self).__init__(normalized,shape)
self.dim = 'x'
self.bkg_e = 1.0
self.bkg_h = 1.0
self._dx = 1
class Material2D(Material):
def setup(self,options={}):
self.general_setup()
    def _calculate_n(self):
        """Refresh per-axis background and peak refractive indices.

        NOTE(review): ``len(self.fiber_eta)==1`` is only true for an array
        of length 1, so the single-fiber case (length num_aux//2) takes
        the else branch and adds the scalar ``fiber_eta[0]`` -- confirm
        that is intended.
        """
        eta = self.bkg_eta
        if hasattr(self,'fiber_eta'):
            if len(self.fiber_eta)==1:
                eta = eta + self.fiber_eta
            else:
                eta = eta + self.fiber_eta[0]
        if hasattr(self,'delta_eta'):
            eta = eta + self.delta_eta
        self.bkg_n[0] = np.sqrt(self.bkg_eta[0]*self.bkg_eta[2])
        self.bkg_n[1] = np.sqrt(self.bkg_eta[1]*self.bkg_eta[2])
        self.n_max[0] = np.sqrt(eta[0]*eta[2])
        self.n_max[1] = np.sqrt(eta[1]*eta[2])
        return
    def _gaussian_rip(self,x,y,t):
        """Moving 2-D Gaussian RIP; planes 0-2 are eta, planes 3-5 the
        corresponding time derivatives."""
        eta = np.zeros( [6,x.shape[0],y.shape[1]], order='F')
        _r2 = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
        _rt = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
        # which coordinates participate, per self.dim
        u = v = False
        if self.dim=='x': u = True
        if self.dim=='y': v = True
        if self.dim=='xy': u = v = True
        if u:
            for i in range(0,3):
                _temp1 = x - self.offset[0,i] - self.delta_velocity[0,i]*t
                _r2[i] = (_temp1/self.delta_sigma[0,i])**2
                _rt[i] = (_temp1*self.delta_velocity[0,i])/(self.delta_sigma[0,i]**2)
        if v:
            for i in range(0,3):
                _temp2 = y - self.offset[1,i] - self.delta_velocity[1,i]*t
                _r2[i] = _r2[i] + (_temp2/self.delta_sigma[1,i])**2
                _rt[i] = _rt[i] + (_temp2*self.delta_velocity[1,i])/(self.delta_sigma[1,i]**2)
        _r2 = np.exp(-_r2)
        # chain rule: d/dt exp(-r2) = 2*exp(-r2)*sum(v*u/sigma^2)
        _rt = 2.0*_r2*_rt
        for i in range(0,3):
            eta[i ] = self.delta_sign*self.delta_eta[i]*_r2[i] + self.bkg_eta[i]
            eta[i+3] = self.delta_sign*self.delta_eta[i]*_rt[i]
        return eta
def _averaged_gauss(self,x,dx=None,s=1.0,xo=0.0,v=0.0,t=0.0):
from scipy.special import erf
arg = xo + v*t - x
if dx is None:
dx = self._dx
ddx = dx/2.0
erravg = (np.sqrt(np.pi)*s*(erf((ddx + arg)/s) + erf((ddx - arg)/s)))/(2.0*dx)
return erravg
    def _gaussian_rip_averaged(self,x,y,t=0):
        """Cell-averaged moving 2-D Gaussian RIP.

        Profiles (planes 0-2) use erf-based cell averages; the derivative
        planes (3-5) still use the pointwise Gaussian -- TODO confirm that
        mix is intended.
        """
        eta = np.zeros( [6,x.shape[0],y.shape[1]], order='F')
        _r2 = np.ones( [3,x.shape[0],y.shape[1]], order='F')
        _rp = np.ones( [3,x.shape[0],y.shape[1]], order='F')
        _rt = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
        u = v = False
        if self.dim=='x': u = True
        if self.dim=='y': v = True
        if self.dim=='xy': u = v = True
        if u:
            for i in range(0,3):
                _r2[i] = self._averaged_gauss(x,s=self.delta_sigma[0,i],xo=self.offset[0,i],v=self.delta_velocity[0,i],t=t)
                _temp1 = x - self.offset[0,i] - self.delta_velocity[0,i]*t
                _rp[i] = (_temp1/self.delta_sigma[0,i])**2
                _rt[i] = (_temp1*self.delta_velocity[0,i])/(self.delta_sigma[0,i]**2)
        if v:
            for i in range(0,3):
                # averages multiply dimension-wise (separable Gaussian)
                _r2[i] = (_r2[i])*self._averaged_gauss(y,s=self.delta_sigma[1,i],xo=self.offset[1,i],v=self.delta_velocity[1,i],t=t)
                _temp2 = y - self.offset[1,i] - self.delta_velocity[1,i]*t
                _rp[i] = _rp[i] + (_temp2/self.delta_sigma[1,i])**2
                _rt[i] = _rt[i] + (_temp2*self.delta_velocity[1,i])/(self.delta_sigma[1,i]**2)
        _rp = np.exp(-_rp)
        _rt = 2.0*_rp*_rt
        for i in range(0,3):
            eta[i ] = self.delta_eta[i]*_r2[i] + self.bkg_eta[i]
            eta[i+3] = self.delta_eta[i]*_rt[i]
        return eta
def _gaussianf(self,x,y):
u = x - (self.delta_corner[0] + self.delta_smooth_length/2.0)
v = y - (self.delta_corner[1] + self.delta_smooth_width/2.0)
r2 = u**2/self.delta_length**2 + v**2/self.delta_width**2
g = np.exp(-r2)
return g
def _sinsin(self,x,y):
l = self.delta_smooth_length
w = self.delta_smooth_width
s = np.sin(self.delta_smooth_np[0]*x*np.pi/l)*np.sin(self.delta_smooth_np[1]*y*np.pi/w)
s = s*(((x/l)<=1.0)*((y/w)<=1.0))
return s
    def _rotating_sinsin(self,x,y,t):
        """Rotating sin*sin envelope and its time derivative.

        Returns (s, ds/dt): the pattern rotates with angular velocity
        ``delta_angular_velocity`` and pulses with ``delta_omega``.
        """
        p = self.delta_angular_velocity
        o = self.delta_omega
        l = self.delta_smooth_length
        w = self.delta_smooth_width
        xo = l/2
        yo = w/2
        # rescale coordinates to the pattern's period, centred on the box
        x = self.delta_smooth_np[0]*(x-xo)*np.pi/xo
        y = self.delta_smooth_np[1]*(y-yo)*np.pi/yo
        s = np.sin(o*t)*(np.sin(x*np.cos(p*t) - y*np.sin(p*t))*np.sin(x*np.sin(p*t) + y*np.cos(p*t)))
        # chain-rule derivative of s with respect to t
        ds = -p*np.cos(x*np.cos(p*t) - y*np.sin(p*t))*np.sin(o*t)*(y*np.cos(p*t) + x*np.sin(p*t))* \
            np.sin(y*np.cos(p*t) + x*np.sin(p*t)) + (p*np.cos(y*np.cos(p*t) + x*np.sin(p*t))* \
            np.sin(o*t)*(x*np.cos(p*t) - y*np.sin(p*t)) + o*np.cos(o*t)*np.sin(y*np.cos(p*t) + x*np.sin(p*t)))* \
            np.sin(x*np.cos(p*t) - y*np.sin(p*t))
        return s,ds
def _gaussian(self,x,y,t=0):
eta = np.zeros( [6,x.shape[0],y.shape[1]], order='F')
_r2 = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
u = v = False
if self.dim=='x': u = True
if self.dim=='y': v = True
if self.dim=='xy': u = v = True
if u:
for i in range(0,3): _r2[i] = ((x - self.offset[0,i])/self.sigma[0,i])**2
if v:
for i in range(0,3): _r2[i] = _r2[i] + ((y - self.offset[0,i])/self.sigma[0,i])**2
for i in range(0,3): eta[i] = self.delta_eta[i]*np.exp(-_r2[i]) + self.bkg_eta[i]
return eta
def _tanh_rip(self,x,t):
eta = np.zeros( [6,x.shape[0],y.shape[1]], order='F')
_r2 = np.zeros( [3,x.shape[0],y.shape[1]], order='F')
u = v = False
if self.dim=='x': u = True
if self.dim=='y': v = True
if self.dim=='xy': u = v = True
if u:
for i in range(0,3): _r2[i] = (x - self.delta_velocity[0,i] - self.offset[0,i])/self.delta_sigma[0,i]
_rt = self.delta_velocity[0,:]/(2.0*self.delta_sigma[0,:])
if v:
for i in range(0,3): _r2[i] = _r2[i] + (y - self.delta_velocity[1,i] - self.offset[1,i])/self.delta_sigma[1,i]
_rt = _rt + self.delta_velocity[1,:]/(2.0*self.delta_sigma[1,:])
for i in range(0,3):
eta[i] = (self.delta_eta[i]/2.0)*(1.0 + np.tanh(_r2[i])) + self.bkg_eta[i]
eta[i+3] = -(self.delta_eta[i]*_rt[i])*(1.0/np.cosh(_r2[i])**2)
return eta
def _homogeneous(self,x,y,t=0):
eta = np.zeros( [6,x.shape[0],y.shape[1]], order='F')
for i in range(0,3): eta[i] = self.bkg_eta[i]
return eta
def _single_fiber(self,x,y,t=0):
eta = np.zeros( [6,x.shape[0],y.shape[1]], order='F')
yi = self.fiber_corner[1]
xi = self.fiber_corner[0]
spany = (y>=yi)*(y<=(yi + self.fiber_width ))
spanx = (x>=xi)*(y<=(xi + self.fiber_length))
span = spanx*spany
for i in range(0,3): eta[i] = self.bkg_eta[i] + self.fiber_eta[i]*span
return eta
def _double_fiber(self,x,y,t=0):
eta = np.zeros( [6,x.shape[0],y.shape[1]], order='F')
y1i,y2i = self.fiber_corner[:,1]
x1i,x2i = self.fiber_corner[:,0]
span1y = ((y>=y1i)*(y<=(y1i+self.fiber_width[0])))
span2y = ((y>=y2i)*(y<=(y2i+self.fiber_width[1])))
span1x = ((x>=x1i)*(x<=(x1i+self.fiber_length[0])))
span2x = ((x>=x2i)*(x<=(x2i+self.fiber_length[1])))
span1 = span1y*span1x
span2 = span2y*span2x
for i in range(0,3): eta[i] = self.bkg_eta[i] + self.fiber_eta[0,i]*span1 + self.fiber_eta[1,i]*span2
return eta
    def _oscillate(self,x,y,t=0):
        """Static fiber plus a vibrating rectangular region; planes 3-5
        get the time derivative of the vibrating part."""
        eta = np.zeros( [6,x.shape[0],y.shape[1]], order='F')
        xi,yi = self.fiber_corner
        span = ((y>=yi)*(y<=(yi+self.fiber_width)))*((x>=xi)*(x<=(xi+self.fiber_length)))
        xid,yid = self.delta_corner
        spand = ((y>=yid)*(y<=(yid+self.delta_width)))*((x>=xid)*(x<=(xid+self.delta_length)))
        if self.delta_angular_velocity is None:
            w,dw = self._get_vibrate(x=x,y=y,span=spand,t=t)
        else:
            # rotating envelope: evaluate the pattern, then mask it
            w,dw = self._rotating_sinsin(x,y,t)
            w = w*spand
            dw = dw*spand
        for i in range(0,3):
            eta[i] = self.bkg_eta[i] + self.fiber_eta[i]*span + self.delta_eta[i]*w
            eta[i+3] = self.delta_eta[i]*dw
        return eta
    def _expanding(self,x,y,t=0.0):
        """Gaussian whose widths shrink as 1/(1+rate*t), sampled as a RIP.

        NOTE(review): ``delta_radii``/``delta_expansion_rate`` are sized
        ``num_aux//2`` but indexed by dimension *k* here -- confirm.
        """
        for k in range(2): self.delta_sigma[k,:].fill(self.delta_radii[k]/((1+self.delta_expansion_rate[k]*t)))
        eta = self._gaussian_rip(x,y,t)
        return eta
def __init__(self,normalized=True,shape='homogeneous',metal=False):
self.num_aux = 6
self.num_dim = 2
self.options = {}
super(Material2D,self).__init__(normalized,shape)
self.dim = 'x'
self.metal = metal
self._dx = 1.0
self._dy = 1.0
class Material3D(Material):
def setup(self,options={}):
self.general_setup()
    def _calculate_n(self):
        """Refresh per-axis background/peak indices from the eta arrays.

        NOTE(review): unlike the 1-D/2-D versions this returns the summed
        eta -- confirm callers rely on that.
        """
        eta = self.bkg_eta
        if hasattr(self,'fiber_eta'):
            eta = eta + self.fiber_eta
        if hasattr(self,'delta_eta'):
            eta = eta + self.delta_eta
        self.bkg_n[0] = np.sqrt(self.bkg_eta[0]*self.bkg_eta[3])
        self.bkg_n[1] = np.sqrt(self.bkg_eta[1]*self.bkg_eta[4])
        self.bkg_n[2] = np.sqrt(self.bkg_eta[2]*self.bkg_eta[5])
        self.n_max[0] = np.sqrt(eta[0]*eta[3])
        self.n_max[1] = np.sqrt(eta[1]*eta[4])
        self.n_max[2] = np.sqrt(eta[2]*eta[5])
        return eta
    def _gaussian_rip(self,x,y,z,t=0):
        """Moving 3-D Gaussian RIP; volumes 0-5 are eta, 6-11 their time
        derivatives.  The active coordinates come from ``dim[self.dim]``."""
        grid = [x,y,z]
        eta = np.zeros( [12,x.shape[0],y.shape[1],z.shape[2]], order='F')
        _r2 = np.zeros( [ 6,x.shape[0],y.shape[1],z.shape[2]], order='F')
        _rt = np.zeros( [ 6,x.shape[0],y.shape[1],z.shape[2]], order='F')
        dims = dim[self.dim]
        # accumulate the squared radius and rate term per active coordinate
        if len(self.dim)==1:
            p = dims[0]
            for i in range(0,6):
                _temp1 = grid[p] - self.offset[p,i] - self.delta_velocity[p,i]*t
                _r2[i] = (_temp1/self.delta_sigma[p,i])**2
                _rt[i] = (_temp1*self.delta_velocity[p,i])/(self.delta_sigma[p,i]**2)
        if len(self.dim)==2:
            p,q = dims
            for i in range(0,6):
                _temp1 = grid[p] - self.offset[p,i] - self.delta_velocity[p,i]*t
                _temp2 = grid[q] - self.offset[q,i] - self.delta_velocity[q,i]*t
                _r2[i] = (_temp1/self.delta_sigma[p,i])**2 + (_temp2/self.delta_sigma[q,i])**2
                _rt[i] = (_temp1*self.delta_velocity[p,i])/(self.delta_sigma[p,i]**2) + \
                    (_temp2*self.delta_velocity[q,i])/(self.delta_sigma[q,i]**2)
        if len(self.dim)==3:
            p,q,r = dims
            for i in range(0,6):
                _temp1 = grid[p] - self.offset[p,i] - self.delta_velocity[p,i]*t
                _temp2 = grid[q] - self.offset[q,i] - self.delta_velocity[q,i]*t
                _temp3 = grid[r] - self.offset[r,i] - self.delta_velocity[r,i]*t
                _r2[i] = (_temp1/self.delta_sigma[p,i])**2 + (_temp2/self.delta_sigma[q,i])**2 + (_temp3/self.delta_sigma[r,i])**2
                _rt[i] = (_temp1*self.delta_velocity[p,i])/(self.delta_sigma[p,i]**2) + \
                    (_temp2*self.delta_velocity[q,i])/(self.delta_sigma[q,i]**2) + \
                    (_temp3*self.delta_velocity[r,i])/(self.delta_sigma[r,i]**2)
        _r2 = np.exp(-_r2)
        # chain rule: d/dt exp(-r2) = 2*exp(-r2)*sum(v*u/sigma^2)
        _rt = 2.0*_r2*_rt
        for i in range(0,6):
            eta[i ] = self.delta_eta[i]*_r2[i] + self.bkg_eta[i]
            eta[i+6] = self.delta_eta[i]*_rt[i]
        return eta
def _gaussianf(self,x,y,z):
u = x - (self.delta_corner[0] + self.delta_smooth_length/2.0)
v = y - (self.delta_corner[1] + self.delta_smooth_width/2.0)
v = z - (self.delta_corner[2] + self.delta_smooth_height/2.0)
r2 = u**2/self.delta_length**2 + v**2/self.delta_width**2 + w**2/self.delta_length**2
g = np.exp(-r2)
return g
    def _gaussian(self,x,y,z,t=0):
        """Static 3-D Gaussian perturbation; derivative volumes stay zero."""
        grid = [x,y,z]
        eta = np.zeros( [12,x.shape[0],y.shape[1],z.shape[2]], order='F')
        _r2 = np.zeros( [ 6,x.shape[0],y.shape[1],z.shape[2]], order='F')
        dims = dim[self.dim]
        # accumulate the squared radius per active coordinate
        if len(self.dim)==1:
            p = dims[0]
            for i in range(0,6):
                _r2[i] = ((grid[p] - self.offset[p,i])/self.delta_sigma[p,i])**2
        if len(self.dim)==2:
            p,q = dims
            for i in range(0,6):
                _temp1 = ((grid[p] - self.offset[p,i])/self.delta_sigma[p,i])**2
                _temp2 = ((grid[q] - self.offset[q,i])/self.delta_sigma[q,i])**2
                _r2[i] = _temp1 + _temp2
        if len(self.dim)==3:
            p,q,r = dims
            for i in range(0,6):
                _temp1 = ((grid[p] - self.offset[p,i])/self.delta_sigma[p,i])**2
                _temp2 = ((grid[q] - self.offset[q,i])/self.delta_sigma[q,i])**2
                _temp3 = ((grid[r] - self.offset[r,i])/self.delta_sigma[r,i])**2
                _r2[i] = _temp1 + _temp2 + _temp3
        _r2 = np.exp(-_r2)
        for i in range(0,6): eta[i] = self.delta_eta[i]*_r2[i] + self.bkg_eta[i]
        return eta
def _tanh_rip(self,x,y,z,t=0):
grid = [x,y,z]
eta = np.zeros( [12,x.shape[0],y.shape[1],z.shape[2]], order='F')
_r2 = np.zeros( [ 6,x.shape[0],y.shape[1],z.shape[2]], order='F')
_rt = _r2
dims = dim[self.dim]
if len(self.dim)==1:
p = dims[0]
for i in range(0,6):
_r2[i] = ((grid[p] - self.offset[p,i] - self.delta_velocity[p,i]*t)/self.delta_sigma[p,i])
_rt[i] = self.delta_velocity[p,i]/(2.0*self.delta_sigma[p,i]**2)
if len(self.dim)==2:
p,q = dims
for i in range(0,6):
_temp1 = ((grid[p] - self.offset[p,i] - self.delta_velocity[p,i]*t)/self.delta_sigma[p,i])
_temp2 = ((grid[q] - self.offset[q,i] - self.delta_velocity[q,i]*t)/self.delta_sigma[q,i])
_rt[i] = self.delta_velocity[p,i]/(2.0*self.delta_sigma[p,i]) + \
self.delta_velocity[q,i]/(2.0*self.delta_sigma[q,i])
_r2[i] = _temp1 + _temp2
if len(self.dim)==2:
p,q,r = dims
for i in range(0,6):
_temp1 = ((grid[p] - self.offset[p,i] - self.delta_velocity[p,i]*t)/self.delta_sigma[p,i])
_temp2 = ((grid[q] - self.offset[q,i] - self.delta_velocity[q,i]*t)/self.delta_sigma[q,i])
_temp3 = ((grid[r] - self.offset[r,i] - self.delta_velocity[r,i]*t)/self.delta_sigma[r,i])
_rt[i] = self.delta_velocity[p,i]/(2.0*self.delta_sigma[p,i]**2) + \
self.delta_velocity[q,i]/(2.0*self.delta_sigma[q,i]) + \
self.delta_velocity[r,i]/(2.0*self.delta_sigma[r,i])
_r2[i] = _temp1 + _temp2 + _temp3
_r2 = 1.0 + np.tanh(_r2)
_rt = _rt/(np.cosh(r_2)**2)
for i in range(0,6):
eta[i ] = self.delta_eta[i]*_r2[i] + self.bkg_eta[i]
eta[i+6] = self.delta_eta[i]*_rt[i]
return eta
def _homogeneous(self,x,y,z,t=0):
eta = np.zeros( [12,x.shape[0],y.shape[1],z.shape[2]], order='F')
for i in range(0,6): eta[i] = self.bkg_eta[i]
return eta
def _single_fiber(self,x,y,z,t=0):
eta = np.zeros( [12,x.shape[0],y.shape[1],z.shape[2]], order='F')
xi,yi,zi = self.fiber_corner
spany = (y>=yi)*(y<=(yi + self.fiber_width ))
spanx = (x>=xi)*(y<=(xi + self.fiber_length))
spanz = (z>=zi)*(z<=(zi + self.fiber_height))
span = spanx*spany*spanz
for i in range(0,6): eta[i] = self.bkg_eta[i] + self.fiber_eta[i]*span
return eta
def _double_fiber(self,x,y,z,t=0):
eta = np.zeros( [12,x.shape[0],y.shape[1],z.shape[2]], order='F')
y1i,y2i = self.fiber_corner[:,1]
xi1,x2i = self.fiber_corner[:,0]
zi1,z2i = self.fiber_corner[:,2]
span1y = ((y>=y1i)*(y<=(y1i+self.fiber_width[0])))
span2y = ((y>=y2i)*(y<=(y2i+self.fiber_width[1])))
span1x = ((x>=x1i)*(x<=(x1i+self.fiber_length[0])))
span2x = ((x>=x2i)*(x<=(x2i+self.fiber_length[1])))
span1z = ((z>=z1i)*(z<=(z1i+self.fiber_height[0])))
span2z = ((z>=z2i)*(z<=(z2i+self.fiber_height[1])))
span1 = span1y*span1x*span1z
span2 = span2y*span2x*span2z
for i in range(0,6): eta[i] = self.bkg_eta[i] + self.fiber_eta[0,i]*span1 + self.fiber_eta[1,i]*span2
return eta
def _oscillate(self,x,y,z,t=0):
eta = np.zeros( [12,x.shape[0],y.shape[1],z.shape[2]], order='F')
xi,yi,zi = self.fiber_corner
span = ((y>=yi)*(y<=(yi+self.fiber_width)))*((x>=xi)*(x<=(xi+self.fiber_length)))*((z>=zi)*(z<=(zi+self.fiber_height)))
xid,yid,zid = self.delta_corner
spand = ((y>=yid)*(y<=(yid+self.delta_width)))*((x>=xid)*(x<=(xid+self.delta_length)))*((z>=zid)*(z<=(zid+self.delta_height)))
w,dw = self._get_vibrate(span=spand,t=t)
for i in range(0,6): eta[i] = self.bkg_eta[i] + self.fiber_eta[i]*span + self.delta_eta[i]*w
return eta
    def __init__(self,normalized=True,shape='homogeneous',metal=False):
        # 12 auxiliary components: six material values (eta[0:6]) plus six
        # time derivatives (eta[6:12]) -- see the *_rip profiles above.
        self.num_aux = 12
        self.num_dim = 3
        self.options = {}
        # NOTE(review): num_aux/num_dim are set before the base constructor
        # runs, presumably because it consumes them -- confirm against the
        # Material base class (not visible here).
        super(Material3D,self).__init__(normalized,shape)
        # Default to variation along x only.
        self.dim = 'x'
        self.metal = metal
        # Default grid spacings.
        self._dx = 1.0
        self._dy = 1.0
        self._dz = 1.0
| MaxwellGEMS/emclaw | emclaw/utils/materials.py | Python | lgpl-3.0 | 37,566 |
import sqlalchemy as sa
from sqlalchemy_utils import batch_fetch, with_backrefs
from tests import TestCase
class TestBatchFetchDeepRelationships(TestCase):
    """Exercises ``batch_fetch`` across nested relationship paths
    (Category -> articles -> tags), asserting that once fetched, no extra
    SQL queries are issued when the relationships are accessed."""
    def create_models(self):
        """Declare the test schema: User (unused by tests), Category 1-N
        Article, and Article N-N Tag via the ``article_tag`` table."""
        class User(self.Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
            name = sa.Column(sa.Unicode(255))
        class Category(self.Base):
            __tablename__ = 'category'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
        class Article(self.Base):
            __tablename__ = 'article'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            category_id = sa.Column(sa.Integer, sa.ForeignKey(Category.id))
            # Backref gives Category.articles used throughout the tests.
            category = sa.orm.relationship(
                Category,
                primaryjoin=category_id == Category.id,
                backref=sa.orm.backref(
                    'articles'
                )
            )
        # Association table for the Article<->Tag many-to-many.
        article_tag = sa.Table(
            'article_tag',
            self.Base.metadata,
            sa.Column(
                'article_id',
                sa.Integer,
                sa.ForeignKey('article.id', ondelete='cascade')
            ),
            sa.Column(
                'tag_id',
                sa.Integer,
                sa.ForeignKey('tag.id', ondelete='cascade')
            )
        )
        class Tag(self.Base):
            __tablename__ = 'tag'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            # Backref gives Article.tags used throughout the tests.
            articles = sa.orm.relationship(
                Article,
                secondary=article_tag,
                backref=sa.orm.backref(
                    'tags'
                )
            )
        self.User = User
        self.Category = Category
        self.Article = Article
        self.Tag = Tag
    def init_data(self):
        """Seed five articles, three tags and two categories.

        Articles are flushed first so they have primary keys before the
        tag collections and category memberships are assigned.
        """
        articles = [
            self.Article(name=u'Article 1'),
            self.Article(name=u'Article 2'),
            self.Article(name=u'Article 3'),
            self.Article(name=u'Article 4'),
            self.Article(name=u'Article 5')
        ]
        self.session.add_all(articles)
        self.session.flush()
        tags = [
            self.Tag(name=u'Tag 1'),
            self.Tag(name=u'Tag 2'),
            self.Tag(name=u'Tag 3')
        ]
        # Article 1 gets all tags; Article 4 gets tags 2 and 3.
        articles[0].tags = tags
        articles[3].tags = tags[1:]
        category = self.Category(name=u'Category #1')
        category.articles = articles[0:2]
        category2 = self.Category(name=u'Category #2')
        category2.articles = articles[2:]
        self.session.add(category)
        self.session.add(category2)
        self.session.commit()
    def test_supports_empty_related_entities(self):
        """batch_fetch on a category with no articles must not blow up and
        must not issue queries on later attribute access."""
        category = self.Category(name=u'Category #1')
        self.session.add(category)
        self.session.commit()
        categories = self.session.query(self.Category).all()
        batch_fetch(
            categories,
            'articles',
            'articles.tags'
        )
        query_count = self.connection.query_count
        assert not categories[0].articles
        # Accessing the relationship above must not have queried again.
        assert self.connection.query_count == query_count
    def test_deep_relationships(self):
        """Nested path articles.tags is prefetched for every category."""
        self.init_data()
        categories = self.session.query(self.Category).all()
        batch_fetch(
            categories,
            'articles',
            'articles.tags'
        )
        query_count = self.connection.query_count
        categories[0].articles[0].tags
        assert self.connection.query_count == query_count
        categories[1].articles[1].tags
        assert self.connection.query_count == query_count
    def test_many_to_many_backref_population(self):
        """with_backrefs also populates Tag.articles without extra SQL."""
        self.init_data()
        categories = self.session.query(self.Category).all()
        batch_fetch(
            categories,
            'articles',
            with_backrefs('articles.tags'),
        )
        query_count = self.connection.query_count
        tags = categories[0].articles[0].tags
        tags2 = categories[1].articles[1].tags
        tags[0].articles
        tags2[0].articles
        names = [article.name for article in tags[0].articles]
        assert u'Article 1' in names
        # None of the accesses above may have issued additional queries.
        assert self.connection.query_count == query_count
| tonyseek/sqlalchemy-utils | tests/batch_fetch/test_deep_relationships.py | Python | bsd-3-clause | 4,368 |
# Jinja2 template for the generated gallery page: a Bootstrap 4 thumbnail
# grid that opens a modal carousel.  Rendered with a single ``images`` list;
# thumbnails are lazy-loaded via lazysizes (``lazyload`` class + data-src).
html = """
<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <title>Gallery</title>
  <meta name="description" content="The HTML5 Herald">
  <meta name="author" content="SitePoint">
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css" integrity="sha384-TX8t27EcRE3e/ihU7zmQxVncDAy5uIKz4rEkgIXeMed4M0jlfIDPvg6uqKI2xXr2" crossorigin="anonymous">
  <script src="https://code.jquery.com/jquery-3.5.1.slim.min.js" integrity="sha384-DfXdz2htPH0lsSSs5nCTpuj/zy4C+OGpamoFVy38MVBnE+IbbVYUew+OrCXaRkfj" crossorigin="anonymous"></script>
  <script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ho+j7jyWK8fNQe+A12Hb8AhRq26LrZ/JpcUGGOn+Y7RsweNrtN/tE3MoK7ZeZDyx" crossorigin="anonymous"></script>
  <link rel="stylesheet" href="bootstrap-gallery/styles.css">
  <script src="js/lazysizes.min.js" async=""></script>
</head>
<!-- Gallery -->
<!--
Gallery is linked to lightbox using data attributes.
To open lightbox, this is added to the gallery element: {data-toggle="modal" data-target="#exampleModal"}.
To open carousel on correct image, this is added to each image element: {data-target="#carouselExample" data-slide-to="0"}.
Replace '0' with corresponding slide number.
-->
<body>
<div class="row" id="gallery" data-toggle="modal" data-target="#exampleModal">
    {% for image in images %}
    <div class="col-3 col-lg-2">
      <img class="w-100 lazyload" data-src="thumbs/{{image}}" alt="First slide" data-target="#carouselExample" data-slide-to="{{images.index(image)}}">
    </div>
    {% endfor %}
</div>
<!-- Modal -->
<!--
This part is straight out of Bootstrap docs. Just a carousel inside a modal.
-->
<div class="modal fade" id="exampleModal" tabindex="-1" role="dialog" aria-hidden="true">
  <div class="modal-dialog" role="document">
    <div class="modal-content">
      <div class="modal-header">
        <button type="button" class="close" data-dismiss="modal" aria-label="Close">
          <span aria-hidden="true">&times;</span>
        </button>
      </div>
      <div class="modal-body">
        <div id="carouselExample" class="carousel slide" data-ride="carousel">
          <!-- <ol class="carousel-indicators">
            <li data-target="#carouselExample" data-slide-to="0" class="active"></li>
            {% for image in images %}
            <li data-target="#carouselExample" data-slide-to="{{images.index(image)}}"></li>
            {% endfor %}
          </ol> -->
          <div class="carousel-inner">
            <div class="carousel-item active">
              <img class="d-block w-100" src="large_imgs/{{images[0]}}" alt="First slide">
              <div class="carousel-caption d-none d-md-block">
                <p>{{images[0]}}</p>
              </div>
            </div>
            {% for image in images[1:] %}
            <div class="carousel-item">
              <img class="d-block w-100 lazyload" data-src="large_imgs/{{image}}" alt="Second slide">
              <div class="carousel-caption d-none d-md-block">
                <p>{{image}}</p>
              </div>
            </div>
            {% endfor %}
          </div>
          <a class="carousel-control-prev" href="#carouselExample" role="button" data-slide="prev">
            <span class="carousel-control-prev-icon" aria-hidden="true"></span>
            <span class="sr-only">Previous</span>
          </a>
          <a class="carousel-control-next" href="#carouselExample" role="button" data-slide="next">
            <span class="carousel-control-next-icon" aria-hidden="true"></span>
            <span class="sr-only">Next</span>
          </a>
        </div>
      </div>
      <div class="modal-footer">
        <button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
      </div>
    </div>
  </div>
</div>
</body>
</html>
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for validator.api.middleware.ssl """
from __future__ import unicode_literals
import mock
from validator.api.middleware.ssl import SSLMiddleware
from validator.tests.base import ValidatorTestCase
class SSLMiddlewareTestCase(ValidatorTestCase):
    """ Tests for class SSLMiddleware """

    def setUp(self):
        """ Create a SSLMiddleware instance """
        super(SSLMiddlewareTestCase, self).setUp()
        self.item = SSLMiddleware()

    def test_process_request(self):
        """ process_request() should return whatever ``external`` yields """
        self.item.external = mock.MagicMock()
        # Renamed from ``input`` so the local no longer shadows the builtin.
        request = "MyInput"
        expected = "OK"
        self.item.external.return_value = "OK"
        observed = self.item.process_request(request)
        self.assertEqual(expected, observed)

    def tearDown(self):
        """ Cleanup the SSLMiddleware instance """
        super(SSLMiddlewareTestCase, self).tearDown()
        # NOTE(review): ``self.m`` is presumably a mox fixture created by
        # ValidatorTestCase -- confirm it exists on the base class.
        self.m.UnsetStubs()
        self.m.ResetAll()
| pmverdugo/fiware-validator | validator/tests/api/middleware/test_ssl.py | Python | apache-2.0 | 1,568 |
from datetime import datetime, timedelta
import json
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import json_handler
import corehq.apps.style.utils as style_utils
register = template.Library()
@register.filter
def JSON(obj):
    """Serialize ``obj`` to JSON and mark the result safe for templates."""
    serialized = json.dumps(obj, default=json_handler)
    return mark_safe(serialized)
@register.filter
def to_javascript_string(obj):
    """JSON-encode ``obj`` so it can be embedded inside a <script> tag."""
    # seriously: http://stackoverflow.com/a/1068548/8207
    encoded = JSON(obj).replace('</script>', '<" + "/script>')
    return mark_safe(encoded)
@register.filter
def BOOL(obj):
    """Render a value as the JavaScript boolean literal it maps to.

    Couch documents are first converted via ``to_json()`` when available.
    """
    try:
        obj = obj.to_json()
    except AttributeError:
        pass
    if obj:
        return 'true'
    return 'false'
@register.filter
def dict_lookup(dict, key):
    """Get an item from a dictionary (None when the key is absent)."""
    # The parameter keeps its historical name for template compatibility,
    # even though it shadows the builtin ``dict``.
    value = dict.get(key)
    return value
@register.filter
def array_lookup(array, index):
    """Get an item from an array, or None when ``index`` is past the end."""
    if index >= len(array):
        return None
    return array[index]
@register.simple_tag
def dict_as_query_string(dict, prefix=""):
    '''Convert a dictionary to a query string, minus the initial ?'''
    pairs = ["%s%s=%s" % (prefix, key, value) for key, value in dict.items()]
    return "&".join(pairs)
@register.filter
def add_days(date, days=1):
    '''Return a date ``days`` days after ``date``.

    Accepts either a date/datetime object or an ``m/d/Y`` string.
    '''
    span = timedelta(days=days)
    try:
        return date + span
    except TypeError:
        # A bare ``except`` here used to hide real errors.  Only the
        # string-date case (str + timedelta raises TypeError) should fall
        # through to parsing.
        return datetime.strptime(date, '%m/%d/%Y').date() + span
@register.filter
def concat(str1, str2):
    """Concatenate two values as strings."""
    return "{}{}".format(str1, str2)
# ``resource_versions`` maps static resource paths to cache-busting version
# strings; fall back to an empty mapping when the generated module is
# missing (ImportError) or unparsable (SyntaxError from a corrupt build).
try:
    from resource_versions import resource_versions
except (ImportError, SyntaxError):
    resource_versions = {}
@register.simple_tag
def static(url):
    """Return the full static URL for ``url`` with a cache-busting version
    query string (skipped for .less files)."""
    version = resource_versions.get(url)
    full_url = settings.STATIC_URL + url
    if version and not full_url.endswith('.less'):
        full_url += "?version=%s" % version
    return full_url
@register.simple_tag
def cachebuster(url):
    """Return the version string registered for ``url`` ("" when unknown)."""
    version = resource_versions.get(url, "")
    return version
@register.simple_tag
def new_static(url, **kwargs):
    """Successor to the ``static`` tag above.

    Resources with extensions that django-compressor may handle
    (.less/.css/.js) are only versioned when ``cache=True`` is passed
    explicitly; everything else is always versioned.
    """
    can_be_compressed = url.endswith(('.less', '.css', '.js'))
    use_cache = kwargs.pop('cache', False)
    full_url = settings.STATIC_URL + url
    if not can_be_compressed or use_cache:
        version = resource_versions.get(url)
        if version:
            full_url += "?version=%s" % version
    return full_url
@register.simple_tag
def domains_for_user(request, selected_domain=None):
    """
    Generate pulldown menu for domains.
    Cache the entire string alongside the couch_user's doc_id that can get invalidated when
    the user doc updates via save.
    """
    domain_list = []
    # 'public' is a pseudo-domain: render an empty list for it.
    if selected_domain != 'public':
        cached_domains = cache_core.get_cached_prop(request.couch_user.get_id, 'domain_list')
        if cached_domains:
            domain_list = [Domain.wrap(x) for x in cached_domains]
        else:
            try:
                domain_list = Domain.active_for_user(request.couch_user)
                cache_core.cache_doc_prop(request.couch_user.get_id, 'domain_list', [x.to_json() for x in domain_list])
            except Exception:
                if settings.DEBUG:
                    raise
                else:
                    # Production fallback: retry with the django user and
                    # report the failure instead of crashing the page.
                    domain_list = Domain.active_for_user(request.user)
                    notify_exception(request)
    # Flatten to the url/name pairs the dropdown template expects.
    domain_list = [dict(
        url=reverse('domain_homepage', args=[d.name]),
        name=d.long_display_name()
    ) for d in domain_list]
    context = {
        'is_public': selected_domain == 'public',
        'domain_list': domain_list,
        'current_domain': selected_domain,
    }
    # Template choice depends on which Bootstrap version the page uses.
    template = {
        style_utils.BOOTSTRAP_2: 'hqwebapp/partials/domain_list_dropdown.html',
        style_utils.BOOTSTRAP_3: 'style/includes/domain_list_dropdown.html',
    }[style_utils.bootstrap_version(request)]
    return mark_safe(render_to_string(template, context))
@register.simple_tag
def list_my_orgs(request):
    """Render the user's organizations as a stacked nav list."""
    items = []
    for org in request.couch_user.get_organizations():
        landing_url = reverse("orgs_landing", args=[org.name])
        items.append('<li><a href="%s">%s</a></li>' % (landing_url, org.title))
    return '<ul class="nav nav-pills nav-stacked">%s</ul>' % "".join(items)
@register.simple_tag
def commcare_user():
    """Translated, site-configured term for a CommCare (mobile) user."""
    term = settings.COMMCARE_USER_TERM
    return _(term)
@register.simple_tag
def hq_web_user():
    """Translated, site-configured term for an HQ web user."""
    term = settings.WEB_USER_TERM
    return _(term)
@register.filter
def mod(value, arg):
    """Return the remainder of ``value`` divided by ``arg``."""
    remainder = value % arg
    return remainder
# This is taken from https://code.djangoproject.com/ticket/15583
@register.filter(name='sort')
def listsort(value):
    """Return a key-sorted SortedDict copy of a dict or a sorted copy of a
    list; any other value is passed through unchanged."""
    if isinstance(value, dict):
        new_dict = SortedDict()
        # ``sorted`` instead of the old ``keys(); .sort()`` pair -- same
        # result, one pass, and safe on Python 3 dict views.
        for key in sorted(value.keys()):
            new_dict[key] = value[key]
        return new_dict
    elif isinstance(value, list):
        # ``sorted`` returns a new list, matching the original copy+sort.
        return sorted(value)
    else:
        return value
listsort.is_safe = True
@register.filter(name='getattr')
def get_attribute(obj, arg):
    """ Get attribute from obj

    Usage: {{ couch_user|getattr:"full_name" }}
    """
    missing = None
    return getattr(obj, arg, missing)
@register.filter
def pretty_doc_info(doc_info):
    """Render a doc_info with the standard pretty-print partial."""
    context = {'doc_info': doc_info}
    return render_to_string('hqwebapp/pretty_doc_info.html', context)
@register.filter
def toggle_enabled(request, toggle_name):
    """True when the named toggle is enabled for the request's user, or
    failing that, for the request's domain."""
    import corehq.toggles
    toggle = getattr(corehq.toggles, toggle_name)
    if hasattr(request, 'user'):
        user_result = toggle.enabled(request.user.username)
        if user_result:
            return user_result
    return hasattr(request, 'domain') and toggle.enabled(request.domain)
| SEL-Columbia/commcare-hq | corehq/apps/hqwebapp/templatetags/hq_shared_tags.py | Python | bsd-3-clause | 6,328 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (c) 2010-Today Elico Corp. All Rights Reserved.
# Author: LIN Yu <lin.yu@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import invoice
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| PierreFaniel/openerp-7.0 | l10n_cn_report_invoice/__init__.py | Python | agpl-3.0 | 1,191 |
import numpy as np
import tensorflow as tf
import connect_data
from neural_network_decision_tree import nn_decision_tree
import time
from sklearn.model_selection import train_test_split
from NNDT_RF import random_forest
# Feature/label matrices prepared by the ``connect_data`` module.
x = connect_data.feature
y = connect_data.label
# Fixed seed so the 70/30 train/test split is reproducible.
seed = 1990
X_train, X_test, y_train, y_test = train_test_split(x, y, train_size=0.70, random_state=seed)
# Positional arguments for random_forest: train pair, test pair and three
# integer hyper-parameters -- presumably tree count / cut count / leaf
# count; TODO confirm against NNDT_RF.random_forest's signature.
args = ([X_train, y_train],[X_test, y_test], 10, 100, 10)
start_time = time.time()
pred, test = random_forest(*args)
# Error rate: fraction of argmax predictions disagreeing with the labels.
print('error rate %.5f' % (1 - np.mean(np.argmax(pred, axis=1) == np.argmax(test, axis=1))))
print("--- %s seconds ---" % (time.time() - start_time))
| Knight13/Exploring-Deep-Neural-Decision-Trees | Connect-4/RF.py | Python | unlicense | 651 |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-5 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
import re
import rose.macro
class DuplicateChecker(rose.macro.MacroBase):
    """Returns settings whose duplicate status does not match their name."""
    # Section is marked duplicate=true but carries no numeric index.
    WARNING_DUPL_SECT_NO_NUM = ('incorrect "duplicate=true" metadata')
    # Section carries an index but its base id lacks duplicate=true.
    WARNING_NUM_SECT_NO_DUPL = ('{0} requires "duplicate=true" metadata')
    def validate(self, config, meta_config=None):
        """Return a list of errors, if any."""
        self.reports = []
        # Base sections already reported, to avoid duplicate warnings.
        sect_error_no_dupl = {}
        sect_keys = config.value.keys()
        sorter = rose.config.sort_settings
        sect_keys.sort(sorter)
        for section in sect_keys:
            node = config.get([section])
            # Skip plain settings; only section nodes are checked.
            if not isinstance(node.value, dict):
                continue
            metadata = self.get_metadata_for_config_id(section, meta_config)
            duplicate = metadata.get(rose.META_PROP_DUPLICATE)
            is_duplicate = duplicate == rose.META_PROP_VALUE_TRUE
            # Strip any (index) suffix to get the base section id.
            basic_section = rose.macro.REC_ID_STRIP.sub("", section)
            if is_duplicate:
                # duplicate=true on an un-indexed section is wrong.
                if basic_section == section:
                    self.add_report(section, None, None,
                                    self.WARNING_DUPL_SECT_NO_NUM)
            elif section != basic_section:
                # Indexed section whose base lacks duplicate=true: report
                # only once per base section.
                if basic_section not in sect_error_no_dupl:
                    sect_error_no_dupl.update({basic_section: 1})
                    no_index_section = rose.macro.REC_ID_STRIP_DUPL.sub(
                        "", section)
                    if no_index_section != section:
                        basic_section = no_index_section
                    warning = self.WARNING_NUM_SECT_NO_DUPL
                    # Only warn when the base section actually has metadata.
                    if self._get_has_metadata(metadata, basic_section,
                                              meta_config):
                        self.add_report(section, None, None,
                                        warning.format(basic_section))
        return self.reports
    def _get_has_metadata(self, metadata, basic_section, meta_config):
        """Return True if basic_section (or its settings) has any metadata
        beyond the implicit 'id' entry."""
        if metadata.keys() != ["id"]:
            return True
        for meta_keys, meta_node in meta_config.walk(no_ignore=True):
            meta_section = meta_keys[0]
            if len(meta_keys) > 1:
                continue
            if ((meta_section == basic_section or
                 meta_section.startswith(
                     basic_section + rose.CONFIG_DELIMITER)) and
                    isinstance(meta_node.value, dict)):
                return True
        return False
| ScottWales/rose | lib/python/rose/macros/duplicate.py | Python | gpl-3.0 | 3,446 |
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth import get_user_model
from .forms import SignUpForm
MyUser = get_user_model()
def home(request):
    """Show the feed to logged-in users, the welcome page to everyone else."""
    if not request.user.is_authenticated():
        return render(request, 'home/welcome.html', {})
    return render(request, 'home/feed.html', {})
@csrf_protect
def sign_up(request):
    """Display the registration form and create the account on valid POST."""
    registered = False
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            registered = True
    else:
        form = SignUpForm()
    context = {'form': form, 'registered': registered}
    return render(request, 'home/signup.html', context)
| TheRedLady/codebook | codebook/home/views.py | Python | gpl-3.0 | 720 |
import argparse
import json
import logging
import os
import sys
from tools import localpaths # noqa: flake8
from six import iteritems
from . import virtualenv
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
def load_commands():
    """Read every commands.json listed in the ``paths`` manifest and return
    a mapping of command name -> normalized command properties."""
    rv = {}
    with open(os.path.join(here, "paths"), "r") as f:
        # One relative directory per non-blank line, using '/' separators.
        paths = [item.strip().replace("/", os.path.sep) for item in f if item.strip()]
    for path in paths:
        abs_path = os.path.join(wpt_root, path, "commands.json")
        base_dir = os.path.dirname(abs_path)
        with open(abs_path, "r") as f:
            data = json.load(f)
        for command, props in iteritems(data):
            # "path" and "script" are mandatory; the rest have defaults.
            assert "path" in props
            assert "script" in props
            rv[command] = {
                "path": os.path.join(base_dir, props["path"]),
                "script": props["script"],
                "parser": props.get("parser"),
                "parse_known": props.get("parse_known", False),
                "help": props.get("help"),
                "virtualenv": props.get("virtualenv", True),
                "install": props.get("install", []),
                "requirements": [os.path.join(base_dir, item)
                                 for item in props.get("requirements", [])]
            }
    return rv
def parse_args(argv, commands):
    """Parse the global wpt options plus a subcommand name.

    Returns ``(namespace, leftover)`` where ``leftover`` holds the
    arguments destined for the subcommand's own parser.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--venv", action="store",
                        help="Path to an existing virtualenv to use")
    parser.add_argument("--debug", action="store_true",
                        help="Run the debugger in case of an exception")
    subcommands = parser.add_subparsers(dest="command")
    for name in commands:
        subcommands.add_parser(name, help=commands[name]["help"], add_help=False)
    return parser.parse_known_args(argv)
def import_command(prog, command, props):
    """Import the subcommand's module and return (script, parser).

    ``parser`` is None when the command declares no argument parser.
    """
    # This currently requires the path to be a module,
    # which probably isn't ideal but it means that relative
    # imports inside the script work
    rel_path = os.path.relpath(props["path"], wpt_root)
    # Convert the file path into a dotted module name, e.g. a/b/c.py -> a.b.c
    parts = os.path.splitext(rel_path)[0].split(os.path.sep)
    mod_name = ".".join(parts)
    mod = __import__(mod_name)
    # __import__ returns the top-level package; walk down to the leaf module.
    for part in parts[1:]:
        mod = getattr(mod, part)
    script = getattr(mod, props["script"])
    if props["parser"] is not None:
        parser = getattr(mod, props["parser"])()
        # Show "wpt <command>" rather than the module path in usage output.
        parser.prog = "%s %s" % (os.path.basename(prog), command)
    else:
        parser = None
    return script, parser
def setup_virtualenv(path, props):
    """Create/activate the virtualenv at ``path`` (default: <wpt root>/_venv)
    and install the command's declared packages and requirement files."""
    venv_path = path if path is not None else os.path.join(wpt_root, "_venv")
    venv = virtualenv.Virtualenv(venv_path)
    venv.start()
    for package in props["install"]:
        venv.install(package)
    for requirements_path in props["requirements"]:
        venv.install_requirements(requirements_path)
    return venv
def main(prog=None, argv=None):
    """Top-level entry point: parse global options, optionally prepare a
    virtualenv, then import and run the chosen subcommand's script."""
    logging.basicConfig(level=logging.INFO)
    if prog is None:
        prog = sys.argv[0]
    if argv is None:
        argv = sys.argv[1:]
    commands = load_commands()
    main_args, command_args = parse_args(argv, commands)
    # Bail out unless the first argument names a known subcommand.
    if not(len(argv) and argv[0] in commands):
        sys.exit(1)
    command = main_args.command
    props = commands[command]
    venv = None
    if props["virtualenv"]:
        venv = setup_virtualenv(main_args.venv, props)
    script, parser = import_command(prog, command, props)
    if parser:
        if props["parse_known"]:
            # Subcommand wants unparsed leftovers forwarded as a list.
            kwargs, extras = parser.parse_known_args(command_args)
            extras = (extras,)
            kwargs = vars(kwargs)
        else:
            extras = ()
            kwargs = vars(parser.parse_args(command_args))
    else:
        extras = ()
        kwargs = {}
    # The virtualenv, when present, is always the first positional arg.
    if venv is not None:
        args = (venv,) + extras
    else:
        args = extras
    if script:
        try:
            # A non-None return value becomes the process exit status.
            rv = script(*args, **kwargs)
            if rv is not None:
                sys.exit(int(rv))
        except Exception:
            if main_args.debug:
                import pdb
                pdb.post_mortem()
            else:
                raise
    sys.exit(0)
# Script entry point: ``wpt`` is normally invoked through this module.
if __name__ == "__main__":
    main()
| dati91/servo | tests/wpt/web-platform-tests/tools/wpt/wpt.py | Python | mpl-2.0 | 4,243 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is the command line interface to py-obfsproxy.
It is designed to be a drop-in replacement for the obfsproxy executable.
Currently, not all of the obfsproxy command line options have been implemented.
"""
import sys
import obfsproxy.network.launch_transport as launch_transport
import obfsproxy.transports.transports as transports
import obfsproxy.common.log as logging
import obfsproxy.common.argparser as argparser
import obfsproxy.common.heartbeat as heartbeat
import obfsproxy.common.transport_config as transport_config
import obfsproxy.managed.server as managed_server
import obfsproxy.managed.client as managed_client
from obfsproxy import __version__
from pyptlib.config import checkClientMode
from twisted.internet import task # for LoopingCall
log = logging.get_obfslogger()
def set_up_cli_parsing():
    """Set up our CLI parser. Register our arguments and options and
    query individual transports to register their own external-mode
    arguments."""
    parser = argparser.MyArgumentParser(
        description='py-obfsproxy: A pluggable transports proxy written in Python')
    subparsers = parser.add_subparsers(title='supported transports', dest='name')
    parser.add_argument('-v', '--version', action='version', version=__version__)
    parser.add_argument('--log-file', help='set logfile')
    parser.add_argument('--log-min-severity',
                        choices=['error', 'warning', 'info', 'debug'],
                        help='set minimum logging severity (default: %(default)s)')
    parser.add_argument('--no-log', action='store_true', default=False,
                        help='disable logging')
    parser.add_argument('--no-safe-logging', action='store_true',
                        default=False,
                        help='disable safe (scrubbed address) logging')
    parser.add_argument('--data-dir', help='where persistent information should be stored.',
                        default=None)
    # Managed mode is a subparser for now because there are no
    # optional subparsers: bugs.python.org/issue9253
    subparsers.add_parser("managed", help="managed mode")
    # Add a subparser for each transport. Also add a
    # transport-specific function to later validate the parsed
    # arguments.
    for transport, transport_class in transports.transports.items():
        subparser = subparsers.add_parser(transport, help='%s help' % transport)
        transport_class['base'].register_external_mode_cli(subparser)
        # Stored so pyobfsproxy() can validate args for the chosen transport.
        subparser.set_defaults(validation_function=transport_class['base'].validate_external_mode_cli)
    return parser
def do_managed_mode():
    """This function starts obfsproxy's managed-mode functionality."""
    if checkClientMode():
        log.info('Entering client managed-mode.')
        managed_client.do_managed_client()
        return
    log.info('Entering server managed-mode.')
    managed_server.do_managed_server()
def do_external_mode(args):
    """This function starts obfsproxy's external-mode functionality."""
    assert(args)
    assert(args.name)
    assert(args.name in transports.transports)
    # Imported here so the reactor is only initialized in external mode.
    from twisted.internet import reactor
    pt_config = transport_config.TransportConfig()
    pt_config.setStateLocation(args.data_dir)
    pt_config.setListenerMode(args.mode)
    pt_config.setObfsproxyMode("external")
    # Run setup() method.
    run_transport_setup(pt_config)
    launch_transport.launch_transport_listener(args.name, args.listen_addr, args.mode, args.dest, pt_config, args.ext_cookie_file)
    log.info("Launched '%s' listener at '%s:%s' for transport '%s'." % \
                 (args.mode, log.safe_addr_str(args.listen_addr[0]), args.listen_addr[1], args.name))
    # Blocks until the reactor is stopped.
    reactor.run()
def consider_cli_args(args):
    """Check out parsed CLI arguments and take the appropriate actions."""
    if args.log_file:
        log.set_log_file(args.log_file)
    if args.log_min_severity:
        log.set_log_severity(args.log_min_severity)
    if args.no_log:
        log.disable_logs()
    if args.no_safe_logging:
        log.set_no_safe_logging()
    # validate:
    # Managed proxies talk to Tor over stdout, so stdout logging would
    # corrupt the managed-mode protocol.
    if (args.name == 'managed') and (not args.log_file) and (args.log_min_severity):
        log.error("obfsproxy in managed-proxy mode can only log to a file!")
        sys.exit(1)
    elif (args.name == 'managed') and (not args.log_file):
        # managed proxies without a logfile must not log at all.
        log.disable_logs()
def run_transport_setup(pt_config):
    """Run the setup() hook of every registered transport."""
    for transport_name, transport_class in transports.transports.items():
        transport_class['base'].setup(pt_config)
def pyobfsproxy():
    """Actual pyobfsproxy entry-point."""
    parser = set_up_cli_parsing()
    args = parser.parse_args()
    consider_cli_args(args)
    log.warning('Obfsproxy (version: %s) starting up.' % (__version__))
    log.debug('argv: ' + str(sys.argv))
    log.debug('args: ' + str(args))
    # Fire up our heartbeat.
    l = task.LoopingCall(heartbeat.heartbeat.talk)
    l.start(3600.0, now=False) # do heartbeat every hour
    # Initiate obfsproxy.
    if (args.name == 'managed'):
        do_managed_mode()
    else:
        # Pass parsed arguments to the appropriate transports so that
        # they can initialize and setup themselves. Exit if the
        # provided arguments were corrupted.
        # XXX use exceptions
        # validation_function was attached by set_up_cli_parsing() per
        # transport subparser.
        if (args.validation_function(args) == False):
            sys.exit(1)
        do_external_mode(args)
def run():
    """Fake entry-point so that we can log unhandled exceptions."""
    # Pyobfsproxy's CLI uses "managed" whereas C-obfsproxy uses
    # "--managed" to configure managed-mode. Python obfsproxy can't
    # recognize "--managed" because it uses argparse subparsers and
    # http://bugs.python.org/issue9253 is not yet solved. This is a crazy
    # hack to maintain CLI compatibility between the two versions. we
    # basically inplace replace "--managed" with "managed" in the argument
    # list.
    if len(sys.argv) > 1 and '--managed' in sys.argv:
        for n, arg in enumerate(sys.argv):
            if arg == '--managed':
                sys.argv[n] = 'managed'
    try:
        pyobfsproxy()
    except Exception, e:
        # Log the traceback before letting the exception propagate.
        log.exception(e)
        raise
# Script entry point when executed directly (not via the console script).
if __name__ == '__main__':
    run()
| NullHypothesis/obfsproxy | obfsproxy/pyobfsproxy.py | Python | bsd-3-clause | 6,365 |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Root URL configuration for the simple CRM.
urlpatterns = patterns('',
    # Customer list (home page).
    url(r'^$', 'customers.views.index', name='home'),
    # WORKSHOP (translated): add the URL for each individual client's view.
    url(r'^c/(?P<id_company>(\d)+)/$', 'customers.views.company', name='company'),
    url(r'^admin/', include(admin.site.urls)),
)
| davoshack/crm | simplecrm/urls.py | Python | gpl-2.0 | 466 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
"""Markers that can be added to `Signal` plots.
Example
-------
>>> import scipy.misc
>>> im = hs.signals.Signal2D(scipy.misc.ascent())
>>> m = hs.plot.markers.rectangle(x1=150, y1=100, x2=400, y2=400, color='red')
>>> im.add_marker(m)
"""
from hyperspy.drawing._markers.horizontal_line import \
HorizontalLine as horizontal_line
from hyperspy.drawing._markers.horizontal_line_segment import \
HorizontalLineSegment as horizontal_line_segment
from hyperspy.drawing._markers.line_segment import LineSegment as line_segment
from hyperspy.drawing._markers.point import Point as point
from hyperspy.drawing._markers.rectangle import Rectangle as rectangle
from hyperspy.drawing._markers.text import Text as text
from hyperspy.drawing._markers.vertical_line import \
VerticalLine as vertical_line
from hyperspy.drawing._markers.vertical_line_segment import \
VerticalLineSegment as vertical_line_segment
| erh3cq/hyperspy | hyperspy/utils/markers.py | Python | gpl-3.0 | 1,653 |
from argparse import SUPPRESS
from time import time
from loguru import logger
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.orm.query import Query
from sqlalchemy.sql.expression import ClauseElement, Executable
from flexget import manager, options
from flexget.event import event
logger = logger.bind(name='explain_sql')
class Explain(Executable, ClauseElement):
    # Wrapper element that lets SQLAlchemy compile an existing query into an
    # EXPLAIN statement (see the @compiles hook below).
    def __init__(self, stmt):
        # Keep the underlying SELECT clause; compilation prefixes it.
        self.statement = stmt.__clause_element__()
@compiles(Explain)
def explain(element, compiler, **kw):
    # Compiler hook: render an Explain element as SQLite's
    # "EXPLAIN QUERY PLAN <original statement>".
    text = 'EXPLAIN QUERY PLAN ' + compiler.process(element.statement)
    return text
class ExplainQuery(Query):
    # Query subclass that logs each query's text, its EXPLAIN QUERY PLAN
    # output, and a rough execution time before yielding results.
    def __iter__(self):
        logger.info('Query:\n\t{}', str(self).replace('\n', '\n\t'))
        # Ask the database for the plan of the same statement.
        explain = self.session.execute(Explain(self)).fetchall()
        text = '\n\t'.join('|'.join(str(x) for x in line) for line in explain)
        before = time()
        result = Query.__iter__(self)
        # NOTE(review): this only times the __iter__ call itself, not the
        # consumption of lazily-fetched rows — confirm that is the intent.
        logger.info('Query Time: {:0.3f} Explain Query Plan: {}', time() - before, text)
        return result
@event('manager.execute.started')
def register_sql_explain(man, options):
    """Install ExplainQuery as the session's query class when --explain-sql is set."""
    if not options.explain_sql:
        return
    manager.Session.kw['query_cls'] = ExplainQuery
@event('manager.execute.completed')
def deregister_sql_explain(man, options):
    """Restore the default query class after an --explain-sql run completes."""
    if not options.explain_sql:
        return
    manager.Session.kw.pop('query_cls', None)
@event('options.register')
def register_parser_arguments():
    """Add the hidden --explain-sql flag to the 'execute' CLI parser."""
    exec_parser = options.get_parser('execute')
    exec_parser.add_argument(
        '--explain-sql', action='store_true', dest='explain_sql', default=False, help=SUPPRESS
    )
| crawln45/Flexget | flexget/plugins/cli/explain_sql.py | Python | mit | 1,577 |
"""mBuild recipe for a silica interface."""
import math
import random
import numpy as np
from mbuild import Compound, Port
from mbuild.lib.recipes.tiled_compound import TiledCompound
class SilicaInterface(Compound):
    """A recipe for creating an interface from bulk silica.
    Carves silica interface from bulk, adjusts to a reactive surface site
    density of 5.0 sites/nm^2 (agreeing with experimental results, see
    Zhuravlev 2000) by creating Si-O-Si bridges, and yields a 2:1 Si:O ratio
    (excluding the reactive surface sites).
    Parameters
    ----------
    bulk_silica : Compound
        Bulk silica from which to cleave an interface
    tile_x : int, optional, default=1
        Number of times to replicate bulk silica in x-direction
    tile_y : int, optional, default=1
        Number of times to replicate bulk silica in y-direction
    thickness : float, optional, default=1.0
        Thickness of the slab to carve from the silica bulk. (in nm; not
        including oxygen layers on the top and bottom of the surface)
    seed : int, optional, default=12345
        Seed for the RNG used when bridging/removing surface oxygens,
        making the carved interface reproducible.
    References
    ----------
    .. [1] Hartkamp, R., Siboulet, B., Dufreche, J.-F., Boasne, B.
           "Ion-specific adsorption and electroosmosis in charged
           amorphous porous silica." (2015) Phys. Chem. Chem. Phys.
           17, 24683-24695
    .. [2] L.T. Zhuravlev, "The surface chemistry of amorphous silica.
           Zhuravlev model." (2000) Colloids Surf., A. 10, 1-38
    """
    def __init__(
        self, bulk_silica, tile_x=1, tile_y=1, thickness=1.0, seed=12345
    ):
        super(SilicaInterface, self).__init__()
        random.seed(seed)
        # Target density of reactive surface sites (sites/nm^2, Zhuravlev).
        self._oh_density = 5.0
        # Thickness (nm) of the oxygen buffer above/below the slab.
        self._O_buffer = 0.275
        self._cleave_interface(bulk_silica, tile_x, tile_y, thickness)
        # 0.20419 nm is the Si-O bond-length cutoff used to infer bonds.
        self.generate_bonds(name_a="Si", name_b="O", dmin=0.0, dmax=0.20419)
        self._strip_stray_atoms()
        self._bridge_dangling_Os(self._oh_density, thickness)
        self._identify_surface_sites(thickness)
        self._adjust_stoichiometry()
    def _cleave_interface(self, bulk_silica, tile_x, tile_y, thickness):
        """Carve interface from bulk silica.
        Also includes a buffer of O's above and below the surface to ensure the
        interface is coated.
        """
        O_buffer = self._O_buffer
        z_height = bulk_silica.box.lengths[2]
        # Replicate the bulk in z until it is at least slab + both O buffers.
        tile_z = int(math.ceil((thickness + 2 * O_buffer) / z_height))
        bulk = TiledCompound(bulk_silica, n_tiles=(tile_x, tile_y, tile_z))
        # The carved slab is periodic in x/y only; z becomes a free surface.
        interface = Compound(
            periodicity=(bulk.periodicity[0], bulk.periodicity[1], False)
        )
        # Keep Si inside the slab proper; keep O also within the buffers so
        # both faces of the slab end up oxygen-coated.
        for i, particle in enumerate(bulk.particles()):
            if (
                particle.name == "Si"
                and O_buffer < particle.pos[2] < (thickness + O_buffer)
            ) or (
                particle.name == "O"
                and particle.pos[2] < (thickness + 2 * O_buffer)
            ):
                interface_particle = Compound(
                    name=particle.name, pos=particle.pos
                )
                interface.add(
                    interface_particle, particle.name + "_{}".format(i)
                )
        self.add(interface, inherit_box=True, inherit_periodicity=True)
    def _strip_stray_atoms(self):
        """Remove stray atoms and surface pieces."""
        # Keep only the largest bonded network; everything else is debris
        # left over from the carve.
        components = self.bond_graph.connected_components()
        major_component = max(components, key=len)
        for atom in list(self.particles()):
            if atom not in major_component:
                self.remove(atom)
    def _bridge_dangling_Os(self, oh_density, thickness):
        """Form Si-O-Si bridges to yield desired density of surface sites.
        References
        ----------
        .. [1] Hartkamp, R., Siboulet, B., Dufreche, J.-F., Boasne, B.
               "Ion-specific adsorption and electroosmosis in charged
               amorphous porous silica." (2015) Phys. Chem. Chem. Phys.
               17, 24683-24695
        """
        area = self.box.lengths[0] * self.box.lengths[1]
        # Number of dangling O's we want to KEEP as reactive sites.
        target = int(oh_density * area)
        # Dangling = singly-bonded O above the slab surface.
        dangling_Os = [
            atom
            for atom in self.particles()
            if atom.name == "O"
            and atom.pos[2] > thickness
            and len(self.bond_graph.neighbors(atom)) == 1
        ]
        # Each bridge consumes two dangling O's (one is deleted).
        n_bridges = int((len(dangling_Os) - target) / 2)
        for _ in range(n_bridges):
            bridged = False
            while not bridged:
                O1 = random.choice(dangling_Os)
                Si1 = self.bond_graph.neighbors(O1)[0]
                for O2 in dangling_Os:
                    if O2 == O1:
                        continue
                    Si2 = self.bond_graph.neighbors(O2)[0]
                    if Si1 == Si2:
                        continue
                    # Skip pairs whose silicons already share a neighbor;
                    # bridging them would create a strained 2-ring.
                    if any(
                        neigh in self.bond_graph.neighbors(Si2)
                        for neigh in self.bond_graph.neighbors(Si1)
                    ):
                        continue
                    r = self.min_periodic_distance(Si1.pos, Si2.pos)
                    # Only bridge silicons closer than 0.45 nm.
                    if r < 0.45:
                        bridged = True
                        self.add_bond((O1, Si2))
                        dangling_Os.remove(O1)
                        dangling_Os.remove(O2)
                        self.remove(O2)
                        break
    def _identify_surface_sites(self, thickness):
        """Label surface sites and add ports above them."""
        for atom in list(self.particles()):
            if len(self.bond_graph.neighbors(atom)) == 1:
                if atom.name == "O" and atom.pos[2] > thickness:
                    atom.name = "O_surface"
                    # Port points up (+z), 0.1 nm above the oxygen, so
                    # surface groups can be attached later.
                    port = Port(anchor=atom)
                    port.spin(np.pi / 2, [1, 0, 0])
                    port.translate(np.array([0.0, 0.0, 0.1]))
                    self.add(port, f"port_{len(self.referenced_ports())}")
    def _adjust_stoichiometry(self):
        """Remove O's from underside of surface to yield a 2:1 Si:O ratio."""
        num_O = len(list(self.particles_by_name("O")))
        num_Si = len(list(self.particles_by_name("Si")))
        # Excess oxygens beyond the SiO2 stoichiometry (surface O's were
        # renamed O_surface above and are excluded from this count).
        n_deletions = num_O - 2 * num_Si
        # Only singly-bonded O's in the bottom buffer are candidates.
        bottom_Os = [
            atom
            for atom in self.particles()
            if atom.name == "O"
            and atom.pos[2] < self._O_buffer
            and len(self.bond_graph.neighbors(atom)) == 1
        ]
        for _ in range(n_deletions):
            O1 = random.choice(bottom_Os)
            bottom_Os.remove(O1)
            self.remove(O1)
# Ad-hoc smoke test: carve an interface from the bundled bulk silica and
# write it (including open ports) to a mol2 file.
if __name__ == "__main__":
    from mbuild.lib.bulk_materials import AmorphousSilicaBulk
    silica_interface = SilicaInterface(
        bulk_silica=AmorphousSilicaBulk(), thickness=1.2
    )
    silica_interface.save("silica_interface.mol2", show_ports=True)
| iModels/mbuild | mbuild/lib/recipes/silica_interface.py | Python | mit | 6,819 |
import abc
import re
class Formatter(abc.ABC):
    """Abstract base class for query formatters.

    BUG FIX: the class previously did not inherit from abc.ABC, so the
    @abc.abstractmethod decorator had no effect — Formatter was directly
    instantiable and subclasses could silently omit format().
    """
    @abc.abstractmethod
    def format(self, query):
        ''' Should return a human-readable version of the query string
        '''
        pass
class BasicFormatter(Formatter):
    ''' Provides a basic default formatting for query strings

    This formatter provides only indentation levels and newlines at
    open braces.
    '''
    def __init__(self):
        # String prepended once per nesting level.
        self.indent_str = "    "

    def format(self, query):
        '''Return a human-readable, indented version of the query string.

        Accepts either a plain string or an object exposing serialize().
        '''
        if not isinstance(query, str):
            query = query.serialize()
        # TODO handle braces inside literals correctly
        # Build the output as a list of fragments and join once at the end;
        # repeated `str +=` in a loop is quadratic.
        pieces = []
        last_char = ""  # last character emitted so far ('' while empty)
        indent_level = 0
        for letter in query:
            if letter == "{":
                # newline and reindent on open brace
                indent_level += 1
                piece = "{\n" + self.indent_str * indent_level
            elif letter == "}":
                # newline and reindent on close brace
                indent_level -= 1
                piece = "\n" + self.indent_str * indent_level + "}"
            elif last_char == "\n":
                # reindent after any newline
                piece = self.indent_str * indent_level + letter
            else:
                piece = letter
            pieces.append(piece)
            last_char = piece[-1]
        formatted_query = "".join(pieces)
        # Trim trailing whitespace before newlines (raw string: '\g' is an
        # invalid escape in a plain literal).
        formatted_query = re.sub(r'(.)\s+\n', r'\g<1>\n', formatted_query, flags=re.MULTILINE)
        # Collapse runs of newlines into one.
        formatted_query = re.sub(r'\n+', '\n', formatted_query)
        return formatted_query
| ALSchwalm/sparqllib | sparqllib/formatter.py | Python | mit | 1,625 |
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-
"""
# FriendBot, a Telegram bot that emulates friend's phrases.
# Copyright (C) 2016 Quantum Code
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import telepot.aio
import friendbot.file as file
import friendbot.test as check
import random
from collections import namedtuple
from binascii import hexlify
# Default on-disk structure for a freshly created bot file.
BOT_TEMPLATE = {
    'bot_name': '',
    'api_token': '',
    'persona': {
        'command': '',   # chat command that triggers a random phrase
        'responses': [],  # acknowledgements sent after adding a phrase
        'phrases': []     # the pool of phrases the persona can say
    },
    'moderator': {
        'moderator_id': 0,
    }
}
# Lightweight bundle of the persona fields stored in the bot file.
Properties = namedtuple('Properties', ['name', 'command', 'responses', 'phrases'])
class Persona:
    """A bot persona (name, trigger command, responses, phrase pool)
    backed by a BotFile for persistence."""

    def __init__(self, io, properties=None):
        self.file = io
        if properties is None:
            # No explicit properties: load the persisted persona.
            properties = self.file.extract_persona()
        elif not check.is_valid_persona(properties):
            # Invalid properties: fall back to defining a new bot.
            properties = self.file.define_new_bot()
        self.name, self.command, self.responses, self.phrases = properties

    def command_analyzer(self, msg):
        """Takes in a message and returns the proper response depending on
        the command. Returns None for unrecognized commands."""
        if msg.startswith('/start'):
            return self.name + ' bot is active'
        if msg.startswith('/help'):
            return "Commands: /start /help"
        if msg.startswith('/' + self.command):
            return self.random_phrase()
        if msg.startswith('/new' + self.command):
            # BUG FIX: add_new_phrase() was called with no argument
            # (TypeError) and the response used an undefined bare name
            # `responses`. Use the message remainder as the new phrase.
            phrase = msg[len('/new' + self.command):].strip()
            self.add_new_phrase(phrase)
            return random.choice(self.responses)

    def add_new_phrase(self, phrase):
        """Add *phrase* to the pool; report whether it was new."""
        # Membership test instead of list.index inside a bare except.
        if phrase in self.phrases:
            return 'phrase exists'
        self.phrases.append(phrase)
        random.shuffle(self.phrases)
        return 'phrases added'

    def random_phrase(self):
        """Shuffles then selects a phrase from the phrase list."""
        random.shuffle(self.phrases)
        # BUG FIX: was random.chose(...), which raised AttributeError.
        return random.choice(self.phrases)

    def remove_phrase(self, *, phrase=None, index=None):
        """Remove a phrase by value or by index; report the outcome."""
        if phrase is None and index is None:
            return 'nothing entered'
        elif phrase is not None:
            try:
                self.phrases.remove(phrase)
                return 'phrase removed'
            except ValueError:  # narrowed from a bare except
                return 'phrase not found'
        try:
            del self.phrases[index]
            return 'phrase removed'
        except (IndexError, TypeError):  # bad index or non-int index
            return 'phrase not found'

    def save(self):
        """Saves file to bot file."""
        # NOTE(review): passes the bound method itself, not its result;
        # presumably BotFile.save calls it to obtain the data — confirm.
        self.file.save(self._get_data)

    def _get_data(self):
        """Return this persona's attributes in the bot-file layout."""
        return {
            'bot_name': self.name,
            'persona': {
                'command': self.command,
                'responses': self.responses,
                'phrases': self.phrases
            }
        }
class FriendBase(telepot.aio.Bot):
    """Telegram bot wired to a persisted Persona.

    Loads (or creates) the bot file at *file_path*, builds the Persona
    from it, and connects to Telegram with the stored token.
    """

    def __init__(self, file_path, api_token=None, properties=None):
        self.file = file.BotFile(file_path, template=BOT_TEMPLATE)
        # TODO(Alex Z.) Add condition for whether there is or isn't an api_token
        if self.file.is_new_file:
            self.persona = Persona(self.file, properties)
            self.persona.save()
            self.file.is_new_file = False
        else:
            self.persona = Persona(self.file)
        # BUG FIX: super().__init__ already binds self; the original passed
        # self again explicitly, shifting every argument by one.
        # NOTE(review): get_bot_token is accessed without calling — confirm
        # it is a property on BotFile.
        super().__init__(self.file.get_bot_token)
        self._answerer = telepot.aio.helper.Answerer(self)

    def save(self):
        """Saves all objects current states."""
        self.persona.save()
| Quantum-Code/friendbot | friendbot/bot.py | Python | gpl-3.0 | 4,194 |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import six
def load_json(input_data):
    """Deserialize JSON from either ``bytes`` or ``str``.

    Replaces the six.PY3 branch: explicitly decoding bytes before parsing
    behaves identically on Python 3, where this code runs.
    """
    if isinstance(input_data, bytes):
        input_data = input_data.decode()
    return json.loads(input_data)
def dump_json(obj):
    """Serialize *obj* to deterministically key-ordered JSON ``bytes``.

    Replaces the six.PY3 branch: the result is always encoded, which is
    what the Python 3 path did.
    """
    return json.dumps(obj, sort_keys=True).encode()
| Yelp/kafka-utils | kafka_utils/util/serialization.py | Python | apache-2.0 | 930 |
import asyncio
from datetime import datetime
from random import choice
import pytz
import common
from cogs.drinkingcog import consume_drink
from cogs.duelcog import item_chance_roll
from discord.ext import commands
from objs.duel_item import get_name, get_slot
from objs.weekend_games import pretty_date
class Gametime(commands.Cog):
    """ Gametime Commands"""
    # Discord cog bundling gametime scheduling, polls, attendance (in/out),
    # and session win/loss record commands. All state lives in the shared
    # common.whos_in object.
    def __init__(self, bot):
        self.bot = bot
    @commands.group(name='gametime')
    async def gametime(self, ctx):
        """Handles gametime actions"""
        # Shown when !gametime is invoked without a valid subcommand.
        gametime_help_string = \
            "That's not a valid command for **!gametime**\n\n" \
            "Please use:\n" \
            "!gametime <add> <day of the week>" \
            "<_optional_: military time, HH:MM> to **add a gametime**\n" \
            "!gametime <remove> <index> to **delete a gametime**\n" \
            "!gametime <list> to **list current gametimes**\n" \
            "!gametime <set> <index> <time> to " \
            "**set the time of a gametime**"
        if ctx.invoked_subcommand is None:
            await ctx.send(gametime_help_string)
    @gametime.command(name="list")
    async def list_gametimes(self, ctx):
        """List current gametimes"""
        await ctx.send(common.whos_in.get_gametimes())
    @gametime.command(name="add")
    async def add_gametime(self, ctx, day, start_time=None):
        """Create a new gametime"""
        await ctx.send(common.whos_in.create_gametime(day, start_time))
    @gametime.command(name="remove")
    async def rem_gametime(self, ctx, index):
        """Remove a gametime"""
        await ctx.send(common.whos_in.remove_gametime(index))
    @gametime.command(name="set")
    async def set_gametime(self, ctx, index, new_time):
        """Set a gametime"""
        await ctx.send(common.whos_in.set_gametime(index, new_time))
    @commands.group(name='poll')
    async def poll(self, ctx):
        """Handles Polls actions"""
        poll_help_string = \
            "That's not a valid command for **!poll**\n\n" \
            "Please use:\n" \
            "!poll start \"option 1\" \"option 2\" etc... to **start a " \
            "poll**\n" \
            "!poll stop to **delete the current poll**"
        if ctx.invoked_subcommand is None:
            await ctx.send(poll_help_string)
    @poll.command(name='start')
    async def start_poll(self, ctx):
        """Creates a poll"""
        # [6:] strips the leading "!poll " from the raw message so only the
        # quoted options are passed on.
        await ctx.send(common.whos_in.create_poll(ctx.message.content[6:]))
    @poll.command(name='stop')
    async def stop_poll(self, ctx):
        """Stops a poll"""
        await ctx.send(common.whos_in.stop_poll())
    @commands.command(name='in')
    async def in_command(self, ctx, gt_num=""):
        """Marks you as in for a gametime"""
        if gt_num == "":
            await ctx.send("When are you in for, though?\n\n{}"
                           .format(common.whos_in.get_gametimes()))
        else:
            await ctx.send(common.whos_in.add(ctx.message.author, gt_num,
                                              status="in"))
    @commands.command(name='possible')
    async def possible_command(self, ctx, gt_num=""):
        """Marks you as possible for a gametime"""
        if gt_num == "":
            await ctx.send("When are you possibly in for, though?\n\n{}"
                           .format(common.whos_in.get_gametimes()))
        else:
            await ctx.send(common.whos_in.add(ctx.message.author, gt_num,
                                              status="possible"))
    @commands.command(name='late')
    async def late_command(self, ctx, gt_num=""):
        """Marks you as going to be late for a gametime"""
        if gt_num == "":
            await ctx.send("For what session are you going to be late for, "
                           "though?\n\n{}"
                           .format(common.whos_in.get_gametimes()))
        else:
            await ctx.send(common.whos_in.add(ctx.message.author, gt_num,
                                              status="going to be late"))
    @commands.command(name='out')
    async def out_command(self, ctx, gt_num=""):
        """Removes you from a gametime"""
        if gt_num == "":
            await ctx.send("When are you out for, though?\n\n{}"
                           .format(common.whos_in.get_gametimes()))
        else:
            await ctx.send(common.whos_in.remove(ctx.message.author,
                                                 gt_num))
    @commands.command(name='whosin')
    async def whosin_command(self, ctx):
        """See who is in for a gametime"""
        await ctx.send(common.whos_in.whos_in())
    @commands.command(name='win')
    async def record_win(self, ctx):
        """Add a win to the record books"""
        common.whos_in.add_win()
        await ctx.send("Congrats on the win!")
        # NOTE(review): discord.py normally exposes get_command on the Bot,
        # not on Context — confirm ctx.get_command exists in the discord.ext
        # version in use (same pattern repeated below).
        await ctx.invoke(ctx.get_command('get-record'))
        # Three results in a row triggers a social; otherwise a win also
        # rolls the shot lottery.
        if common.whos_in.consecutive == 3:
            await common.trigger_social(ctx)
            common.whos_in.consecutive = 0
        else:
            await ctx.invoke(ctx.get_command('shot-lottery'), True)
    @commands.command(name='loss')
    async def record_loss(self, ctx):
        """Add a loss to the record books"""
        common.whos_in.add_loss()
        await ctx.send("You guys are bad!")
        await ctx.invoke(ctx.get_command('get-record'))
        if common.whos_in.consecutive == 3:
            await common.trigger_social(ctx)
            common.whos_in.consecutive = 0
    @commands.command(name='draw')
    async def record_draw(self, ctx):
        """Add a draw to the record books"""
        common.whos_in.add_draw()
        await ctx.send("What a waste!")
        await ctx.invoke(ctx.get_command('get-record'))
        if common.whos_in.consecutive == 3:
            await common.trigger_social(ctx)
            common.whos_in.consecutive = 0
    @commands.command(name='clear-record')
    async def record_clear(self, ctx):
        """Clears the session record."""
        record_string = "You went: {}".format(common.whos_in.get_record())
        await ctx.send(record_string)
        common.whos_in.clear_record()
        await ctx.send("Record Cleared!")
    @commands.command(name='get-record')
    async def record_get(self, ctx):
        """Get the current record."""
        record_string = "Current Record: {}".format(common.whos_in.get_record())
        await ctx.send(record_string)
        record_string = "Overall Record {}".format(
            common.whos_in.get_c_record())
        await ctx.send(record_string)
    @commands.command(name='vote')
    async def add_vote(self, ctx, option=""):
        """Vote in a poll"""
        if common.whos_in.poll is None:
            await ctx.send("No Poll currently taking place")
            return
        if option == "":
            await ctx.send("What are you voting for, though?\n\n{}"
                           .format(common.whos_in.poll.get_current_state()))
        else:
            try:
                await ctx.send(
                    common.whos_in.poll.add_vote(option, ctx.message.author))
            except IndexError:
                await ctx.send("Not a valid option!")
async def print_at_midnight(bot):
    """Background task: just before midnight (US/Eastern), prune stale
    gametimes, post the who's-in list, run the community item drop, and
    enforce drink debts. Runs for the life of the bot process.
    """
    from datetime import timedelta  # local: module imports only the datetime class
    c_to_send = None
    await bot.wait_until_ready()
    # Post in the first channel matching the test channel or the configured one.
    for channel in bot.get_all_channels():
        if channel.name == 'gen_testing' or \
                channel.name == common.ARGS['channel']:
            c_to_send = channel
            break
    while not bot.is_closed():
        now = datetime.now(pytz.timezone('US/Eastern'))
        midnight = now.replace(hour=23, minute=59, second=59, microsecond=59)
        if now > midnight:
            # BUG FIX: was midnight.replace(day=now.day + 1), which raises
            # ValueError on the last day of a month; timedelta rolls over
            # month/year boundaries correctly.
            midnight = midnight + timedelta(days=1)
        print("Scheduling next list print at {}".format(pretty_date(midnight)))
        # midnight is always < 24h away, so .seconds (which drops days) is safe.
        await asyncio.sleep((midnight - now).seconds)
        common.whos_in.remove_old_gametimes()
        await c_to_send.send(common.whos_in.whos_in())
        # Community Drop Time: keep rolling until at least one member wins.
        i_awarded = False
        i = False
        while not i_awarded:
            for m in bot.get_all_members():
                if m.display_name != 'brochat-bot' and m.display_name in \
                        common.users and \
                        'duel_record' in common.users[m.display_name]:
                    i = await item_chance_roll(m.display_name, c_to_send)
                    i_awarded = i_awarded or i
        # Drink Debt Enforcement: confiscate one random item from anyone
        # owing more than 6 drinks, in exchange for one drink of debt.
        for m in bot.get_all_members():
            nc = m.display_name
            if nc != 'brochat-bot' and nc in common.users and \
                    'drinks_owed' in common.users[nc] \
                    and common.users[nc]['drinks_owed'] > 6 \
                    and 'inventory' in common.users[nc] \
                    and len(common.users[nc]['inventory']) > 0:
                item_take = choice(list(common.users[nc]['inventory'].keys()))
                await c_to_send.send("{}, the bank sent me to collect on "
                                     "your debt. I'll have to take your {} "
                                     "in lieu of one drink. Can't cheat "
                                     "friendship around these parts."
                                     .format(nc, get_name(item_take)))
                # Unequip the item first if it is currently equipped.
                if get_slot(item_take) in common.users[nc]['equip']:
                    del(common.users[nc]['equip'][get_slot(item_take)])
                del common.users[nc]['inventory'][item_take]
                await c_to_send.send("You now owe {} drinks."
                                     .format(consume_drink(nc)))
        common.whos_in.update_db()
        # Small cooldown so a fast loop cannot double-post around midnight.
        await asyncio.sleep(60 * 10)
def setup(bot):
    # discord.py extension entry point: register the cog and start the
    # midnight background task on the bot's event loop.
    bot.add_cog(Gametime(bot))
    bot.loop.create_task(print_at_midnight(bot))
| nluedtke/brochat-bot | cogs/gametimecog.py | Python | mit | 9,841 |
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    # Auto-generated validator for the `ids` data-array attribute of
    # parcoords traces.
    def __init__(self, plotly_name="ids", parent_name="parcoords", **kwargs):
        super(IdsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Changing ids triggers a full recalculation unless overridden.
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/parcoords/_ids.py | Python | mit | 389 |
import bleach
from django import forms
from django.contrib.auth.models import Group
from osf.models import PreprintProvider, Subject
from admin.base.utils import (get_subject_rules, get_toplevel_subjects,
get_nodelicense_choices, get_defaultlicense_choices, validate_slug)
class PreprintProviderForm(forms.ModelForm):
    # Admin form for creating/editing a PreprintProvider. Subjects are
    # chosen client-side and posted back as a comma-separated id list in
    # the hidden subjects_chosen field; rich-text fields are sanitized
    # with bleach in the clean_* methods below.
    toplevel_subjects = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), required=False)
    subjects_chosen = forms.CharField(widget=forms.HiddenInput(), required=False)
    _id = forms.SlugField(
        required=True,
        help_text='URL Slug',
        validators=[validate_slug]
    )
    class Meta:
        model = PreprintProvider
        exclude = ['primary_identifier_name', 'primary_collection', 'type']
        widgets = {
            'licenses_acceptable': forms.CheckboxSelectMultiple(),
            'subjects_acceptable': forms.HiddenInput(),
        }
    def __init__(self, *args, **kwargs):
        # Choices are resolved at instantiation (not import) so they track
        # current database contents.
        toplevel_choices = get_toplevel_subjects()
        nodelicense_choices = get_nodelicense_choices()
        defaultlicense_choices = get_defaultlicense_choices()
        super(PreprintProviderForm, self).__init__(*args, **kwargs)
        self.fields['toplevel_subjects'].choices = toplevel_choices
        self.fields['licenses_acceptable'].choices = nodelicense_choices
        self.fields['default_license'].choices = defaultlicense_choices
    def clean_subjects_acceptable(self, *args, **kwargs):
        # Convert the selected subject ids into hierarchical subject rules.
        subject_ids = [_f for _f in self.data['subjects_chosen'].split(', ') if _f]
        subjects_selected = Subject.objects.filter(id__in=subject_ids)
        rules = get_subject_rules(subjects_selected)
        return rules
    def clean_advisory_board(self, *args, **kwargs):
        # Strip all HTML except an explicit allowlist of tags/attrs/styles.
        if not self.data.get('advisory_board'):
            return u''
        return bleach.clean(
            self.data.get('advisory_board'),
            tags=['a', 'b', 'br', 'div', 'em', 'h2', 'h3', 'li', 'p', 'strong', 'ul'],
            attributes=['class', 'style', 'href', 'title', 'target'],
            styles=['text-align', 'vertical-align'],
            strip=True
        )
    def clean_description(self, *args, **kwargs):
        if not self.data.get('description'):
            return u''
        return bleach.clean(
            self.data.get('description'),
            tags=['a', 'br', 'em', 'p', 'span', 'strong'],
            attributes=['class', 'style', 'href', 'title', 'target'],
            styles=['text-align', 'vertical-align'],
            strip=True
        )
    def clean_footer_links(self, *args, **kwargs):
        if not self.data.get('footer_links'):
            return u''
        return bleach.clean(
            self.data.get('footer_links'),
            tags=['a', 'br', 'div', 'em', 'p', 'span', 'strong'],
            attributes=['class', 'style', 'href', 'title', 'target'],
            styles=['text-align', 'vertical-align'],
            strip=True
        )
class PreprintProviderCustomTaxonomyForm(forms.Form):
    # Form for editing a provider's custom subject taxonomy, either as raw
    # JSON or via the individual include/exclude/custom/merge pickers.
    add_missing = forms.BooleanField(required=False)
    custom_taxonomy_json = forms.CharField(widget=forms.Textarea, initial='{"include": [], "exclude": [], "custom": {}, "merge": {}}', required=False)
    include = forms.ChoiceField(choices=[], required=False)
    exclude = forms.ChoiceField(choices=[], required=False)
    custom_name = forms.CharField(required=False)
    custom_parent = forms.CharField(required=False)
    bepress = forms.ChoiceField(choices=[], required=False)
    merge_from = forms.ChoiceField(choices=[], required=False)
    merge_into = forms.ChoiceField(choices=[], required=False)
    def __init__(self, *args, **kwargs):
        super(PreprintProviderCustomTaxonomyForm, self).__init__(*args, **kwargs)
        # Fill every still-empty choice field with the non-bepress subjects
        # at instantiation time so choices reflect current DB contents.
        subject_choices = [(x, x) for x in Subject.objects.filter(bepress_subject__isnull=True).values_list('text', flat=True)]
        for name, field in self.fields.items():
            if hasattr(field, 'choices'):
                if field.choices == []:
                    field.choices = subject_choices
class PreprintProviderRegisterModeratorOrAdminForm(forms.Form):
    """ A form that finds an existing OSF User, and grants permissions to that
    user so that they can use the admin app"""
    def __init__(self, *args, **kwargs):
        # provider_id scopes the selectable groups to this provider's
        # reviews groups; it is popped so the base Form never sees it.
        provider_id = kwargs.pop('provider_id')
        super(PreprintProviderRegisterModeratorOrAdminForm, self).__init__(*args, **kwargs)
        self.fields['group_perms'] = forms.ModelMultipleChoiceField(
            queryset=Group.objects.filter(name__startswith='reviews_preprint_{}'.format(provider_id)),
            required=False,
            widget=forms.CheckboxSelectMultiple
        )
    # Declared after __init__ but still a class-level field; 5-char OSF guid.
    user_id = forms.CharField(required=True, max_length=5, min_length=5)
| mfraezz/osf.io | admin/preprint_providers/forms.py | Python | apache-2.0 | 4,770 |
"""Test the JuiceNet config flow."""
import aiohttp
from pyjuicenet import TokenError
from homeassistant import config_entries, setup
from homeassistant.components.juicenet.const import DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN
from tests.async_mock import MagicMock, patch
def _mock_juicenet_return_value(get_devices=None):
juicenet_mock = MagicMock()
type(juicenet_mock).get_devices = MagicMock(return_value=get_devices)
return juicenet_mock
async def test_form(hass):
    """Test we get the form."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    # Stub out the API probe and the component setup so submitting the
    # token creates an entry without any network access.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        return_value=MagicMock(),
    ), patch(
        "homeassistant.components.juicenet.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.juicenet.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
    assert result2["type"] == "create_entry"
    assert result2["title"] == "JuiceNet"
    assert result2["data"] == {CONF_ACCESS_TOKEN: "access_token"}
    # Setup is scheduled asynchronously; wait before counting the calls.
    await hass.async_block_till_done()
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # TokenError from the juicenet client maps to the invalid_auth error.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=TokenError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # A transport-level aiohttp error maps to the cannot_connect error.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=aiohttp.ClientError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_catch_unknown_errors(hass):
    """Test we handle an unexpected exception as the unknown error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Any unanticipated exception should surface as the generic error.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=Exception,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "unknown"}
async def test_import(hass):
    """Test that import works as expected."""
    # YAML import path: the flow is initialized with SOURCE_IMPORT and the
    # token comes from configuration data rather than the user form.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        return_value=MagicMock(),
    ), patch(
        "homeassistant.components.juicenet.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.juicenet.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={CONF_ACCESS_TOKEN: "access_token"},
        )
    assert result["type"] == "create_entry"
    assert result["title"] == "JuiceNet"
    assert result["data"] == {CONF_ACCESS_TOKEN: "access_token"}
    # Setup is scheduled asynchronously; wait before counting the calls.
    await hass.async_block_till_done()
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
| tchellomello/home-assistant | tests/components/juicenet/test_config_flow.py | Python | apache-2.0 | 4,247 |
from django.utils import timezone
import datetime
from polls.models import Question
def create_question(question_text, days):
    """
    Creates a question with the given `question_text` published the given
    number of `days` offset to now (negative for questions published
    in the past, positive for questions that have yet to be published).
    """
    pub_date = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text,
                                   pub_date=pub_date)
from rest_framework import permissions
class IsStaffOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow staff members to edit object.
    """
    def has_permission(self, request, view):
        # Safe (read-only) methods — GET, HEAD, OPTIONS — are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        # Writes require an authenticated staff user.
        return bool(request.user and request.user.is_staff)
class IsStaffOrCreateOnly(permissions.BasePermission):
    """
    Custom permission to only allow staff members to list
    and retrieve object, and allow anybody to create new
    """
    # Anybody may create; staff may additionally GET or DELETE,
    # but nobody (staff included) may PUT or PATCH.
    OPEN_METHODS = ('POST', 'HEAD', 'OPTIONS')
    STAFF_METHODS = ('GET', 'DELETE')

    def has_permission(self, request, view):
        if request.method in self.OPEN_METHODS:
            return True
        is_staff = bool(request.user and request.user.is_staff)
        return is_staff and request.method in self.STAFF_METHODS
class IsWeblogAuthor(permissions.BasePermission):
    """
    Custom permission to allow only weblog authors to access
    an object.
    """
    def has_permission(self, request, view):
        # Require an authenticated user flagged as a weblog author.
        user = request.user
        return (user and user.is_authenticated() and
                user.is_weblog_author)
class IsWeblogAuthorOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow staff members or weblog authors
    to create object.
    """
    def has_permission(self, request, view):
        # Reads are open to everyone; writes require an authenticated
        # weblog author.
        user = request.user
        return (request.method in permissions.SAFE_METHODS or
                user and user.is_authenticated() and
                user.is_weblog_author)
class IsAuthorOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow author to edit an object.
    Assumes the model instance has an `author` attribute.
    """
    def has_object_permission(self, request, view, obj):
        # Reads (GET, HEAD, OPTIONS) are open; writes only for the author.
        return (request.method in permissions.SAFE_METHODS or
                obj.author == request.user)
| Fenykepy/phiroom | src/api/phiroom/permissions.py | Python | agpl-3.0 | 2,652 |
import psutil
def is_up(ps_name):
    """Return True if at least one running process is named *ps_name*.

    Uses ``psutil.process_iter()`` instead of the deprecated
    ``psutil.get_pid_list()`` (removed in psutil 2.x) and short-circuits
    on the first match rather than materialising a full list.
    """
    for proc in psutil.process_iter():
        try:
            if proc.name() == ps_name:
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Process vanished between enumeration and inspection, or we
            # may not read it; either way it cannot be the one we want.
            continue
    return False
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @author XU Kai(xukai.ken@gmail.com)
# @date 2016-12-04 星期日
#
#
# #fileOverview PWM debug/test script
#
#
#
import RPi.GPIO as GPIO
import time
import atexit
# Release all GPIO resources automatically when the script exits.
atexit.register(GPIO.cleanup)
# Use the physical board pin numbering scheme.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT, initial=False)
p = GPIO.PWM(7, 50)  # 50 Hz carrier -> 20 ms frame, the standard hobby-servo period
# NOTE(review): PWM.start()/ChangeDutyCycle() take a duty cycle in PERCENT.
# 1.5 here means 1.5% (a 0.3 ms pulse at 50 Hz); if the 1.5 ms neutral servo
# pulse described below was intended, the value should be 7.5 -- confirm.
p.start(1.5)
time.sleep(2)
p.ChangeDutyCycle(0)  # drop the signal before stopping
p.stop()
# PWM control signal period is 20 ms; a pulse width of 0.5 ms-2.5 ms maps to
# -90 to +90 degrees (a 180-degree range, with roughly 3 degrees of deviation);
# at a 1.5 ms pulse width the servo sits at the neutral point (0 degrees).
# while (True):
# for i in range(0, 181, 10):
# cycle = 1.2 + 10 * i / 180
# p.ChangeDutyCycle(cycle) #设置转动角度
# print('高电平脉宽:' + bytes(cycle))
# time.sleep(0.02) #等该20ms周期结束
# p.ChangeDutyCycle(0) #归零信号
# for i in range(181, 0, -10):
# cycle = 1.2 + 10 * i / 180
# p.ChangeDutyCycle(cycle)
# print('高电平脉宽:' + bytes(cycle))
# time.sleep(0.02)
# p.ChangeDutyCycle(0)
| EchoFUN/raspi | tests/camera_pwm_1.py | Python | mit | 1,072 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import answer_record_management
import conversation_management
import conversation_profile_management
import participant_management
PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
SMART_REPLY_MODEL = os.getenv("SMART_REPLY_MODEL")
SMART_REPLY_ALLOWLIST = os.getenv("SMART_REPLY_ALLOWLIST")
CONVERSATION_PROFILE_DISPLAY_NAME = "sample code profile for smart reply"
def test_smart_reply(capsys):
    """End-to-end test of the Dialogflow smart-reply samples.

    Creates a conversation profile wired to a smart-reply model and
    allowlist, runs a short two-participant conversation through
    AnalyzeContent, marks one suggested answer as clicked, then tears the
    conversation and profile down. Resource ids are recovered by parsing
    the resource names the sample helpers print to stdout (via capsys).
    Requires the GOOGLE_CLOUD_PROJECT, SMART_REPLY_MODEL and
    SMART_REPLY_ALLOWLIST environment variables.
    """
    # Create conversation profile.
    conversation_profile_management.create_conversation_profile_smart_reply(
        project_id=PROJECT_ID,
        display_name=CONVERSATION_PROFILE_DISPLAY_NAME,
        smart_reply_allowlist_name=SMART_REPLY_ALLOWLIST,
        smart_reply_model_name=SMART_REPLY_MODEL,
    )
    out, _ = capsys.readouterr()
    assert "Display Name: {}".format(CONVERSATION_PROFILE_DISPLAY_NAME) in out
    # The helper prints the full resource name; extract the trailing id.
    conversation_profile_id = out.split("conversationProfiles/")[1].rstrip()
    # Create conversation.
    conversation_management.create_conversation(
        project_id=PROJECT_ID, conversation_profile_id=conversation_profile_id
    )
    out, _ = capsys.readouterr()
    conversation_id = out.split("conversations/")[1].rstrip()
    # Create end user participant.
    participant_management.create_participant(
        project_id=PROJECT_ID, conversation_id=conversation_id, role="END_USER"
    )
    out, _ = capsys.readouterr()
    end_user_id = out.split("participants/")[1].rstrip()
    # Create human agent participant.
    participant_management.create_participant(
        project_id=PROJECT_ID, conversation_id=conversation_id, role="HUMAN_AGENT"
    )
    out, _ = capsys.readouterr()
    human_agent_id = out.split("participants/")[1].rstrip()
    # AnalyzeContent: agent message, then end-user message (the latter is
    # what triggers smart-reply suggestions for the agent).
    participant_management.analyze_content_text(
        project_id=PROJECT_ID,
        conversation_id=conversation_id,
        participant_id=human_agent_id,
        text="Hi, how are you?",
    )
    out, _ = capsys.readouterr()
    assert "What would you like to know?" in out
    response = participant_management.analyze_content_text(
        project_id=PROJECT_ID,
        conversation_id=conversation_id,
        participant_id=end_user_id,
        text="I am doing well, just want to check",
    )
    out, _ = capsys.readouterr()
    assert "Sounds good." in out
    # Update AnswerRecord: mark the first suggested smart reply as clicked.
    answer_record_id = (
        response.human_agent_suggestion_results[0]
        .suggest_smart_replies_response.smart_reply_answers[0]
        .answer_record.split("answerRecords/")[1]
        .rstrip()
    )
    answer_record_management.update_answer_record(
        project_id=PROJECT_ID, answer_record_id=answer_record_id, is_clicked=True
    )
    out, _ = capsys.readouterr()
    assert "Clicked: True" in out
    # Complete conversation.
    conversation_management.complete_conversation(
        project_id=PROJECT_ID, conversation_id=conversation_id
    )
    # Delete conversation profile (cleanup).
    conversation_profile_management.delete_conversation_profile(
        project_id=PROJECT_ID, conversation_profile_id=conversation_profile_id
    )
from . import Block
class BlockRunwayView(Block):
    """METAR runway visual range (RVR) group, e.g. ``R06/0400V0800U``.

    The distance part is either a fixed value, an M/P-prefixed value
    (less than / more than), or a varying ``min V max`` range, optionally
    followed by a trend letter U/D/N (up, down, no change).
    """
    # NOTE: the original character classes were written as [M,P] and
    # [U,D,N]; inside a class the comma is a literal, so those patterns
    # also accepted a stray comma. The separators are removed here, and
    # the patterns are raw strings to avoid invalid-escape warnings.
    validation = r'^R\d{2}/(\d{4}|[MP]\d{4}|\d{4}V\d{4})([UDN]$|$)'
    name = 'runway_view'
    patterns = {
        'qnh': [(r'\d{3,4}', '_qnh')],
    }

    def _qnh(self, q):
        """Convert the captured digit group to an integer."""
        return int(q)
from numpy import exp, array, random, dot
class NeuronLayer():
    """A fully-connected layer holding a single weight matrix.

    Weights are drawn uniformly from [-1, 1) with shape
    (number_of_inputs_per_neuron, number_of_neurons).
    """
    def __init__(self, number_of_neurons, number_of_inputs_per_neuron):
        shape = (number_of_inputs_per_neuron, number_of_neurons)
        self.synaptic_weights = 2 * random.random(shape) - 1
class NeuralNetwork():
    """A two-layer, fully-connected network trained by backpropagation.

    Both layers must expose a ``synaptic_weights`` matrix; activations
    use the logistic sigmoid throughout.
    """
    def __init__(self, layer1, layer2):
        self.layer1 = layer1
        self.layer2 = layer2

    def _squash(self, x):
        # Logistic sigmoid: maps any real value into (0, 1).
        return 1 / (1 + exp(-x))

    def _squash_gradient(self, y):
        # Sigmoid derivative expressed in terms of its own output y;
        # indicates how confident we are about the existing weight.
        return y * (1 - y)

    def think(self, inputs):
        """Forward-propagate *inputs*; return (hidden activations, outputs)."""
        hidden = self._squash(dot(inputs, self.layer1.synaptic_weights))
        final = self._squash(dot(hidden, self.layer2.synaptic_weights))
        return hidden, final

    def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
        """Adjust both weight matrices by trial and error for the given
        number of iterations (plain gradient descent)."""
        for _ in range(number_of_training_iterations):
            hidden, output = self.think(training_set_inputs)

            # Output-layer error (target minus prediction) and its delta.
            output_delta = (training_set_outputs - output) * self._squash_gradient(output)
            # Hidden-layer error, apportioned back through the output weights.
            hidden_delta = output_delta.dot(self.layer2.synaptic_weights.T) * self._squash_gradient(hidden)

            # Compute both updates before applying either.
            first_adjustment = training_set_inputs.T.dot(hidden_delta)
            second_adjustment = hidden.T.dot(output_delta)
            self.layer1.synaptic_weights += first_adjustment
            self.layer2.synaptic_weights += second_adjustment

    def print_weights(self):
        """Dump both weight matrices to stdout."""
        print("    Layer 1 (4 neurons, each with 3 inputs):")
        print(self.layer1.synaptic_weights)
        print("    Layer 2 (1 neuron, with 4 inputs):")
        print(self.layer2.synaptic_weights)
if __name__ == "__main__":
    # Seed the random number generator so every run is reproducible.
    random.seed(1)
    # Create layer 1 (4 neurons, each with 3 inputs)
    layer1 = NeuronLayer(4, 3)
    # Create layer 2 (a single neuron with 4 inputs)
    layer2 = NeuronLayer(1, 4)
    # Combine the layers to create a neural network
    neural_network = NeuralNetwork(layer1, layer2)
    print("Stage 1) Random starting synaptic weights: ")
    neural_network.print_weights()
    # The training set. We have 7 examples, each consisting of 3 input values
    # and 1 output value.
    training_set_inputs = array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1], [0, 0, 0]])
    training_set_outputs = array([[0, 1, 1, 1, 1, 0, 0]]).T
    # Train the neural network using the training set.
    # Do it 60,000 times and make small adjustments each time.
    neural_network.train(training_set_inputs, training_set_outputs, 60000)
    print("Stage 2) New synaptic weights after training: ")
    neural_network.print_weights()
    # Test the neural network with a new, previously unseen situation.
    print("Stage 3) Considering a new situation [1, 1, 0] -> ?: ")
    hidden_state, output = neural_network.think(array([1, 1, 0]))
    print(output)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class ExportJobsOperationResultsOperations(object):
    """ExportJobsOperationResultsOperations operations.

    AutoRest-generated operation group; do not hand-edit the request
    plumbing without regenerating.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2016-12-01".
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed API version for this operation group.
        self.api_version = "2016-12-01"

        self.config = config

    def get(
            self, vault_name, resource_group_name, operation_id, custom_headers=None, raw=False, **operation_config):
        """Gets the operation result of operation triggered by Export Jobs API. If
        the operation is successful, then it also contains URL of a Blob and a
        SAS key to access the same. The blob contains exported jobs in JSON
        serialized format.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the
         recovery services vault is present.
        :type resource_group_name: str
        :param operation_id: OperationID which represents the export job.
        :type operation_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`OperationResultInfoBaseResource
         <azure.mgmt.recoveryservicesbackup.models.OperationResultInfoBaseResource>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupJobs/operationResults/{operationId}'
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'operationId': self._serialize.url("operation_id", operation_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced service-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        # 200 carries the finished result; 202 means the export is still in
        # progress (no body to deserialize). Anything else is an error.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('OperationResultInfoBaseResource', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the GigType table and link Gig to it."""
        # Adding model 'GigType'
        db.create_table('gig_registry_gigtype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
        ))
        db.send_create_signal('gig_registry', ['GigType'])

        # Adding field 'Gig.gig_type'
        # Nullable FK so existing Gig rows remain valid without a type.
        db.add_column('gig_registry_gig', 'gig_type',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gig_registry.GigType'], null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the gig_type column and GigType table."""
        # Deleting model 'GigType'
        db.delete_table('gig_registry_gigtype')

        # Deleting field 'Gig.gig_type' (stored as the gig_type_id column)
        db.delete_column('gig_registry_gig', 'gig_type_id')
models = {
'gig_registry.band': {
'Meta': {'object_name': 'Band'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'founded': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'genre': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['gig_registry.Genre']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['gig_registry.Musician']", 'null': 'True', 'through': "orm['gig_registry.BandMembership']", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'updated_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
},
'gig_registry.bandmembership': {
'Meta': {'object_name': 'BandMembership'},
'band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Band']"}),
'finished': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'musician': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Musician']"}),
'started': ('django.db.models.fields.DateField', [], {})
},
'gig_registry.genre': {
'Meta': {'object_name': 'Genre'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'gig_registry.gig': {
'Meta': {'object_name': 'Gig'},
'bands': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['gig_registry.Band']", 'symmetrical': 'False'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
'cost': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'finish': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gig_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.GigType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'start': ('django.db.models.fields.DateField', [], {}),
'updated_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Venue']"})
},
'gig_registry.gigtype': {
'Meta': {'object_name': 'GigType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'gig_registry.location': {
'Meta': {'object_name': 'Location'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '6', 'blank': 'True'}),
'lon': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '6', 'blank': 'True'}),
'post_code': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'suburb': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'updated_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'gig_registry.manager': {
'Meta': {'object_name': 'Manager', '_ormbases': ['gig_registry.Person']},
'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gig_registry.Person']", 'unique': 'True', 'primary_key': 'True'})
},
'gig_registry.musician': {
'Meta': {'object_name': 'Musician', '_ormbases': ['gig_registry.Person']},
'instrument': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gig_registry.Person']", 'unique': 'True', 'primary_key': 'True'})
},
'gig_registry.owner': {
'Meta': {'object_name': 'Owner', '_ormbases': ['gig_registry.Person']},
'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gig_registry.Person']", 'unique': 'True', 'primary_key': 'True'})
},
'gig_registry.person': {
'Meta': {'object_name': 'Person'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'nick_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'gig_registry.stage': {
'Meta': {'object_name': 'Stage'},
'capacity': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'gig_registry.venue': {
'Meta': {'object_name': 'Venue'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
'established': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'stages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['gig_registry.Stage']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'O'", 'max_length': '1'}),
'status_notes': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'venue_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['gig_registry'] | shaunokeefe/gigs | gigs/gig_registry/migrations/0013_auto__add_gigtype__add_field_gig_gig_type.py | Python | bsd-3-clause | 9,596 |
#!/usr/bin/env python
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import dab
class qa_complex_to_interleaved_float_vcf(gr_unittest.TestCase):
    """
    @brief QA for the complex to interleaved float block

    This class implements a test bench to verify the corresponding C++ class.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001_complex_to_interleaved_float_vcf(self):
        # Each vector of 2 complex samples becomes 4 interleaved floats:
        # all real parts first, then all imaginary parts.
        samples = (1+2j, 3+4j, 5+6j, 7+8j)
        expected = (1, 3, 2, 4, 5, 7, 6, 8)

        source = blocks.vector_source_c(samples)
        to_vectors = blocks.stream_to_vector(gr.sizeof_gr_complex, 2)
        dut = dab.complex_to_interleaved_float_vcf(2)
        to_stream = blocks.vector_to_stream(gr.sizeof_float, 4)
        sink = blocks.vector_sink_f()

        self.tb.connect(source, to_vectors, dut, to_stream, sink)
        self.tb.run()

        self.assertFloatTuplesAlmostEqual(expected, sink.data(), 6)
if __name__ == '__main__':
gr_unittest.main()
| kit-cel/gr-dab | python/qa/qa_complex_to_interleaved_float_vcf.py | Python | gpl-3.0 | 1,038 |
__copyright__ = """
Copyright (C) 2005, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see http://www.gnu.org/licenses/.
"""
import sys, os
from pydoc import pager
from stgit.argparse import opt
from stgit.commands.common import *
from stgit.utils import *
from stgit.out import *
from stgit import argparse, stack, git
from stgit.lib import git as gitlib
help = 'Show the tree diff'
kind = 'wc'
usage = ['[options] [--] [<files or dirs>]']
description = """
Show the diff (default) or diffstat between the current working copy
or a tree-ish object and another tree-ish object (defaulting to HEAD).
File names can also be given to restrict the diff output. The
tree-ish object has the format accepted by the linkstg:id[] command."""
args = [argparse.known_files, argparse.dirty_files]
options = [
opt('-r', '--range', metavar = 'rev1[..[rev2]]', dest = 'revs',
args = [argparse.patch_range(argparse.applied_patches,
argparse.unapplied_patches,
argparse.hidden_patches)],
short = 'Show the diff between revisions'),
opt('-s', '--stat', action = 'store_true',
short = 'Show the stat instead of the diff'),
] + argparse.diff_opts_option()
directory = DirectoryHasRepository(log = False)
def func(parser, options, args):
    """Show the tree diff (or diffstat) between two tree-ish objects.

    With no -r option, diffs the working copy against HEAD. -r accepts
    ``rev1``, ``rev1..rev2`` or ``rev1..`` forms; anything else is an
    argument error.
    """
    args = git.ls_files(args)
    directory.cd_to_topdir()

    rev1, rev2 = 'HEAD', None
    if options.revs:
        parts = options.revs.split('..')
        if len(parts) == 1:
            rev1, rev2 = parts[0], None
        elif len(parts) == 2:
            rev1, rev2 = parts
        else:
            parser.error('incorrect parameters to -r')

    if not options.stat:
        # Colour output only makes sense when showing the actual diff.
        options.diff_flags.extend(color_diff_flags())

    diff_str = git.diff(args, rev1 and git_id(crt_series, rev1),
                        rev2 and git_id(crt_series, rev2),
                        diff_flags = options.diff_flags)

    if options.stat:
        out.stdout_raw(gitlib.diffstat(diff_str) + '\n')
    elif diff_str:
        pager(diff_str)
| vincele/stgit | stgit/commands/diff.py | Python | gpl-2.0 | 2,738 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#modulo para traducir datos
import zdb
class ZT:
    """Translation helper (stub): every translation method is unimplemented.

    ``zdb`` is imported at module level, presumably as the backing store
    for words and expressions -- TODO confirm once methods are implemented.
    """
    def __init__(self,idioma):
        # Target language identifier for this translator instance.
        self.idioma=idioma
    def t(self,oracion):
        """Translate a sentence (not implemented)."""
        pass
    def t2(self,oracion,contexto):
        """Translate a sentence within a context (not implemented)."""
        pass
    def agregar_exp(self,idioma_t,idioma_t2,exp1,exp2):
        """Register an expression pair between two languages (not implemented)."""
        pass
    def agregar_p(self,idioma_t,idioma_t2,p1,p2):
        """Register a word pair between two languages (not implemented)."""
        pass
    def crear_i(self,idioma):
        """Create a new language entry (not implemented)."""
        pass
| ZerpaTechnology/AsenZor | modulos/ztec/zt.py | Python | lgpl-3.0 | 385 |
from functools import wraps
from tornado.options import options
import test.base
import models
def twittertest(fn):
    """Decorator: run the wrapped test only when Twitter is configured.

    When ``options.twitter_consumer_key`` is unset the test body is
    skipped and None is returned (unittest v1 has no real skip support,
    so this stands in for one).
    """
    @wraps(fn)
    def test(self):
        if not options.twitter_consumer_key:
            return None
        return fn(self)
    return test
class ToolsFindPeopleTests(test.base.BaseAsyncTestCase):
    """Functional tests for the /tools/find-shakes pages.

    Twitter-dependent cases are gated by the ``twittertest`` decorator and
    only execute when ``options.twitter_consumer_key`` is configured.
    """
    def setUp(self):
        # Create and persist a confirmed admin user to sign in with.
        super(ToolsFindPeopleTests, self).setUp()
        self.user = models.User(name='admin', email='admin@mltshp.com', email_confirmed=1)
        self.user.set_password('pass')
        self.user.save()
    def test_tools_find_shakes(self):
        """
        /tools/find-shakes and its sub-pages should be accessible.
        """
        response = self.fetch('/tools/find-shakes')
        self.assertEqual(200, response.code)
        response = self.fetch('/tools/find-shakes/people')
        self.assertEqual(200, response.code)
        response = self.fetch('/tools/find-shakes/twitter')
        self.assertEqual(200, response.code)
    @twittertest
    def test_tools_find_people_from_twitter_errors(self):
        """
        /tools/find-shakes/quick-fetch-twitter should return an
        error message if no external service is connected.
        """
        self.sign_in('admin', 'pass')
        response = self.fetch_url('/tools/find-shakes/quick-fetch-twitter')
        self.assertTrue(response.body.find('to find your Twitter friends') > 0)
    @twittertest
    def test_tools_find_people_from_twitter_returns_friends_when_populated(self):
        """
        /tools/find-shakes/quick-fetch-twitter should return
        a list of users if we already have a friend graph for a user populated.
        """
        self.sign_in('admin', 'pass')
        # Link the admin user to a Twitter account (service_id is the
        # external Twitter user id).
        self.user_service = models.Externalservice(user_id=self.user.id, service_id=1000, screen_name='torrez',
            type=models.Externalservice.TWITTER, service_key='asdf', service_secret='qwer')
        self.user_service.save()
        # A second local user whose Twitter id the admin "follows".
        user2 = models.User(name='user2', email='user2@mltshp.com', email_confirmed=1)
        user2.save()
        user2_service = models.Externalservice(user_id=user2.id, service_id=2000, screen_name='user2',
            type=models.Externalservice.TWITTER, service_key='asdf', service_secret='qwer')
        user2_service.save()
        models.ExternalRelationship.add_relationship(self.user, 2000, models.Externalservice.TWITTER)
        # With the friend graph populated, user2 should appear in the page.
        response = self.fetch_url('/tools/find-shakes/quick-fetch-twitter')
        self.assertTrue(response.body.find('user2') > 1)
        self.assertEqual(200, response.code)
| spaceninja/mltshp | test/functional/tools_find_people_tests.py | Python | mpl-2.0 | 2,623 |
#!/usr/bin/python
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import tempfile
import time
import shutil
import unittest
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
class ElementAttributeTests(unittest.TestCase):
def testShouldReturnNullWhenGettingTheValueOfAnAttributeThatIsNotListed(self):
self._loadSimplePage()
head = self.driver.find_element_by_xpath("/html")
attribute = head.get_attribute("cheese")
self.assertTrue(attribute is None)
def testShouldReturnEmptyAttributeValuesWhenPresentAndTheValueIsActuallyEmpty(self):
self._loadSimplePage()
body = self.driver.find_element_by_xpath("//body")
self.assertEqual("", body.get_attribute("style"))
def testShouldReturnTheValueOfTheDisabledAttributeAsFalseIfNotSet(self):
self._loadPage("formPage")
inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
self.assertEqual("false", inputElement.get_attribute("disabled"))
self.assertTrue(inputElement.is_enabled())
pElement = self.driver.find_element_by_id("peas")
self.assertEqual("false", inputElement.get_attribute("disabled"))
self.assertTrue(inputElement.is_enabled())
def testShouldReturnTheValueOfTheIndexAttrbuteEvenIfItIsMissing(self):
self._loadPage("formPage")
multiSelect = self.driver.find_element_by_id("multi")
options = multiSelect.find_elements_by_tag_name("option")
self.assertEqual("1", options[1].get_attribute("index"))
def testShouldIndicateTheElementsThatAreDisabledAreNotis_enabled(self):
self._loadPage("formPage")
inputElement = self.driver.find_element_by_xpath("//input[@id='notWorking']")
self.assertFalse(inputElement.is_enabled())
inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
self.assertTrue(inputElement.is_enabled())
def testElementsShouldBeDisabledIfTheyAreDisabledUsingRandomDisabledStrings(self):
self._loadPage("formPage")
disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
self.assertFalse(disabledTextElement1.is_enabled())
disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
self.assertFalse(disabledTextElement2.is_enabled())
disabledSubmitElement = self.driver.find_element_by_id("disabledSubmitElement")
self.assertFalse(disabledSubmitElement.is_enabled())
def testShouldIndicateWhenATextAreaIsDisabled(self):
self._loadPage("formPage")
textArea = self.driver.find_element_by_xpath("//textarea[@id='notWorkingArea']")
self.assertFalse(textArea.is_enabled())
def testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings(self):
self._loadPage("formPage")
disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
try:
disabledTextElement1.send_keys("foo")
self.fail("Should have thrown exception")
except:
pass
self.assertEqual("", disabledTextElement1.text)
disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
try:
disabledTextElement2.send_keys("bar")
self.fail("Should have thrown exception")
except:
pass
self.assertEqual("", disabledTextElement2.text)
def testShouldIndicateWhenASelectIsDisabled(self):
self._loadPage("formPage")
enabled = self.driver.find_element_by_name("selectomatic")
disabled = self.driver.find_element_by_name("no-select")
self.assertTrue(enabled.is_enabled())
self.assertFalse(disabled.is_enabled())
def testShouldReturnTheValueOfCheckedForACheckboxEvenIfItLacksThatAttribute(self):
self._loadPage("formPage")
checkbox = self.driver.find_element_by_xpath("//input[@id='checky']")
self.assertTrue(checkbox.get_attribute("checked") is None)
checkbox.click()
self.assertEqual("true", checkbox.get_attribute("checked"))
def testShouldReturnTheValueOfSelectedForRadioButtonsEvenIfTheyLackThatAttribute(self):
self._loadPage("formPage")
neverSelected = self.driver.find_element_by_id("cheese")
initiallyNotSelected = self.driver.find_element_by_id("peas")
initiallySelected = self.driver.find_element_by_id("cheese_and_peas")
self.assertTrue(neverSelected.get_attribute("selected") is None, "false")
self.assertTrue(initiallyNotSelected.get_attribute("selected") is None, "false")
self.assertEqual("true", initiallySelected.get_attribute("selected"), "true")
initiallyNotSelected.click()
self.assertTrue(neverSelected.get_attribute("selected") is None)
self.assertEqual("true", initiallyNotSelected.get_attribute("selected"))
self.assertTrue(initiallySelected.get_attribute("selected") is None)
def testShouldReturnTheValueOfSelectedForOptionsInSelectsEvenIfTheyLackThatAttribute(self):
self._loadPage("formPage")
selectBox = self.driver.find_element_by_xpath("//select[@name='selectomatic']")
options = selectBox.find_elements_by_tag_name("option")
one = options[0]
two = options[1]
self.assertTrue(one.is_selected())
self.assertFalse(two.is_selected())
self.assertEqual("true", one.get_attribute("selected"))
self.assertTrue(two.get_attribute("selected") is None)
def testShouldReturnValueOfClassAttributeOfAnElement(self):
self._loadPage("xhtmlTest")
heading = self.driver.find_element_by_xpath("//h1")
classname = heading.get_attribute("class")
self.assertEqual("header", classname)
# Disabled due to issues with Frames
#def testShouldReturnValueOfClassAttributeOfAnElementAfterSwitchingIFrame(self):
# self._loadPage("iframes")
# self.driver.switch_to_frame("iframe1")
#
# wallace = self.driver.find_element_by_xpath("//div[@id='wallace']")
# classname = wallace.get_attribute("class")
# self.assertEqual("gromit", classname)
def testShouldReturnTheContentsOfATextAreaAsItsValue(self):
self._loadPage("formPage")
value = self.driver.find_element_by_id("withText").get_attribute("value")
self.assertEqual("Example text", value)
def testShouldTreatReadonlyAsAValue(self):
self._loadPage("formPage")
element = self.driver.find_element_by_name("readonly")
readOnlyAttribute = element.get_attribute("readonly")
textInput = self.driver.find_element_by_name("x")
notReadOnly = textInput.get_attribute("readonly")
self.assertNotEqual(readOnlyAttribute, notReadOnly)
    def testShouldGetNumericAtribute(self):
        # NOTE(review): "Atribute" is a typo in the method name; kept as-is
        # since external tooling may select the test by name.
        # Numeric HTML attributes (rows="5") come back as strings.
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("withText")
        self.assertEqual("5", element.get_attribute("rows"))
def testCanReturnATextApproximationOfTheStyleAttribute(self):
self._loadPage("javascriptPage")
style = self.driver.find_element_by_id("red-item").get_attribute("style")
self.assertTrue("background-color" in style.lower())
def tesShouldGetUnicodeCharsFromAttribute(self):
self._loadPage("formPage")
title = self.driver.find_element_by_id("vsearchGadget").get_attribute("title")
self.assertEqual(u"Hvad s\xf8ger du?", title)
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
    def _loadSimplePage(self):
        # Convenience wrapper: navigate the driver to simpleTest.html.
        self._loadPage("simpleTest")
    def _loadPage(self, name):
        # Navigate the driver to the named page served by the test webserver.
        self.driver.get(self._pageURL(name))
| leighpauls/k2cro4 | third_party/webdriver/pylib/test/selenium/webdriver/common/element_attribute_tests.py | Python | bsd-3-clause | 8,580 |
from __future__ import print_function
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sys
def plot_image_patches(x, ksize_rows=299, ksize_cols=299):
    """Draw every patch of *x* on one figure and return the figure.

    *x* is an already-evaluated patches array of shape
    [1, rows, cols, patch_pixels]; each flattened patch is reshaped to
    (ksize_rows, ksize_cols, 3) and shown in an rows x cols grid.
    """
    n_rows, n_cols = x.shape[1], x.shape[2]
    # figsize (width/height in inches) can be tuned to make the output fit
    # better, e.g. plt.figure(figsize=(n_rows, n_cols)).
    fig = plt.figure()
    grid = gridspec.GridSpec(n_rows, n_cols)
    grid.update(wspace=0.01, hspace=0.01)
    for row in range(n_rows):
        for col in range(n_cols):
            axis = plt.subplot(grid[row * n_cols + col])
            plt.axis('off')
            axis.set_xticklabels([])
            axis.set_yticklabels([])
            axis.set_aspect('auto')
            plt.imshow(x[0, row, col, ].reshape(ksize_rows, ksize_cols, 3))
    return fig
def plot_image_patches2(image_patches, sess, ksize_rows=299, ksize_cols=299):
    """Plot a (symbolic) TF patches tensor by evaluating one patch at a time.

    Unlike plot_image_patches, this takes the un-evaluated ``image_patches``
    tensor plus a live session and runs ``sess.run`` per patch.
    NOTE(review): one ``sess.run`` call per patch is slow for large grids;
    evaluating the whole tensor once (Method 1 in the caller) avoids that.
    """
    #x = sess.run(image_patches)
    #nr = x.shape[1]
    #nc = x.shape[2]
    #del x
    # Evaluate only the shape to get the patch-grid dimensions.
    a = sess.run(tf.shape(image_patches))
    nr, nc = a[1], a[2]
    print('width: {}; height: {}'.format(nr, nc), file=sys.stderr)
    # figsize: width and height in inches. can be changed to make
    #+output figure fit well. The default often works well.
    #fig = plt.figure(figsize=(nr, nc))
    fig = plt.figure()
    gs = gridspec.GridSpec(nr, nc)
    gs.update(wspace=0.01, hspace=0.01)
    for i in range(nr):
        for j in range(nc):
            ax = plt.subplot(gs[i*nc+j])
            plt.axis('off')
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.set_aspect('auto')
            # Un-flatten one patch back to an H x W x 3 image.
            patch = tf.reshape(image_patches[0,i,j,], [ksize_rows, ksize_cols, 3])
            #patch = tf.image.random_brightness(patch, 0.3)
            #patch = tf.image.random_contrast(patch, 0.1, 0.9)
            #patch = tf.image.random_saturation(patch, 0.1, 0.9)
            #patch = tf.image.random_hue(patch, 0.4)
            #patch = tf.image.random_flip_up_down(patch, 0.4)
            plt.imshow(sess.run(patch))
            print('processed {},{} patch, {}.'.format(i,j, i*nc+j),file=sys.stderr)
    return fig
# Input image and sliding-window geometry.
image_file = '3.png'
image_string = tf.gfile.FastGFile(image_file).read()
ksize_rows = 299
ksize_cols = 299
# strides_rows and strides_cols determine the distance between
#+ the centers of two consecutive patches.
strides_rows = 299 # 128
strides_cols = 299 # 128
sess = tf.InteractiveSession()
image = tf.image.decode_image(image_string, channels=3)
# The size of sliding window
ksizes = [1, ksize_rows, ksize_cols, 1]
# How far the centers of 2 consecutive patches are in the image
strides = [1, strides_rows, strides_cols, 1]
rates = [1, 1, 1, 1] # sample pixel consecutively
padding='VALID' # or 'SAME'
# extract_image_patches expects a batch dimension, so add one.
image = tf.expand_dims(image, 0)
image_patches = tf.extract_image_patches(image, ksizes, strides, rates, padding)
# Method 1: evaluate the whole patches tensor once, then plot.
#x=sess.run(image_patches)
#print(x.shape, file=sys.stderr)
#fig = plot_image_patches(x)
# Method 2: plot directly from the symbolic tensor (one sess.run per patch).
fig = plot_image_patches2(image_patches, sess)
#plt.savefig('image_patches.png', bbox_inches='tight',dpi=300) # use dpi to control image size, e.g. 800
plt.savefig('image_patches.png', bbox_inches='tight',dpi=120) # use dpi to control image size, e.g. 800
plt.close(fig)
sess.close()
| lixiangchun/mynotebook | machine_learning/extract_image_patches.py | Python | gpl-3.0 | 3,071 |
from flask.sessions import SessionInterface, SessionMixin
import os, binascii
# Session payload: a plain dict that also carries Flask's SessionMixin
# bookkeeping (new/modified/permanent flags).
class SessionData(dict, SessionMixin): pass
class Session(SessionInterface):
    """Flask session interface backed by a memcache-style ``app.cache``.

    The memcache key joins the client IP with a random cookie token, so a
    session is only valid from the address that created it.

    NOTE(review): per-request state (cookie_session_id, session_new,
    memcache_session_id) is stored on this interface instance, which Flask
    shares across all requests — concurrent requests can overwrite each
    other's ids.  TODO: confirm the app is single-threaded, or move these
    onto the session object itself.
    """
    session_class = SessionData
    def open_session(self, app, request):
        # Reuse the cookie token if present; otherwise mint a fresh
        # 40-byte random hex token and remember to set the cookie later.
        self.cookie_session_id = request.cookies.get(app.session_cookie_name, None)
        self.session_new = False
        if self.cookie_session_id is None:
            self.cookie_session_id = binascii.hexlify(os.urandom(40)).decode('ascii')
            self.session_new = True
        # Bind the session to the client address: "<ip>@<token>".
        self.memcache_session_id = '@'.join(
            [
                request.remote_addr,
                self.cookie_session_id
            ]
        )
        app.logger.debug('Open session %s', self.memcache_session_id)
        # Missing/expired cache entries fall back to an empty session;
        # re-setting refreshes the entry.
        session = app.cache.get(self.memcache_session_id) or {}
        app.cache.set(self.memcache_session_id, session)
        return self.session_class(session)
    def save_session(self, app, session, response):
        # Cookie parameters come from the standard SessionInterface helpers.
        expires = self.get_expiration_time(app, session)
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)
        httponly = self.get_cookie_httponly(app)
        secure = self.get_cookie_secure(app)
        app.cache.set(self.memcache_session_id, session)
        # Only newly-created sessions need the cookie (re)sent.
        if self.session_new:
            response.set_cookie(app.session_cookie_name, self.cookie_session_id, path=path,
                                expires=expires, httponly=httponly,
                                secure=secure, domain=domain)
        app.logger.debug('Set session %s with %s', self.memcache_session_id, session)
| isucon/isucon3 | qualifier/webapp/python/flask_memcache_session/session.py | Python | mit | 1,635 |
##############################################################################
#
# Copyright (C) 2009 - 2011 EduSense BV (<http://www.edusense.nl>)
# and Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: evaluated as a single dict literal.
# Fix: user-facing description typo "transation" -> "transaction".
{
    'name': 'Triodos (NL) Bank Statements Import',
    'version': '0.92',
    'license': 'AGPL-3',
    'author': ['Therp BV', 'EduSense BV'],
    'website': 'https://launchpad.net/account-banking',
    'category': 'Account Banking',
    'depends': ['account_banking'],
    'description': '''
Module to import Dutch Triodos bank format transaction files (CSV format).

As the Triodos bank does not provide detailed specification concerning possible
values and their meaning for the fields in the CSV file format, the statements
are parsed according to an educated guess based on incomplete information.
You can contact the account-banking developers through their launchpad page and
help improve the performance of this import filter on
https://launchpad.net/account-banking.

Note that imported bank transfers are organized in statements covering periods
of one week, even if the imported files cover a different period.

This modules contains no logic, just an import filter for account_banking.
''',
    'installable': True,
}
| rschnapka/bank-payment | account_banking_nl_triodos/__openerp__.py | Python | agpl-3.0 | 2,042 |
from gi.repository import GLib
import sys
import softwarecenter.plugin
class ExamplePlugin(softwarecenter.plugin.Plugin):
    """Example plugin that hides the exhibits banner of the available pane."""

    def _try_to_hide_banner(self):
        """GLib timeout callback: keep polling until the available pane has
        finished initializing, then hide the banner once."""
        pane = self.app.available_pane
        if not pane.view_initialized:
            # Not ready yet -- returning True reschedules this callback.
            return True
        pane.cat_view.vbox.get_children()[0].hide()
        # Returning False removes the timeout source.
        return False

    def init_plugin(self):
        """Entry point called by the plugin loader; start the poll timer."""
        sys.stderr.write("init_plugin\n")
        GLib.timeout_add(100, self._try_to_hide_banner)
| sti-lyneos/shop | doc/example_plugin.py | Python | lgpl-3.0 | 576 |
from astrodata.adutils.testutil import eq_, AstroData, sci123, scivardq123
def test7():
    """
    ASTRODATA-insert TEST 7: AUTO NUMBER, Increment XVER & latter index if XNAME exists
    """
    # Python 2 module (print statements); code left byte-identical.
    ad1 = AstroData(sci123)
    ad4 = AstroData(scivardq123)
    print "\n >>>>>>> AD <<<<<<<<"
    ad1.info()
    print "\n >>>>>>> AD insert <<<<<<<<"
    adsci = ad4['SCI', 1]
    print("adsci = ad4['SCI', 1]")
    # auto_number=True must renumber the inserted SCI extension instead of
    # clashing with the existing SCI 1-3 extensions.
    ad1.insert(index=1, header=adsci.header, data=adsci.data, auto_number=True)
    print "ad1.insert(index=1, header=adsci.header, data=adsci.data, auto_number=True)"
    print "\n >>>>>>> MODIFIED AD <<<<<<<<"
    ad1.info()
    # The inserted extension lands at index 1 as SCI with EXTVER bumped to 4.
    eq_(ad1[1].hdulist[1].name, "SCI")
    eq_(ad1[1].extver(), 4)
| pyrrho314/recipesystem | trunk/astrodata/scripts/test/testinsert/test7/testinsert_test7.py | Python | mpl-2.0 | 753 |
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from trove.common import cfg
from trove.common.i18n import _
from trove.common import utils
CONF = cfg.CONF
"""
The classes below are generic and can be used for any datastore, but will not
provide validation. To add a new datastore create a sub-package (see mysql for
example) and create new child classes inheriting from these generic classes.
As a guideline, for new datastores the following class methods/variables should
be overridden if validation is desired (see their docstrings for additional
info):
DatastoreModelsBase:
__init__
DatastoreSchema:
_max_schema_name_length
_is_valid_schema_name
verify_dict
_create_checks
_delete_checks
DatastoreUser:
_is_valid_user_name
_is_valid_host_name
_is_valid_password
_is_valid_database
verify_dict
_create_checks
_delete_checks
"""
class DatastoreModelsBase(object):
    """Base model for the datastore schema and user models."""

    def serialize(self):
        """Return the instance state as a plain dictionary."""
        return self.__dict__

    def _deserialize(self, obj):
        """Replace the instance state with the given dictionary."""
        self.__dict__ = obj

    def __repr__(self):
        return str(self.serialize())

    @classmethod
    def deserialize(cls, value, verify=True):
        """Rebuild an instance from serialized state, optionally running
        the model's validation afterwards."""
        instance = cls(deserializing=True)
        instance._deserialize(value)
        if verify:
            instance.verify_dict()
        return instance

    @abc.abstractmethod
    def verify_dict(self):
        """Validate the object's data dictionary.

        :returns: True if dictionary is valid.
        """

    @staticmethod
    def check_string(value, desc):
        """Check if the value is a string/unicode.

        :param value: Value to check.
        :param desc: Description for exception message.
        :raises: ValueError if not a string/unicode.
        """
        if isinstance(value, str):
            return
        raise ValueError(_("%(desc)s is not a string. Type = %(t)s.")
                         % {'desc': desc, 't': type(value)})
class DatastoreSchema(DatastoreModelsBase):
    """Represents a database schema.

    Generic base: subclasses override the validation hooks
    (_max_schema_name_length, _is_valid_schema_name, _create_checks,
    _delete_checks) to add datastore-specific rules.
    """
    def __init__(self, name=None, deserializing=False):
        # _collate/_character_set are initialized here for subclasses that
        # use them; this base class never reads them.
        self._name = None
        self._collate = None
        self._character_set = None
        # If both or neither are passed in this is a bug.
        if bool(deserializing) == bool(name):
            raise RuntimeError(_("Bug in DatastoreSchema()"))
        if not deserializing:
            self.name = name
    def __str__(self):
        return str(self.name)
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        # Validation runs on every assignment, including verify_dict's
        # re-assignment after deserialization.
        self._validate_schema_name(value)
        self._name = value
    def _validate_schema_name(self, value):
        """Perform checks on a given schema name.

        :param value: Validated schema name.
        :type value: string
        :raises: ValueError On validation errors.
        """
        if not value:
            raise ValueError(_("Schema name empty."))
        self.check_string(value, 'Schema name')
        if self._max_schema_name_length and (len(value) >
                                             self._max_schema_name_length):
            raise ValueError(_("Schema name '%(name)s' is too long. "
                               "Max length = %(max_length)d.")
                             % {'name': value,
                                'max_length': self._max_schema_name_length})
        elif not self._is_valid_schema_name(value):
            raise ValueError(_("'%s' is not a valid schema name.") % value)
    @property
    def _max_schema_name_length(self):
        """Return the maximum valid schema name length if any.

        Subclass hook; None disables the length check above.
        :returns: Maximum schema name length or None if unlimited.
        """
        return None
    def _is_valid_schema_name(self, value):
        """Validate a given schema name.

        Subclass hook; the base accepts everything.
        :param value: Validated schema name.
        :type value: string
        :returns: TRUE if valid, FALSE otherwise.
        """
        return True
    def verify_dict(self):
        """Check that the object's dictionary values are valid by reloading
        them via the property setters. The checkers should raise the
        ValueError exception if invalid. All mandatory fields should be
        checked.
        """
        self.name = self._name
    @property
    def ignored_dbs(self):
        # Reserved database names come from configuration.
        return cfg.get_ignored_dbs()
    def is_ignored(self):
        return self.name in self.ignored_dbs
    def check_reserved(self):
        """Check if the name is on the ignore_dbs list, meaning it is
        reserved.

        :raises: ValueError if name is on the reserved list.
        """
        if self.is_ignored():
            raise ValueError(_('Database name "%(name)s" is on the reserved '
                               'list: %(reserved)s.')
                             % {'name': self.name,
                                'reserved': self.ignored_dbs})
    def _create_checks(self):
        """Checks to be performed before database can be created."""
        self.check_reserved()
    def check_create(self):
        """Check if the database can be created.

        :raises: ValueError if the schema is not valid for create.
        """
        try:
            self._create_checks()
        except ValueError as e:
            # Re-raise with context so callers see which operation failed.
            raise ValueError(_('Cannot create database: %(error)s')
                             % {'error': str(e)})
    def _delete_checks(self):
        """Checks to be performed before database can be deleted."""
        self.check_reserved()
    def check_delete(self):
        """Check if the database can be deleted.

        :raises: ValueError if the schema is not valid for delete.
        """
        try:
            self._delete_checks()
        except ValueError as e:
            raise ValueError(_('Cannot delete database: %(error)s')
                             % {'error': str(e)})
class DatastoreUser(DatastoreModelsBase):
    """Represents a datastore user.

    Generic base: subclasses override the _is_valid_* hooks and
    schema_model to add datastore-specific validation.
    """
    # Host pattern matching any client host (MySQL-style wildcard).
    _HOSTNAME_WILDCARD = '%'
    root_username = 'root'
    def __init__(self, name=None, password=None, host=None, databases=None,
                 deserializing=False):
        self._name = None
        self._password = None
        self._host = self._HOSTNAME_WILDCARD
        self._databases = []
        self._is_root = False
        if not deserializing:
            self.name = name
            if password:
                self.password = password
            if host:
                self.host = host
            if databases:
                self.databases = databases
    @classmethod
    def root(cls, name=None, password=None, *args, **kwargs):
        # Alternate constructor: build a superuser, generating a random
        # password when none is supplied.
        if not name:
            name = cls.root_username
        if not password:
            password = utils.generate_random_password()
        user = cls(name, password, *args, **kwargs)
        user.make_root()
        return user
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        self._validate_user_name(value)
        self._name = value
    @property
    def password(self):
        return self._password
    @password.setter
    def password(self, value):
        self.check_string(value, "User password")
        if self._is_valid_password(value):
            self._password = value
        else:
            raise ValueError(_("'%s' is not a valid password.") % value)
    def _add_database(self, value):
        # Databases are stored in serialized (dict) form; duplicates are
        # silently ignored because _is_valid_database rejects them.
        serial_db = self._build_database_schema(value).serialize()
        if self._is_valid_database(serial_db):
            self._databases.append(serial_db)
    @property
    def databases(self):
        return self._databases
    @databases.setter
    def databases(self, value):
        # Accepts either a single database name or a list of names.
        if isinstance(value, list):
            for dbname in value:
                self._add_database(dbname)
        else:
            self._add_database(value)
    @property
    def host(self):
        if self._host is None:
            return self._HOSTNAME_WILDCARD
        return self._host
    @host.setter
    def host(self, value):
        self.check_string(value, "User host name")
        if self._is_valid_host_name(value):
            self._host = value
        else:
            raise ValueError(_("'%s' is not a valid hostname.") % value)
    def _build_database_schema(self, name):
        """Build a schema for this user.

        :type name: string
        """
        return self.schema_model(name)
    def deserialize_schema(self, value):
        """Deserialize a user's databases value.

        :type value: dict
        """
        return self.schema_model.deserialize(value)
    def _validate_user_name(self, value):
        """Perform validations on a given user name.

        :param value: Validated user name.
        :type value: string
        :raises: ValueError On validation errors.
        """
        if not value:
            raise ValueError(_("User name empty."))
        self.check_string(value, "User name")
        if self._max_user_name_length and (len(value) >
                                           self._max_user_name_length):
            raise ValueError(_("User name '%(name)s' is too long. "
                               "Max length = %(max_length)d.")
                             % {'name': value,
                                'max_length': self._max_user_name_length})
        elif not self._is_valid_user_name(value):
            raise ValueError(_("'%s' is not a valid user name.") % value)
    @property
    def _max_user_name_length(self):
        """Return the maximum valid user name length if any.

        Subclass hook; None disables the length check above.
        :returns: Maximum user name length or None if unlimited.
        """
        return None
    def _is_valid_user_name(self, value):
        """Validate a given user name.

        Subclass hook; the base accepts everything.
        :param value: User name to be validated.
        :type value: string
        :returns: TRUE if valid, FALSE otherwise.
        """
        return True
    def _is_valid_host_name(self, value):
        """Validate a given host name.

        Subclass hook; the base accepts everything.
        :param value: Host name to be validated.
        :type value: string
        :returns: TRUE if valid, FALSE otherwise.
        """
        return True
    def _is_valid_password(self, value):
        """Validate a given password.

        Subclass hook; the base accepts everything.
        :param value: Password to be validated.
        :type value: string
        :returns: TRUE if valid, FALSE otherwise.
        """
        return True
    def _is_valid_database(self, value):
        """Validate a given database (serialized schema object).

        The base implementation only rejects duplicates.
        :param value: The database to be validated.
        :type value: dict
        :returns: TRUE if valid, FALSE otherwise.
        :raises: ValueError if operation not allowed.
        """
        return value not in self.databases
    def verify_dict(self):
        """Check that the object's dictionary values are valid by reloading
        them via the property setters. The checkers should raise the
        ValueError exception if invalid. All mandatory fields should be
        checked.
        """
        self.name = self._name
        if self.__dict__.get('_password'):
            self.password = self._password
        else:
            self._password = None
        if self.__dict__.get('_host'):
            self.host = self._host
        else:
            self._host = self._HOSTNAME_WILDCARD
        if self.__dict__.get('_databases'):
            for database in self._databases:
                # Create the schema for validation only
                self.deserialize_schema(database)
        else:
            self._databases = []
        if not self.__dict__.get('_is_root'):
            self._is_root = False
    @property
    def schema_model(self):
        # Subclasses return their datastore-specific schema class here.
        return DatastoreSchema
    @property
    def ignored_users(self):
        # Root bypasses the reserved-name list entirely.
        if self._is_root:
            return []
        return cfg.get_ignored_users()
    @property
    def is_ignored(self):
        return self.name in self.ignored_users
    def make_root(self):
        self._is_root = True
    def check_reserved(self):
        """Check if the name is on the ignore_users list, meaning it is
        reserved.

        :raises: ValueError if name is on the reserved list.
        """
        if self.is_ignored:
            raise ValueError(_('User name "%(name)s" is on the reserved '
                               'list: %(reserved)s.')
                             % {'name': self.name,
                                'reserved': self.ignored_users})
    def _create_checks(self):
        """Checks to be performed before user can be created."""
        self.check_reserved()
    def check_create(self):
        """Check if the user can be created.

        :raises: ValueError if the user is not valid for create.
        """
        try:
            self._create_checks()
        except ValueError as e:
            raise ValueError(_('Cannot create user: %(error)s')
                             % {'error': str(e)})
    def _delete_checks(self):
        """Checks to be performed before user can be deleted."""
        self.check_reserved()
    def check_delete(self):
        """Check if the user can be deleted.

        :raises: ValueError if the user is not valid for delete.
        """
        try:
            self._delete_checks()
        except ValueError as e:
            raise ValueError(_('Cannot delete user: %(error)s')
                             % {'error': str(e)})
| openstack/trove | trove/common/db/models.py | Python | apache-2.0 | 14,376 |
# -*- coding: UTF-8 -*-
# Copyright 2013-2015 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
"""Adds functionality for managing *external* courses.
.. autosummary::
:toctree:
models
roles
fixtures
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
    """Plugin descriptor wiring the xcourses actors into Lino's menus."""

    verbose_name = _("Courses")
    short_name = _("Courses")

    def setup_main_menu(self, site, user_type, m):
        if True:  # user_type.courses_level:
            menu = m.add_menu(self.app_label, self.verbose_name)
            menu.add_action('xcourses.CourseProviders')
            menu.add_action('xcourses.CourseOffers')
            menu.add_action('xcourses.PendingCourseRequests')

    def setup_config_menu(self, site, user_type, m):
        menu = m.add_menu(self.app_label, self.verbose_name)
        menu.add_action('xcourses.CourseContents')

    def setup_explorer_menu(self, site, user_type, m):
        menu = m.add_menu(self.app_label, self.verbose_name)
        menu.add_action('xcourses.Courses')
        menu.add_action('xcourses.CourseRequests')
| khchine5/lino-welfare | lino_welfare/modlib/xcourses/__init__.py | Python | agpl-3.0 | 1,678 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: adds the GeneralDonation model.
    # Left byte-identical — Django's migration graph depends on the exact
    # field definitions and dependency list.
    dependencies = [
        ('contributions', '0008_auto_20150219_1555'),
        ('promotions', '0005_auto_20150302_1323'),
    ]
    operations = [
        migrations.CreateModel(
            name='GeneralDonation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                # model_utils fields: timestamps filled in automatically.
                ('created', model_utils.fields.AutoCreatedField(verbose_name='created', editable=False, default=django.utils.timezone.now)),
                ('modified', model_utils.fields.AutoLastModifiedField(verbose_name='modified', editable=False, default=django.utils.timezone.now)),
                # related_name='+' disables the reverse accessor on Pledge.
                ('pledge', models.ForeignKey(verbose_name='Pledge', to='contributions.Pledge', related_name='+')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| mayapurmedia/tovp | tovp/promotions/migrations/0006_generaldonation.py | Python | mit | 1,102 |
import python_building
import python_control
import time
def scale_time(seconds):
    """Map real seconds onto simulated seconds.

    Currently an identity mapping (run in real time).  For accelerated
    runs (1 s real = 1 h simulated) use ``30.0 * seconds / 3600.0``.
    """
    return seconds
# This is a kludge: it should return the same value
# as the control period from the control object.
PERIOD = scale_time(30*60)  # control period: 30 simulated minutes, in seconds
class MPC:
    """Couples the simulated building model with its HVAC controller."""

    def __init__(self):
        # Build the plant model and a controller sized to its zone count.
        self.bldg = python_building.Building()
        self.cntrl = python_control.Control(self.bldg.get_num_zones())
        self.cntrl.set_max_units(self.bldg.get_num_zones() / 2)

    def set_outdoor_temp(self, degF):
        """Forward the outdoor temperature (deg F) to the building model."""
        self.bldg.set_outdoor_temp(degF)

    def get_outdoor_temp(self):
        """Return the building model's outdoor temperature (deg F)."""
        return self.bldg.get_outdoor_temp()

    def run_control(self, simHrs):
        """Advance the simulation by *simHrs* and run one control step."""
        self.bldg.advance(simHrs)
        # Feed current limits and temperatures to the controller.
        for zone in range(self.bldg.get_num_zones()):
            self.cntrl.set_upper_limit(zone, self.bldg.get_high_temp_limit(zone))
            self.cntrl.set_lower_limit(zone, self.bldg.get_low_temp_limit(zone))
            self.cntrl.set_zone_temp(zone, self.bldg.get_indoor_temp(zone))
        self.cntrl.set_outside_temp(self.bldg.get_outdoor_temp())
        self.cntrl.run_control()
        # Apply the controller's commands back to the building.
        for zone in range(self.bldg.get_num_zones()):
            self.bldg.set_hvac_mode(zone, self.cntrl.get_hvac_command(zone))

    def cleanup(self):
        """Release resources held by the building and controller wrappers."""
        self.bldg.cleanup()
        self.cntrl.cleanup()
| afisher1/volttron-applications | contrib/MpcAgent/src/MPC.py | Python | bsd-3-clause | 1,310 |
# -*- coding: utf-8 -*-
"""
babel.core
~~~~~~~~~~
Core locale representation and locale data access.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import os
from babel import localedata
from babel._compat import pickle, string_types
from babel.plural import PluralRule
__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale',
'parse_locale']
_global_data = None
_default_plural_rule = PluralRule({})
def _raise_no_data_error():
raise RuntimeError('The babel data files are not available. '
'This usually happens because you are using '
'a source checkout from Babel and you did '
'not build the data files. Just make sure '
'to run "python setup.py import_cldr" before '
'installing the library.')
def get_global(key):
    """Return the dictionary for the given key in the global data.

    The global data is stored in the ``babel/global.dat`` file and contains
    information independent of individual locales.

    >>> get_global('zone_aliases')['UTC']
    u'Etc/GMT'
    >>> get_global('zone_territories')['Europe/Berlin']
    u'DE'

    The keys available are:

    - ``currency_fractions``
    - ``language_aliases``
    - ``likely_subtags``
    - ``parent_exceptions``
    - ``script_aliases``
    - ``territory_aliases``
    - ``territory_currencies``
    - ``territory_languages``
    - ``territory_zones``
    - ``variant_aliases``
    - ``win_mapping``
    - ``zone_aliases``
    - ``zone_territories``

    .. note:: The internal structure of the data may change between versions.

    .. versionadded:: 0.9

    :param key: the data key
    """
    global _global_data
    # Lazy-load the pickled data once and cache it at module level.
    if _global_data is None:
        dirname = os.path.join(os.path.dirname(__file__))
        filename = os.path.join(dirname, 'global.dat')
        if not os.path.isfile(filename):
            _raise_no_data_error()
        # Idiom fix: a context manager replaces the explicit try/finally,
        # guaranteeing the handle is closed even if pickle.load raises.
        with open(filename, 'rb') as fileobj:
            _global_data = pickle.load(fileobj)
    return _global_data.get(key, {})
#: Mapping of bare language codes to their most common full locale
#: identifier, used when resolving/negotiating locale strings.
LOCALE_ALIASES = {
    'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ',
    'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES',
    'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES',
    'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT',
    'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
    'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
    'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
    'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'
}
class UnknownLocaleError(Exception):
    """Exception thrown when a locale is requested for which no locale data
    is available.
    """

    def __init__(self, identifier):
        """Create the exception.

        :param identifier: the identifier string of the unsupported locale
        """
        message = 'unknown locale %r' % identifier
        Exception.__init__(self, message)
        #: The identifier of the locale that could not be found.
        self.identifier = identifier
class Locale(object):
"""Representation of a specific locale.
>>> locale = Locale('en', 'US')
>>> repr(locale)
"Locale('en', territory='US')"
>>> locale.display_name
u'English (United States)'
A `Locale` object can also be instantiated from a raw locale string:
>>> locale = Locale.parse('en-US', sep='-')
>>> repr(locale)
"Locale('en', territory='US')"
`Locale` objects provide access to a collection of locale data, such as
territory and language names, number and date format patterns, and more:
>>> locale.number_symbols['decimal']
u'.'
If a locale is requested for which no locale data is available, an
`UnknownLocaleError` is raised:
>>> Locale.parse('en_XX')
Traceback (most recent call last):
...
UnknownLocaleError: unknown locale 'en_XX'
For more information see :rfc:`3066`.
"""
    def __init__(self, language, territory=None, script=None, variant=None):
        """Initialize the locale object from the given identifier components.

        >>> locale = Locale('en', 'US')
        >>> locale.language
        'en'
        >>> locale.territory
        'US'

        :param language: the language code
        :param territory: the territory (country or region) code
        :param script: the script code
        :param variant: the variant code
        :raise `UnknownLocaleError`: if no locale data is available for the
                                     requested locale
        """
        #: the language code
        self.language = language
        #: the territory (country or region) code
        self.territory = territory
        #: the script code
        self.script = script
        #: the variant code
        self.variant = variant
        self.__data = None
        # Validate eagerly: an identifier with no backing locale data fails
        # at construction time rather than on first data access.
        identifier = str(self)
        if not localedata.exists(identifier):
            raise UnknownLocaleError(identifier)
@classmethod
def default(cls, category=None, aliases=LOCALE_ALIASES):
"""Return the system default locale for the specified category.
>>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']:
... os.environ[name] = ''
>>> os.environ['LANG'] = 'fr_FR.UTF-8'
>>> Locale.default('LC_MESSAGES')
Locale('fr', territory='FR')
The following fallbacks to the variable are always considered:
- ``LANGUAGE``
- ``LC_ALL``
- ``LC_CTYPE``
- ``LANG``
:param category: one of the ``LC_XXX`` environment variable names
:param aliases: a dictionary of aliases for locale identifiers
"""
# XXX: use likely subtag expansion here instead of the
# aliases dictionary.
locale_string = default_locale(category, aliases=aliases)
return cls.parse(locale_string)
@classmethod
def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES):
"""Find the best match between available and requested locale strings.
>>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
Locale('de', territory='DE')
>>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
Locale('de')
>>> Locale.negotiate(['de_DE', 'de'], ['en_US'])
You can specify the character used in the locale identifiers to separate
the differnet components. This separator is applied to both lists. Also,
case is ignored in the comparison:
>>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
Locale('de', territory='DE')
:param preferred: the list of locale identifers preferred by the user
:param available: the list of locale identifiers available
:param aliases: a dictionary of aliases for locale identifiers
"""
identifier = negotiate_locale(preferred, available, sep=sep,
aliases=aliases)
if identifier:
return Locale.parse(identifier, sep=sep)
@classmethod
def parse(cls, identifier, sep='_', resolve_likely_subtags=True):
"""Create a `Locale` instance for the given locale identifier.
>>> l = Locale.parse('de-DE', sep='-')
>>> l.display_name
u'Deutsch (Deutschland)'
If the `identifier` parameter is not a string, but actually a `Locale`
object, that object is returned:
>>> Locale.parse(l)
Locale('de', territory='DE')
This also can perform resolving of likely subtags which it does
by default. This is for instance useful to figure out the most
likely locale for a territory you can use ``'und'`` as the
language tag:
>>> Locale.parse('und_AT')
Locale('de', territory='AT')
:param identifier: the locale identifier string
:param sep: optional component separator
:param resolve_likely_subtags: if this is specified then a locale will
have its likely subtag resolved if the
locale otherwise does not exist. For
instance ``zh_TW`` by itself is not a
locale that exists but Babel can
automatically expand it to the full
form of ``zh_hant_TW``. Note that this
expansion is only taking place if no
locale exists otherwise. For instance
there is a locale ``en`` that can exist
by itself.
:raise `ValueError`: if the string does not appear to be a valid locale
identifier
:raise `UnknownLocaleError`: if no locale data is available for the
requested locale
"""
if identifier is None:
return None
elif isinstance(identifier, Locale):
return identifier
elif not isinstance(identifier, string_types):
raise TypeError('Unxpected value for identifier: %r' % (identifier,))
parts = parse_locale(identifier, sep=sep)
input_id = get_locale_identifier(parts)
def _try_load(parts):
try:
return cls(*parts)
except UnknownLocaleError:
return None
def _try_load_reducing(parts):
# Success on first hit, return it.
locale = _try_load(parts)
if locale is not None:
return locale
# Now try without script and variant
locale = _try_load(parts[:2])
if locale is not None:
return locale
locale = _try_load(parts)
if locale is not None:
return locale
if not resolve_likely_subtags:
raise UnknownLocaleError(input_id)
# From here onwards is some very bad likely subtag resolving. This
# whole logic is not entirely correct but good enough (tm) for the
# time being. This has been added so that zh_TW does not cause
# errors for people when they upgrade. Later we should properly
# implement ICU like fuzzy locale objects and provide a way to
# maximize and minimize locale tags.
language, territory, script, variant = parts
language = get_global('language_aliases').get(language, language)
territory = get_global('territory_aliases').get(territory, (territory,))[0]
script = get_global('script_aliases').get(script, script)
variant = get_global('variant_aliases').get(variant, variant)
if territory == 'ZZ':
territory = None
if script == 'Zzzz':
script = None
parts = language, territory, script, variant
# First match: try the whole identifier
new_id = get_locale_identifier(parts)
likely_subtag = get_global('likely_subtags').get(new_id)
if likely_subtag is not None:
locale = _try_load_reducing(parse_locale(likely_subtag))
if locale is not None:
return locale
# If we did not find anything so far, try again with a
# simplified identifier that is just the language
likely_subtag = get_global('likely_subtags').get(language)
if likely_subtag is not None:
language2, _, script2, variant2 = parse_locale(likely_subtag)
locale = _try_load_reducing((language2, territory, script2, variant2))
if locale is not None:
return locale
raise UnknownLocaleError(input_id)
    def __eq__(self, other):
        """Locales are equal when all four subtags match.
        Objects lacking any of the subtag attributes are never equal."""
        for key in ('language', 'territory', 'script', 'variant'):
            if not hasattr(other, key):
                return False
        return (self.language == other.language) and \
            (self.territory == other.territory) and \
            (self.script == other.script) and \
            (self.variant == other.variant)
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return not self.__eq__(other)
    def __hash__(self):
        # Hash over the same subtag tuple that __eq__ compares.
        return hash((self.language, self.territory, self.script, self.variant))
    def __repr__(self):
        """Return an eval-able representation, e.g. ``Locale('de', territory='DE')``."""
        parameters = ['']
        for key in ('territory', 'script', 'variant'):
            value = getattr(self, key)
            if value is not None:
                parameters.append('%s=%r' % (key, value))
        parameter_string = '%r' % self.language + ', '.join(parameters)
        return 'Locale(%s)' % parameter_string
    def __str__(self):
        # The canonical identifier string, e.g. 'de_DE' or 'zh_Hans_CN'.
        return get_locale_identifier((self.language, self.territory,
                                      self.script, self.variant))
    @property
    def _data(self):
        # Lazily load the CLDR data for this locale on first access and
        # cache it for the lifetime of the instance.
        if self.__data is None:
            self.__data = localedata.LocaleDataDict(localedata.load(str(self)))
        return self.__data
def get_display_name(self, locale=None):
"""Return the display name of the locale using the given locale.
The display name will include the language, territory, script, and
variant, if those are specified.
>>> Locale('zh', 'CN', script='Hans').get_display_name('en')
u'Chinese (Simplified, China)'
:param locale: the locale to use
"""
if locale is None:
locale = self
locale = Locale.parse(locale)
retval = locale.languages.get(self.language)
if self.territory or self.script or self.variant:
details = []
if self.script:
details.append(locale.scripts.get(self.script))
if self.territory:
details.append(locale.territories.get(self.territory))
if self.variant:
details.append(locale.variants.get(self.variant))
details = filter(None, details)
if details:
retval += ' (%s)' % u', '.join(details)
return retval
display_name = property(get_display_name, doc="""\
The localized display name of the locale.
>>> Locale('en').display_name
u'English'
>>> Locale('en', 'US').display_name
u'English (United States)'
>>> Locale('sv').display_name
u'svenska'
:type: `unicode`
""")
    def get_language_name(self, locale=None):
        """Return the language of this locale in the given locale.
        >>> Locale('zh', 'CN', script='Hans').get_language_name('de')
        u'Chinesisch'
        .. versionadded:: 1.0
        :param locale: the locale to use
        """
        # Default to describing this locale in itself.
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.languages.get(self.language)
    language_name = property(get_language_name, doc="""\
        The localized language name of the locale.
        >>> Locale('en', 'US').language_name
        u'English'
        """)
    def get_territory_name(self, locale=None):
        """Return the territory name in the given locale.
        :param locale: the locale to use (defaults to this locale)
        :return: the translated territory name, or ``None`` if unavailable
        """
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.territories.get(self.territory)
    territory_name = property(get_territory_name, doc="""\
        The localized territory name of the locale if available.
        >>> Locale('de', 'DE').territory_name
        u'Deutschland'
        """)
    def get_script_name(self, locale=None):
        """Return the script name in the given locale.
        :param locale: the locale to use (defaults to this locale)
        :return: the translated script name, or ``None`` if unavailable
        """
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.scripts.get(self.script)
    script_name = property(get_script_name, doc="""\
        The localized script name of the locale if available.
        >>> Locale('sr', 'ME', script='Latn').script_name
        u'latinica'
        """)
    @property
    def english_name(self):
        """The english display name of the locale.
        >>> Locale('de').english_name
        u'German'
        >>> Locale('de', 'DE').english_name
        u'German (Germany)'
        :type: `unicode`"""
        return self.get_display_name(Locale('en'))
    #{ General Locale Display Names
    @property
    def languages(self):
        """Mapping of language codes to translated language names.
        >>> Locale('de', 'DE').languages['ja']
        u'Japanisch'
        See `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_ for
        more information.
        """
        return self._data['languages']
    @property
    def scripts(self):
        """Mapping of script codes to translated script names.
        >>> Locale('en', 'US').scripts['Hira']
        u'Hiragana'
        See `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_
        for more information.
        """
        return self._data['scripts']
    @property
    def territories(self):
        """Mapping of territory codes to translated territory names.
        >>> Locale('es', 'CO').territories['DE']
        u'Alemania'
        See `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_
        for more information.
        """
        return self._data['territories']
    @property
    def variants(self):
        """Mapping of variant codes to translated variant names.
        >>> Locale('de', 'DE').variants['1901']
        u'Alte deutsche Rechtschreibung'
        """
        return self._data['variants']
    #{ Number Formatting
    @property
    def currencies(self):
        """Mapping of currency codes to translated currency names.  This
        only returns the generic form of the currency name, not the count
        specific one.  If an actual number is requested use the
        :func:`babel.numbers.get_currency_name` function.
        >>> Locale('en').currencies['COP']
        u'Colombian Peso'
        >>> Locale('de', 'DE').currencies['COP']
        u'Kolumbianischer Peso'
        """
        return self._data['currency_names']
    @property
    def currency_symbols(self):
        """Mapping of currency codes to symbols.
        >>> Locale('en', 'US').currency_symbols['USD']
        u'$'
        >>> Locale('es', 'CO').currency_symbols['USD']
        u'US$'
        """
        return self._data['currency_symbols']
    @property
    def number_symbols(self):
        """Symbols used in number formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('fr', 'FR').number_symbols['decimal']
        u','
        """
        return self._data['number_symbols']
    @property
    def decimal_formats(self):
        """Locale patterns for decimal number formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').decimal_formats[None]
        <NumberPattern u'#,##0.###'>
        """
        return self._data['decimal_formats']
    @property
    def currency_formats(self):
        """Locale patterns for currency number formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').currency_formats['standard']
        <NumberPattern u'\\xa4#,##0.00'>
        >>> Locale('en', 'US').currency_formats['accounting']
        <NumberPattern u'\\xa4#,##0.00'>
        """
        return self._data['currency_formats']
    @property
    def percent_formats(self):
        """Locale patterns for percent number formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').percent_formats[None]
        <NumberPattern u'#,##0%'>
        """
        return self._data['percent_formats']
    @property
    def scientific_formats(self):
        """Locale patterns for scientific number formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').scientific_formats[None]
        <NumberPattern u'#E0'>
        """
        return self._data['scientific_formats']
    #{ Calendar Information and Date Formatting
    @property
    def periods(self):
        """Locale display names for day periods (AM/PM).
        >>> Locale('en', 'US').periods['am']
        u'AM'
        """
        return self._data['periods']
    @property
    def days(self):
        """Locale display names for weekdays.
        >>> Locale('de', 'DE').days['format']['wide'][3]
        u'Donnerstag'
        """
        return self._data['days']
    @property
    def months(self):
        """Locale display names for months.
        >>> Locale('de', 'DE').months['format']['wide'][10]
        u'Oktober'
        """
        return self._data['months']
    @property
    def quarters(self):
        """Locale display names for quarters.
        >>> Locale('de', 'DE').quarters['format']['wide'][1]
        u'1. Quartal'
        """
        return self._data['quarters']
    @property
    def eras(self):
        """Locale display names for eras.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').eras['wide'][1]
        u'Anno Domini'
        >>> Locale('en', 'US').eras['abbreviated'][0]
        u'BC'
        """
        return self._data['eras']
    @property
    def time_zones(self):
        """Locale display names for time zones.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
        u'British Summer Time'
        >>> Locale('en', 'US').time_zones['America/St_Johns']['city']
        u'St. John\u2019s'
        """
        return self._data['time_zones']
    @property
    def meta_zones(self):
        """Locale display names for meta time zones.
        Meta time zones are basically groups of different Olson time zones that
        have the same GMT offset and daylight savings time.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight']
        u'Central European Summer Time'
        .. versionadded:: 0.9
        """
        return self._data['meta_zones']
    @property
    def zone_formats(self):
        """Patterns related to the formatting of time zones.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').zone_formats['fallback']
        u'%(1)s (%(0)s)'
        >>> Locale('pt', 'BR').zone_formats['region']
        u'Hor\\xe1rio %s'
        .. versionadded:: 0.9
        """
        return self._data['zone_formats']
    @property
    def first_week_day(self):
        """The first day of a week, with 0 being Monday.
        >>> Locale('de', 'DE').first_week_day
        0
        >>> Locale('en', 'US').first_week_day
        6
        """
        # Day numbering throughout this section: 0 == Monday ... 6 == Sunday.
        return self._data['week_data']['first_day']
    @property
    def weekend_start(self):
        """The day the weekend starts, with 0 being Monday.
        >>> Locale('de', 'DE').weekend_start
        5
        """
        return self._data['week_data']['weekend_start']
    @property
    def weekend_end(self):
        """The day the weekend ends, with 0 being Monday.
        >>> Locale('de', 'DE').weekend_end
        6
        """
        return self._data['week_data']['weekend_end']
    @property
    def min_week_days(self):
        """The minimum number of days in a week so that the week is counted as
        the first week of a year or month.
        >>> Locale('de', 'DE').min_week_days
        4
        """
        return self._data['week_data']['min_days']
    @property
    def date_formats(self):
        """Locale patterns for date formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').date_formats['short']
        <DateTimePattern u'M/d/yy'>
        >>> Locale('fr', 'FR').date_formats['long']
        <DateTimePattern u'd MMMM y'>
        """
        return self._data['date_formats']
    @property
    def time_formats(self):
        """Locale patterns for time formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en', 'US').time_formats['short']
        <DateTimePattern u'h:mm a'>
        >>> Locale('fr', 'FR').time_formats['long']
        <DateTimePattern u'HH:mm:ss z'>
        """
        return self._data['time_formats']
    @property
    def datetime_formats(self):
        """Locale patterns for datetime formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en').datetime_formats['full']
        u"{1} 'at' {0}"
        >>> Locale('th').datetime_formats['medium']
        u'{1} {0}'
        """
        return self._data['datetime_formats']
    @property
    def datetime_skeletons(self):
        """Locale patterns for formatting parts of a datetime.
        >>> Locale('en').datetime_skeletons['MEd']
        <DateTimePattern u'E, M/d'>
        >>> Locale('fr').datetime_skeletons['MEd']
        <DateTimePattern u'E dd/MM'>
        >>> Locale('fr').datetime_skeletons['H']
        <DateTimePattern u"HH 'h'">
        """
        return self._data['datetime_skeletons']
    @property
    def interval_formats(self):
        """Locale patterns for interval formatting.
        .. note:: The format of the value returned may change between
                  Babel versions.
        How to format date intervals in Finnish when the day is the
        smallest changing component:
        >>> Locale('fi_FI').interval_formats['MEd']['d']
        [u'E d. \u2013 ', u'E d.M.']
        .. seealso::
           The primary API to use this data is :py:func:`babel.dates.format_interval`.
        :rtype: dict[str, dict[str, list[str]]]
        """
        return self._data['interval_formats']
    @property
    def plural_form(self):
        """Plural rules for the locale.
        The returned object is callable with a number and yields the CLDR
        plural category as a string.
        >>> Locale('en').plural_form(1)
        'one'
        >>> Locale('en').plural_form(0)
        'other'
        >>> Locale('fr').plural_form(0)
        'one'
        >>> Locale('ru').plural_form(100)
        'many'
        """
        # Falls back to the module-level default rule when the CLDR data
        # for this locale carries no plural rule.
        return self._data.get('plural_form', _default_plural_rule)
    @property
    def list_patterns(self):
        """Patterns for generating lists
        .. note:: The format of the value returned may change between
                  Babel versions.
        >>> Locale('en').list_patterns['start']
        u'{0}, {1}'
        >>> Locale('en').list_patterns['end']
        u'{0}, and {1}'
        >>> Locale('en_GB').list_patterns['end']
        u'{0} and {1}'
        """
        return self._data['list_patterns']
    @property
    def ordinal_form(self):
        """Plural rules for the locale.
        >>> Locale('en').ordinal_form(1)
        'one'
        >>> Locale('en').ordinal_form(2)
        'two'
        >>> Locale('en').ordinal_form(3)
        'few'
        >>> Locale('fr').ordinal_form(2)
        'other'
        >>> Locale('ru').ordinal_form(100)
        'other'
        """
        # Same fallback behavior as plural_form.
        return self._data.get('ordinal_form', _default_plural_rule)
def default_locale(category=None, aliases=LOCALE_ALIASES):
    """Returns the system default locale for a given category, based on
    environment variables.
    >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
    ...     os.environ[name] = ''
    >>> os.environ['LANG'] = 'fr_FR.UTF-8'
    >>> default_locale('LC_MESSAGES')
    'fr_FR'
    The "C" or "POSIX" pseudo-locales are treated as aliases for the
    "en_US_POSIX" locale:
    >>> os.environ['LC_MESSAGES'] = 'POSIX'
    >>> default_locale('LC_MESSAGES')
    'en_US_POSIX'
    The following fallbacks to the variable are always considered:
    - ``LANGUAGE``
    - ``LC_ALL``
    - ``LC_CTYPE``
    - ``LANG``
    :param category: one of the ``LC_XXX`` environment variable names
    :param aliases: a dictionary of aliases for locale identifiers
    """
    # Check the category-specific variable first, then the generic fallbacks,
    # returning the first value that parses as a locale identifier.
    for varname in (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG'):
        if not varname:
            continue
        value = os.getenv(varname)
        if not value:
            continue
        if varname == 'LANGUAGE' and ':' in value:
            # LANGUAGE may hold a colon-separated priority list of language
            # codes; only the first entry is considered.
            value = value.split(':')[0]
        if value.split('.')[0] in ('C', 'POSIX'):
            value = 'en_US_POSIX'
        elif aliases and value in aliases:
            value = aliases[value]
        try:
            return get_locale_identifier(parse_locale(value))
        except ValueError:
            # Unparseable value; fall through to the next variable.
            pass
def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES):
    """Find the best match between available and requested locale strings.
    >>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
    'de_DE'
    >>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de'])
    'de'
    Case is ignored by the algorithm, the result uses the case of the preferred
    locale identifier:
    >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at'])
    'de_DE'
    Web browsers often send language-only identifiers (e.g. ``'ja'``); the
    `aliases` mapping lets those match fully qualified identifiers such as
    ``'ja_JP'``, and also repairs outdated codes like ``'no'``:
    >>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US'])
    'ja_JP'
    >>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE'])
    'nb_NO'
    Pass a different `aliases` dictionary to override this mapping, or
    ``None`` to disable it entirely.
    :param preferred: the list of locale strings preferred by the user
    :param available: the list of locale strings available
    :param sep: character that separates the different parts of the locale
                strings
    :param aliases: a dictionary of aliases for locale identifiers
    """
    # Compare case-insensitively against the available identifiers.
    normalized = [candidate.lower() for candidate in available if candidate]
    for wanted in preferred:
        lowered = wanted.lower()
        # 1) Exact (case-insensitive) match -- return the preferred spelling.
        if lowered in normalized:
            return wanted
        # 2) Alias lookup (e.g. 'ja' -> 'ja_JP', 'no' -> 'nb_NO').
        if aliases:
            replacement = aliases.get(lowered)
            if replacement:
                replacement = replacement.replace('_', sep)
                if replacement.lower() in normalized:
                    return replacement
        # 3) Fall back to the bare language part of a qualified identifier.
        pieces = wanted.split(sep)
        if len(pieces) > 1 and pieces[0].lower() in normalized:
            return pieces[0]
    return None
def parse_locale(identifier, sep='_'):
    """Parse a locale identifier into a tuple of the form ``(language,
    territory, script, variant)``.
    >>> parse_locale('zh_CN')
    ('zh', 'CN', None, None)
    >>> parse_locale('zh_Hans_CN')
    ('zh', 'CN', 'Hans', None)
    The default component separator is "_", but a different separator can be
    specified using the `sep` parameter:
    >>> parse_locale('zh-CN', sep='-')
    ('zh', 'CN', None, None)
    If the identifier cannot be parsed into a locale, a `ValueError` exception
    is raised:
    >>> parse_locale('not_a_LOCALE_String')
    Traceback (most recent call last):
      ...
    ValueError: 'not_a_LOCALE_String' is not a valid locale identifier
    Encoding information and locale modifiers are removed from the identifier:
    >>> parse_locale('it_IT@euro')
    ('it', 'IT', None, None)
    >>> parse_locale('en_US.UTF-8')
    ('en', 'US', None, None)
    >>> parse_locale('de_DE.iso885915@euro')
    ('de', 'DE', None, None)
    See :rfc:`4646` for more information.
    :param identifier: the locale identifier string
    :param sep: character that separates the different components of the locale
                identifier
    :raise `ValueError`: if the string does not appear to be a valid locale
                         identifier
    """
    # Strip the charset/encoding suffix ('.UTF-8') and any locale modifier
    # ('@euro') -- neither carries locale-identifier information.
    identifier = identifier.partition('.')[0].partition('@')[0]
    pieces = identifier.split(sep)
    lang = pieces.pop(0).lower()
    if not lang.isalpha():
        raise ValueError('expected only letters, got %r' % lang)
    script = None
    territory = None
    variant = None
    # Four-letter alphabetic subtag: script (title-cased, e.g. 'Hans').
    if pieces and len(pieces[0]) == 4 and pieces[0].isalpha():
        script = pieces.pop(0).title()
    # Two-letter alphabetic or three-digit subtag: territory.
    if pieces:
        head = pieces[0]
        if len(head) == 2 and head.isalpha():
            territory = pieces.pop(0).upper()
        elif len(head) == 3 and head.isdigit():
            territory = pieces.pop(0)
    # Variant: four chars starting with a digit, or five-plus starting with
    # a letter (e.g. '1999', 'POSIX').
    if pieces:
        head = pieces[0]
        if (len(head) == 4 and head[0].isdigit()) or \
                (len(head) >= 5 and head[0].isalpha()):
            variant = pieces.pop()
    if pieces:
        raise ValueError('%r is not a valid locale identifier' % identifier)
    return lang, territory, script, variant
def get_locale_identifier(tup, sep='_'):
    """The reverse of :func:`parse_locale`.  It creates a locale identifier out
    of a ``(language, territory, script, variant)`` tuple.  Items can be set to
    ``None`` and trailing ``None``\s can also be left out of the tuple.
    >>> get_locale_identifier(('de', 'DE', None, '1999'))
    'de_DE_1999'
    .. versionadded:: 1.0
    :param tup: the tuple as returned by :func:`parse_locale`.
    :param sep: the separator for the identifier.
    """
    # Pad missing trailing items with None and ignore anything past the
    # fourth element.
    lang, territory, script, variant = (tuple(tup) + (None, None, None, None))[:4]
    # Note the identifier order: language, script, territory, variant.
    return sep.join(part for part in (lang, script, territory, variant) if part)
| srisankethu/babel | babel/core.py | Python | bsd-3-clause | 35,002 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
class BuildOptions:
    """Options for the build.
    An instance is a plain bag of settings.  Once __init__ has defined the
    full attribute set, assigning to an unknown attribute name raises an
    AssertionError, catching typos in build scripts (see __setattr__).
    """
    class Target:
        # Link a regular executable.
        exe = "exe"
        # Link a Python extension module (shared library).
        shared_python_library = "shared_python_library"
    def __init__(self):
        # Folders to add to lib search
        self.lib_paths = None
        # Folders, within project, to add to include path
        self.include_folders = None
        # External includes, to be added as system includes (if
        # possible)
        self.system_include_folders = None
        # Cpp-files to compile
        self.source_files = None
        # Source folders to search
        self.source_folders = None
        # gcc, clang or msvc
        self.compiler = None
        # msw or linux
        self.platform = None
        # Path to nsis-executable for building installers on Windows.
        self.makensis_exe = None
        # Folder for compiled object files (release).
        self.obj_root_release = None
        # Folder for compiled object files (debug).
        self.obj_root_debug = None
        # Name for the built application, when release (without extension)
        self.out_name_release = None
        # Name for the built application, when debug (without extension)
        self.out_name_debug = None
        # Root folder for the project
        self.project_root = None
        self.parallell_compiles = None
        self.extra_resource_root = None
        self.wx_root = None
        self.test_dir = None
        self.check_deps = True
        self.create_build_info = True
        self.extra_objs = []
        self.forced_include = None
        self.msw_subsystem = None
        # Attributes verify() allows to remain None.
        self.optional = ["makensis_exe",
                         "extra_resource_root",
                         "test_dir",
                         "forced_include",
                         "msw_subsystem"]
        self.debug_compile = None
        # Time various build steps?
        self.timed = False
        self.target_type = self.Target.exe
        # All options are now defined; from here on __setattr__ rejects
        # attribute names that were not created above.
        self._sealed = True
    def __setattr__(self, name, value):
        """Reject assignment to unknown attributes after __init__.
        BUGFIX: the previous code assigned ``self.__setattr__ = self.setattr``
        inside __init__, which has no effect -- special methods are looked up
        on the type, not the instance -- so the typo-guard never ran.
        Defining __setattr__ on the class makes it effective.
        """
        if self.__dict__.get('_sealed'):
            assert name in self.__dict__, \
                "Unknown BuildOptions attribute: %s" % name
        object.__setattr__(self, name, value)
    def get_obj_root(self):
        """Return the object-file folder for the selected build flavor."""
        assert(self.debug_compile is not None)
        root = (self.obj_root_debug if self.debug_compile
                else self.obj_root_release)
        assert(root is not None)
        return root
    def get_out_name(self):
        """Return the output base name (no extension) for the flavor."""
        assert(self.debug_compile is not None)
        return (self.out_name_debug if self.debug_compile
                else self.out_name_release)
    def get_out_path(self):
        """Return the full linker output path (base name + extension)."""
        out_path = os.path.join(self.project_root, self.get_out_name())
        out_path += self.link_extension()
        return out_path
    def set_debug_compile(self, debug_compile):
        """Select debug (True) or release (False) flavor."""
        self.debug_compile = debug_compile
    def setattr(self, name, value):
        """Explicit setter, kept for backward compatibility.
        Applies the same unknown-attribute guard as __setattr__."""
        assert(name in self.__dict__)
        self.__dict__[name] = value
    def verify(self):
        """Print an error and exit if any required option is uninitialized."""
        # 'names' instead of 'vars' to avoid shadowing the builtin.
        names = [item for item in self.__dict__ if not item.startswith("_")]
        for name in names:
            if (self.__dict__[name] is None and name not in self.optional):
                print('%s not initialized.' % name)
                exit(1)
        # Options that are only required for a specific platform.
        if self.msw_subsystem is None and self.platform == "msw":
            print("msw_subsystem not initialized.")
            exit(1)
        if self.wx_root is None and self.platform == "linux":
            print("wx_root not initialized.")
            exit(1)
    def link_extension(self):
        """File extension for the linked target"""
        T = self.Target
        if self.target_type == T.exe:
            if self.platform == 'msw':
                return ".exe"
            else:
                return ""
        elif self.target_type == T.shared_python_library:
            if self.platform == 'msw':
                return ".pyd"
            else:
                raise NotImplementedError("Shared library not implemented for %s"
                                          % self.platform)
| lukas-ke/faint-graphics-editor | build-sys/build_sys/opts.py | Python | apache-2.0 | 4,671 |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 28 08:34:11 2018
@author: trieu,butenko
"""
import numpy as np
# Default values for the mesh-refinement settings dialog.  The keys mirror
# parameter names used by the OSS-DBS platform side; their precise semantics
# (units, sentinel values such as -1) are defined there and are not visible
# from this module -- confirm against the solver code before changing them.
d = {
    'refinement_frequency': [0],
    'num_ref_freqs': 1,
    'rel_div_CSF': -1,
    'Adaptive_frac_div': 1.0,
    'Min_Scaling': 1.0,
    'CSF_ref_reg': 0.0,
    'rel_div': 10.0,
    'rel_div_current': 1.0,
}
| andreashorn/lead_dbs | ext_libs/OSS-DBS/OSS_platform/GUI_tree_files/pop_up_control/dictionaries/dict_mesh_refinement.py | Python | gpl-3.0 | 331 |
from utils.donations.tests import *
| jumoconnect/openjumo | jumodjango/utils/tests/__init__.py | Python | mit | 37 |
from __future__ import annotations
from abc import ABCMeta, abstractmethod
import enum
from typing import (
Any,
Mapping,
Union,
)
from . import AbstractPlugin, BasePluginContext
__all__ = (
'AbstractStatReporterPlugin',
'AbstractErrorReporterPlugin',
'StatsPluginContext',
'ErrorPluginContext',
'INCREMENT',
'GAUGE',
)
class StatMetricTypes(enum.Enum):
    """Kinds of statistics metrics a reporter plugin can receive.
    The concrete semantics (counter vs. point-in-time value) are defined by
    the individual reporter plugins.
    """
    INCREMENT = 0
    GAUGE = 1
# Module-level shorthands, re-exported via __all__, so callers can pass
# INCREMENT / GAUGE without importing the enum class itself.
INCREMENT = StatMetricTypes.INCREMENT
GAUGE = StatMetricTypes.GAUGE
class AbstractStatReporterPlugin(AbstractPlugin, metaclass=ABCMeta):
    """Abstract interface for plugins that report statistics metrics."""
    async def init(self, context: Any = None) -> None:
        # No-op by default; concrete reporters may override.
        pass
    async def cleanup(self) -> None:
        # No-op by default; concrete reporters may override.
        pass
    @abstractmethod
    async def report_metric(
        self,
        metric_type: StatMetricTypes,
        metric_name: str,
        value: Union[float, int, None] = None,
    ) -> None:
        """Record one metric observation.
        ``value`` may be None; how a missing value is interpreted is up to
        the concrete plugin.
        """
        pass
class AbstractErrorReporterPlugin(AbstractPlugin, metaclass=ABCMeta):
    """Abstract interface for plugins that report errors/exceptions."""
    async def init(self, context: Any = None) -> None:
        # No-op by default; concrete reporters may override.
        pass
    async def cleanup(self) -> None:
        # No-op by default; concrete reporters may override.
        pass
    @abstractmethod
    async def capture_exception(
        self,
        exc_instance: Union[Exception, None] = None,
        context: Union[Mapping[str, Any], None] = None,
    ) -> None:
        """Report an exception, optionally with extra context data."""
        pass
    @abstractmethod
    async def capture_message(self, message: str) -> None:
        """Report a plain text message."""
        pass
class StatsPluginContext(BasePluginContext[AbstractStatReporterPlugin]):
    """Plugin context that fans a metric report out to every loaded
    stat-reporter plugin."""
    plugin_group = 'backendai_stats_monitor_v20'
    async def report_metric(
        self,
        metric_type: StatMetricTypes,
        metric_name: str,
        value: Union[float, int, None] = None,
    ) -> None:
        # Forward to each registered plugin sequentially.
        for plugin_instance in self.plugins.values():
            await plugin_instance.report_metric(metric_type, metric_name, value)
class ErrorPluginContext(BasePluginContext[AbstractErrorReporterPlugin]):
    """Plugin context that fans error reports out to every loaded
    error-reporter plugin."""
    plugin_group = 'backendai_error_monitor_v20'
    async def capture_exception(
        self,
        exc_instance: Union[Exception, None] = None,
        context: Union[Mapping[str, Any], None] = None,
    ) -> None:
        # Forward to each registered plugin sequentially.
        for plugin_instance in self.plugins.values():
            await plugin_instance.capture_exception(exc_instance, context)
    async def capture_message(self, message: str) -> None:
        for plugin_instance in self.plugins.values():
            await plugin_instance.capture_message(message)
| lablup/sorna-common | src/ai/backend/common/plugin/monitor.py | Python | lgpl-3.0 | 2,344 |
# -*- coding: utf-8 -*-
"""UnitTests for cactidbadapter."""
import unittest
from cactidbadapter import CactiDBAdapter
class UnitTests(unittest.TestCase):
"""Class UnitTests.
Unit test for cactidbadapter.
"""
    def setUp(self):
        """Create the adapter under test.
        NOTE(review): despite the module name these are integration tests --
        they require a local MySQL server with the Cacti schema and fixture
        data loaded.
        """
        self.obj = CactiDBAdapter(user='root',
                                  password='',
                                  host='localhost',
                                  port=3306)
    def test_attrs(self):
        """Constructor defaults and explicitly supplied settings are stored."""
        # check default values
        obj = CactiDBAdapter()
        self.assertEqual(obj.database, 'cacti')
        self.assertEqual(obj.user, 'root')
        self.assertEqual(obj.password, '')
        self.assertEqual(obj.host, 'localhost')
        self.assertEqual(obj.port, 3306)
        self.assertEqual(obj.charset, 'utf8mb4')
        # check specified values
        obj = CactiDBAdapter(user='admin',
                             password='password',
                             host='localhost',
                             database='aaaaa',
                             port=12345)
        self.assertEqual(obj.database, 'aaaaa')
        self.assertEqual(obj.user, 'admin')
        self.assertEqual(obj.password, 'password')
        self.assertEqual(obj.host, 'localhost')
        self.assertEqual(obj.port, 12345)
        self.assertEqual(obj.charset, 'utf8mb4')
    def test_get_host(self):
        """Get host from cacti db."""
        # The fixture database is expected to contain exactly three devices,
        # the first of which is 127.0.0.1.
        hostname = '127.0.0.1'
        hosts = self.obj.get_host()
        self.assertEqual(len(hosts), 3)
        self.assertEqual(hosts[0]['hostname'], hostname)
        hosts = self.obj.get_host(condition='hostname = "%s"' % hostname)
        self.assertEqual(hosts[0]['hostname'], hostname)
    def test_host_columns(self):
        """Check column values."""
        vals = self.obj.host_columns()
        self.assertEqual(type(vals), list)
    def test_host_snmp_cache_columns(self):
        """Check column values."""
        vals = self.obj.host_snmp_cache_columns()
        self.assertEqual(type(vals), list)
    def test_host_snmp_cache_field_names(self):
        """Check field_name values."""
        vals = self.obj.host_snmp_cache_field_names()
        self.assertEqual(type(vals), list)
    def test_get_snmp_cache(self):
        """Get fetched snmp values from cacti db."""
        # NOTE(review): the lowercase 'ifindex' only matches rows stored as
        # 'ifIndex' because of MySQL's default case-insensitive collation --
        # confirm if the test DB uses a case-sensitive collation.
        condition = 'field_name = "ifindex"'
        vals = self.obj.get_snmp_cache(condition=condition)
        for val in vals:
            if val['field_value'] == '1':
                self.assertEqual(val['description'], 'Localhost')
                self.assertEqual(val['hostname'], '127.0.0.1')
                self.assertEqual(val['field_name'], 'ifIndex')
                self.assertEqual(val['field_value'], '1')
        condition = 'field_name = "ifIP"'
        vals = self.obj.get_snmp_cache(condition=condition)
        for val in vals:
            if val['field_value'] == '10.0.2.15':
                self.assertEqual(val['description'], 'Localhost')
                self.assertEqual(val['hostname'], '127.0.0.1')
                self.assertEqual(val['field_name'], 'ifIP')
                self.assertEqual(val['field_value'], '10.0.2.15')
        condition = 'field_name = "ifIP" or field_name = "ifName"'
        vals = self.obj.get_snmp_cache(condition=condition)
        for val in vals:
            if val['field_value'] == '10.0.2.15':
                self.assertEqual(val['description'], 'Localhost')
                self.assertEqual(val['hostname'], '127.0.0.1')
                self.assertEqual(val['field_name'], 'ifIP')
                self.assertEqual(val['field_value'], '10.0.2.15')
            elif val['field_value'] == 'lo':
                self.assertEqual(val['description'], 'Localhost')
                self.assertEqual(val['hostname'], '127.0.0.1')
                self.assertEqual(val['field_name'], 'ifName')
                self.assertEqual(val['field_value'], 'lo')
        # condition
        hostname = '127.0.0.1'
        condition = ('field_name = "ifIP"'
                     ' or field_name = "ifName"'
                     ' and hostname = "%s"' % hostname)
        vals = self.obj.get_snmp_cache(condition=condition)
        for val in vals:
            self.assertEqual(val['hostname'], hostname)
        # limit check
        condition = 'field_name = "ifIP"'
        vals = self.obj.get_snmp_cache(condition=condition, limit=1)
        self.assertEqual(len(vals), 1)
        vals = self.obj.get_snmp_cache(condition=condition, limit=2)
        self.assertEqual(len(vals), 2)
def test_get_ifip(self):
"""Get fetched snmp ifIP values from cacti db."""
vals = self.obj.get_ifip()
for val in vals:
if val['field_value'] == '127.0.0.1':
self.assertEqual(val['id'], 1)
self.assertEqual(val['hostname'], '127.0.0.1')
self.assertEqual(val['description'], 'Localhost')
self.assertEqual(val['field_name'], 'ifIP')
self.assertEqual(val['oid'],
'.1.3.6.1.2.1.4.20.1.2.127.0.0.1')
if val['field_value'] == '10.0.2.15':
self.assertEqual(val['id'], 1)
self.assertEqual(val['hostname'], '127.0.0.1')
self.assertEqual(val['description'], 'Localhost')
self.assertEqual(val['field_name'], 'ifIP')
self.assertEqual(val['oid'],
'.1.3.6.1.2.1.4.20.1.2.10.0.2.15')
if val['field_value'] == '192.168.56.2':
self.assertEqual(val['id'], 1)
self.assertEqual(val['hostname'], '127.0.0.1')
self.assertEqual(val['description'], 'Localhost')
self.assertEqual(val['field_name'], 'ifIP')
self.assertEqual(val['oid'],
'.1.3.6.1.2.1.4.20.1.2.192.168.56.2')
def test_get_sysdescr(self):
"""Get fetched snmp sysDescr values from cacti db."""
vals = self.obj.get_sysdescr()
for val in vals:
self.assertEqual(val['oid'], '.1.3.6.1.2.1.1.1.0')
def test_get_sysobjectid(self):
"""Get fetched snmp sysObjectID values from cacti db."""
vals = self.obj.get_sysobjectid()
for val in vals:
self.assertEqual(val['oid'], '.1.3.6.1.2.1.1.2.0')
def test_get_sysuptime(self):
"""Get fetched snmp sysUpTime values from cacti db."""
vals = self.obj.get_sysuptime()
for val in vals:
self.assertEqual(val['oid'], '.1.3.6.1.2.1.1.3.0')
def test_get_syscontact(self):
"""Get fetched snmp sysContact values from cacti db."""
vals = self.obj.get_syscontact()
for val in vals:
self.assertEqual(val['oid'], '.1.3.6.1.2.1.1.4.0')
def test_get_sysname(self):
"""Get fetched snmp sysName values from cacti db."""
vals = self.obj.get_sysname()
for val in vals:
self.assertEqual(val['oid'], '.1.3.6.1.2.1.1.5.0')
def test_get_syslocation(self):
"""Get fetched snmp sysLocation values from cacti db."""
vals = self.obj.get_syslocation()
for val in vals:
self.assertEqual(val['oid'], '.1.3.6.1.2.1.1.6.0')
def test_get_sysservices(self):
"""Get fetched snmp sysServices values from cacti db."""
vals = self.obj.get_sysservices()
for val in vals:
self.assertEqual(val['oid'], '.1.3.6.1.2.1.1.7.0')
| mtoshi/cactidbadapter | tests/test_cactidbadapter.py | Python | mit | 7,545 |
# Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import pycurl
import urllib
from gi.repository import GObject
from setting import Setting
class Object(GObject.GObject):
    """GObject-based HTTP transfer helper built on pycurl.

    Performs authenticated GET/POST/DELETE requests and reports progress
    via GObject signals: 'transfer-started', 'transfer-progress'
    (total, done, mode), 'transfer-failed' (message) and
    'transfer-completed' (response body).
    """
    __gsignals__ = {
        'transfer-completed': (GObject.SignalFlags.RUN_FIRST, None, ([object])),
        'transfer-progress': (GObject.SignalFlags.RUN_FIRST, None,
                              ([float, float, str])),
        'transfer-failed': (GObject.SignalFlags.RUN_FIRST, None, ([str])),
        'transfer-started': (GObject.SignalFlags.RUN_FIRST, None, ([]))}
    def _generate_header(self):
        # Authentication header carrying the buddy credential from Setting.
        return ['X-Sugar-Buddy: %s' % Setting.get_buddy_credential()]
    def _update_cb(self, down_total, down_done, up_total, up_done, states):
        # pycurl progress callback. `states` is a small shared state machine:
        # 0 appended once 'transfer-started' has been emitted, 1 appended when
        # the upload phase finishes (switch to reporting the download), and 2
        # when the download finishes as well — TODO confirm against pycurl's
        # PROGRESSFUNCTION semantics.
        if 2 in states:
            return
        total = up_total
        done = up_done
        mode = 'upload'
        if 1 in states:
            total = down_total
            done = down_done
            mode = 'download'
        if total == 0:
            return
        if 0 not in states:
            self.emit('transfer-started')
            states.append(0)
        self.emit('transfer-progress', total, done, mode)
        # Advance the state machine by one step once the current phase is done.
        state = states[-1]
        if total == done and state in states and len(states) == state + 1:
            states.append(state + 1)
    def request(self, method, url, params=None, file=None):
        """Perform an HTTP request; method is 'POST', 'GET' or 'DELETE'.

        `params` is a list of form fields for POST (query params for GET);
        `file` is a {'field': ..., 'path': ...} upload descriptor.
        """
        c = pycurl.Curl()
        if method == 'POST':
            c.setopt(c.POST, 1)
            if file is not None:
                # NOTE(review): this mutates the caller's `params` list in
                # place and raises TypeError when params is None — confirm
                # callers always pass a list together with `file`.
                params += [(file['field'], (c.FORM_FILE, file['path']))]
            if params is not None:
                c.setopt(c.HTTPPOST, params)
            else:
                c.setopt(c.POSTFIELDS, '')
        elif method == 'GET':
            c.setopt(c.HTTPGET, 1)
            if params:
                url += '?%s' % urllib.urlencode(params)
        elif method == 'DELETE':
            c.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
        else:
            raise Exception('Object', 'Invalid method')
        # XXX hack to trace transfer states
        states = []
        def pre_update_cb(*args):
            args = list(args) + [states]
            self._update_cb(*args)
        #XXX hack to write multiple responses
        buffer = []
        def __write_cb(data):
            buffer.append(data)
        c.setopt(c.HTTPHEADER, self._generate_header())
        # SSL verification disabled — presumably the server uses a
        # self-signed certificate; verify before production use.
        c.setopt(pycurl.SSL_VERIFYPEER, 0)
        c.setopt(pycurl.SSL_VERIFYHOST, 0)
        c.setopt(c.URL, url)
        c.setopt(c.NOPROGRESS, 0)
        c.setopt(c.PROGRESSFUNCTION, pre_update_cb)
        c.setopt(c.WRITEFUNCTION, __write_cb)
        #c.setopt(c.VERBOSE, True)
        try:
            c.perform()
        except pycurl.error, e:
            self.emit('transfer-failed', str(e))
        else:
            code = c.getinfo(c.HTTP_CODE)
            if code != 200:
                self.emit('transfer-failed', 'HTTP code %s' % code)
        finally:
            # NOTE(review): 'transfer-completed' fires even after a failure;
            # confirm listeners tolerate completion following 'transfer-failed'.
            self.emit('transfer-completed', ''.join(buffer))
            c.close()
| tchx84/social-sugar | extensions/web/journalx/journalx/object.py | Python | gpl-2.0 | 3,760 |
#!/usr/bin/env python3
"""{PIPELINE_NAME} pipeline (version: {PIPELINE_VERSION}): creates
pipeline-specific config files to given output directory and runs the
pipeline (unless otherwise requested).
"""
# generic usage {PIPELINE_NAME} and {PIPELINE_VERSION} replaced while
# printing usage
#--- standard library imports
#
import sys
import os
import logging
#--- third-party imports
#
import yaml
#--- project specific imports
#
# add lib dir for this pipeline installation to PYTHONPATH
LIB_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "lib"))
if LIB_PATH not in sys.path:
sys.path.insert(0, LIB_PATH)
from readunits import get_samples_and_readunits_from_cfgfile
from readunits import get_readunits_from_args
from pipelines import get_pipeline_version
from pipelines import PipelineHandler
from pipelines import default_argparser
from pipelines import logger as aux_logger
from pipelines import get_cluster_cfgfile
import configargparse
__author__ = "Andreas Wilm"
__email__ = "wilma@gis.a-star.edu.sg"
__copyright__ = "2016 Genome Institute of Singapore"
__license__ = "The MIT License (MIT)"
# only dump() and following do not automatically create aliases
yaml.Dumper.ignore_aliases = lambda *args: True
# Pipeline layout: the base dir is where this script lives; cluster and
# pipeline configs sit in its cfg/ subdirectory.
PIPELINE_BASEDIR = os.path.dirname(sys.argv[0])
CFG_DIR = os.path.join(PIPELINE_BASEDIR, "cfg")
# same as folder name. also used for cluster job names
PIPELINE_NAME = "mutect"
# global logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    '[{asctime}] {levelname:8s} (unknown) {message}', style='{'))
logger.addHandler(handler)
def main():
    """Parse arguments, assemble the pipeline configuration and set up
    (and optionally submit) the somatic MuTect pipeline.

    Exits with status 1 on invalid/conflicting arguments or missing files.
    """
    default_parser = default_argparser(CFG_DIR)
    parser = configargparse.ArgumentParser(description=__doc__.format(
        PIPELINE_NAME=PIPELINE_NAME, PIPELINE_VERSION=get_pipeline_version()),
        parents=[default_parser])
    parser._optionals.title = "Arguments"
    # pipeline specific args
    parser.add_argument("--normal-fq1", nargs="+",
                        help="Normal FastQ file/s (gzip only)."
                        " Multiple input files supported (auto-sorted)."
                        " Note: each file (or pair) gets a unique read-group id."
                        " Collides with --sample-cfg.")
    parser.add_argument('--normal-fq2', nargs="+",
                        help="Normal FastQ file/s (if paired) (gzip only). See also --normal-fq1")
    parser.add_argument("--tumor-fq1", nargs="+",
                        help="Tumor FastQ file/s (gzip only)."
                        " Multiple input files supported (auto-sorted)."
                        " Note: each file (or pair) gets a unique read-group id."
                        " Collides with --sample-cfg.")
    parser.add_argument('--tumor-fq2', nargs="+",
                        help="Tumor FastQ file/s (if paired) (gzip only). See also --tumor-fq1")
    parser.add_argument('-t', "--seqtype", required=True,
                        choices=['WGS', 'WES', 'targeted'],
                        help="Sequencing type")
    parser.add_argument('-l', "--bed",
                        help="Bed file listing regions of interest."
                        " Required for WES and targeted sequencing.")
    #parser.add_argument('-D', '--dont-mark-dups', action='store_true',
    #                    help="Don't mark duplicate reads")
    parser.add_argument('--normal-bam',
                        help="Advanced: Injects normal BAM (overwrites normal-fq options)."
                        " WARNING: reference and postprocessing need to match pipeline requirements")
    parser.add_argument('--tumor-bam',
                        help="Advanced: Injects tumor BAM (overwrites tumor-fq options)."
                        " WARNING: reference and postprocessing need to match pipeline requirements")
    # Named local instead of the original bare `default=0.02` assignment.
    frac_cont_default = 0.02
    parser.add_argument('--frac-cont', default=frac_cont_default, type=float,
                        help="Estimated level of contamination from a different individual (default = {})".format(frac_cont_default))
    args = parser.parse_args()
    # Repeateable -v and -q for setting logging level.
    # See https://www.reddit.com/r/Python/comments/3nctlm/what_python_tools_should_i_be_using_on_every/
    # and https://gist.github.com/andreas-wilm/b6031a84a33e652680d4
    # script -vv -> DEBUG
    # script -v -> INFO
    # script -> WARNING
    # script -q -> ERROR
    # script -qq -> CRITICAL
    # script -qqq -> no logging at all
    logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)
    aux_logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)
    if os.path.exists(args.outdir):
        logger.fatal("Output directory %s already exists", args.outdir)
        sys.exit(1)
    # samples is a dictionary with sample names as key (mostly just
    # one) and readunit keys as value. readunits is a dict with
    # readunits (think: fastq pairs with attributes) as value
    if args.sample_cfg:
        if any([args.normal_fq1, args.normal_fq2, args.tumor_fq1, args.tumor_fq2,
                args.normal_bam, args.tumor_bam]):
            logger.fatal("Config file overrides fastq and sample input arguments."
                         " Use one or the other")
            sys.exit(1)
        if not os.path.exists(args.sample_cfg):
            logger.fatal("Config file %s does not exist", args.sample_cfg)
            sys.exit(1)
        samples, readunits = get_samples_and_readunits_from_cfgfile(args.sample_cfg)
    else:
        samples = dict()
        if args.normal_bam:
            # normal BAM injected directly; no fastq read units needed
            normal_readunits = dict()
            samples["normal"] = []
            assert os.path.exists(args.normal_bam)
        else:
            if not all([args.normal_fq1, args.tumor_fq1]):
                logger.fatal("Need at least fq1 and sample without config file")
                sys.exit(1)
            normal_readunits = get_readunits_from_args(args.normal_fq1, args.normal_fq2)
            samples["normal"] = list(normal_readunits.keys())
        if args.tumor_bam:
            tumor_readunits = dict()
            samples["tumor"] = []
            assert os.path.exists(args.tumor_bam)
        else:
            tumor_readunits = get_readunits_from_args(args.tumor_fq1, args.tumor_fq2)
            samples["tumor"] = list(tumor_readunits.keys())
        readunits = dict(normal_readunits)
        readunits.update(tumor_readunits)
    assert sorted(samples) == sorted(["normal", "tumor"])
    # FIXME how to check
    #if not os.path.exists(reffa):
    #    logger.fatal("Reference '%s' doesn't exist", reffa)
    #    sys.exit(1)
    #
    #for p in ['bwa', 'samtools']:
    #    if not ref_is_indexed(reffa, p):
    #        logger.fatal("Reference '%s' doesn't appear to be indexed with %s", reffa, p)
    #        sys.exit(1)
    if args.seqtype in ['WES', 'targeted']:
        if not args.bed:
            logger.fatal("Analysis of exome and targeted sequence runs requires a bed file")
            sys.exit(1)
        else:
            if not os.path.exists(args.bed):
                # Bug fix: the original message interpolated args.sample_cfg
                # here, which is unrelated (and often None) in this code path.
                logger.fatal("Bed file %s does not exist", args.bed)
                sys.exit(1)
    # turn arguments into cfg_dict that gets merged into pipeline config
    #
    cfg_dict = dict()
    cfg_dict['readunits'] = readunits
    cfg_dict['samples'] = samples
    cfg_dict['seqtype'] = args.seqtype
    cfg_dict['frac_cont'] = args.frac_cont
    cfg_dict['intervals'] = os.path.abspath(args.bed) if args.bed else None
    # WARNING: this currently only works because these two are the only members in reference dict
    # Should normally only write to root level
    #cfg_dict['mark_dups'] = not args.dont_mark_dups
    pipeline_handler = PipelineHandler(
        PIPELINE_NAME, PIPELINE_BASEDIR,
        args, cfg_dict,
        cluster_cfgfile=get_cluster_cfgfile(CFG_DIR))
    pipeline_handler.setup_env()
    # inject existing BAM by symlinking (everything upstream is temporary anyway)
    for sample, bam in [("normal", args.normal_bam),
                        ("tumor", args.tumor_bam)]:
        if bam:
            # target as defined in Snakefile!
            target = os.path.join(args.outdir, "out", sample,
                                  "{}.bwamem.dedup.realn.recal.bam".format(sample))
            os.makedirs(os.path.dirname(target))
            os.symlink(os.path.abspath(bam), target)
            src_bai = os.path.abspath(bam) + ".bai"
            if os.path.exists(src_bai):
                os.symlink(src_bai, target + ".bai")
    pipeline_handler.submit(args.no_run)
# Standard script entry point.
if __name__ == "__main__":
    main()
| gis-rpd/pipelines | somatic/mutect/mutect.py | Python | mit | 8,727 |
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules provides classes for evaluating uniform distributions.
"""
import numpy
from pycbc.distributions import bounded
class Uniform(bounded.BoundedDist):
    """
    A uniform distribution on the given parameters. The parameters are
    independent of each other. Instances of this class can be called like
    a function. By default, logpdf will be called, but this can be changed
    by setting the class's __call__ method to its pdf method.
    Parameters
    ----------
    \**params :
        The keyword arguments should provide the names of parameters and their
        corresponding bounds, as either tuples or a `boundaries.Bounds`
        instance.
    Attributes
    ----------
    name : 'uniform'
        The name of this distribution.
    params : list of strings
        The list of parameter names.
    bounds : dict
        A dictionary of the parameter names and their bounds.
    norm : float
        The normalization of the multi-dimensional pdf.
    lognorm : float
        The log of the normalization.
    Examples
    --------
    Create a 2 dimensional uniform distribution:
    >>> dist = prior.Uniform(mass1=(10.,50.), mass2=(10.,50.))
    Get the log of the pdf at a particular value:
    >>> dist.logpdf(mass1=25., mass2=10.)
        -7.3777589082278725
    Do the same by calling the distribution:
    >>> dist(mass1=25., mass2=10.)
        -7.3777589082278725
    Generate some random values:
    >>> dist.rvs(size=3)
        array([(36.90885758394699, 51.294212757995254),
               (39.109058546060346, 13.36220145743631),
               (34.49594465315212, 47.531953033719454)],
              dtype=[('mass1', '<f8'), ('mass2', '<f8')])
    Initialize a uniform distribution using a boundaries.Bounds instance,
    with cyclic bounds:
    >>> dist = distributions.Uniform(phi=Bounds(10, 50, cyclic=True))
    Apply boundary conditions to a value:
    >>> dist.apply_boundary_conditions(phi=60.)
        {'phi': array(20.0)}
    The boundary conditions are applied to the value before evaluating the pdf;
    note that the following returns a non-zero pdf. If the bounds were not
    cyclic, the following would return 0:
    >>> dist.pdf(phi=60.)
        0.025
    """
    name = 'uniform'
    def __init__(self, **params):
        super(Uniform, self).__init__(**params)
        # Each parameter contributes 1/|hi - lo| to the joint density; work
        # in log space so that infinite-width bounds give -inf cleanly.
        # Bug fix: the original seterr(divide='ignore')/seterr(divide='warn')
        # pair unconditionally reset numpy's divide handling to 'warn',
        # clobbering whatever state the caller had configured; errstate
        # restores the previous settings on exit.
        with numpy.errstate(divide='ignore'):
            self._lognorm = -sum([numpy.log(abs(bnd[1]-bnd[0]))
                                  for bnd in self._bounds.values()])
            self._norm = numpy.exp(self._lognorm)
    @property
    def norm(self):
        """float: Normalization constant of the joint pdf."""
        return self._norm
    @property
    def lognorm(self):
        """float: Log of the normalization constant."""
        return self._lognorm
    def _cdfinv_param(self, param, value):
        """Return the inverse cdf to map the unit interval to parameter bounds.
        """
        lower_bound = self._bounds[param][0]
        upper_bound = self._bounds[param][1]
        return (upper_bound - lower_bound) * value + lower_bound
    def _pdf(self, **kwargs):
        """Returns the pdf at the given values. The keyword arguments must
        contain all of parameters in self's params. Unrecognized arguments are
        ignored.
        """
        if kwargs in self:
            return self._norm
        else:
            return 0.
    def _logpdf(self, **kwargs):
        """Returns the log of the pdf at the given values. The keyword
        arguments must contain all of parameters in self's params. Unrecognized
        arguments are ignored.
        """
        if kwargs in self:
            return self._lognorm
        else:
            return -numpy.inf
    def rvs(self, size=1, param=None):
        """Gives a set of random values drawn from this distribution.
        Parameters
        ----------
        size : {1, int}
            The number of values to generate; default is 1.
        param : {None, string}
            If provided, will just return values for the given parameter.
            Otherwise, returns random values for each parameter.
        Returns
        -------
        structured array
            The random values in a numpy structured array. If a param was
            specified, the array will only have an element corresponding to the
            given parameter. Otherwise, the array will have an element for each
            parameter in self's params.
        """
        if param is not None:
            dtype = [(param, float)]
        else:
            dtype = [(p, float) for p in self.params]
        arr = numpy.zeros(size, dtype=dtype)
        for (p,_) in dtype:
            arr[p] = numpy.random.uniform(self._bounds[p][0],
                                          self._bounds[p][1],
                                          size=size)
        return arr
    @classmethod
    def from_config(cls, cp, section, variable_args):
        """Returns a distribution based on a configuration file. The parameters
        for the distribution are retrieved from the section titled
        "[`section`-`variable_args`]" in the config file.
        Parameters
        ----------
        cp : pycbc.workflow.WorkflowConfigParser
            A parsed configuration file that contains the distribution
            options.
        section : str
            Name of the section in the configuration file.
        variable_args : str
            The names of the parameters for this distribution, separated by
            ``VARARGS_DELIM``. These must appear in the "tag" part
            of the section header.
        Returns
        -------
        Uniform
            A distribution instance from the pycbc.inference.prior module.
        """
        return super(Uniform, cls).from_config(cp, section, variable_args,
                                               bounds_required=True)
# Public API of this module.
__all__ = ['Uniform']
| ahnitz/pycbc | pycbc/distributions/uniform.py | Python | gpl-3.0 | 6,688 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .create_job_parameters import CreateJobParameters
# NOTE: AutoRest-generated model — keep manual edits minimal; regeneration
# will overwrite them.
class CreateScopeJobParameters(CreateJobParameters):
    """The parameters used to submit a new Data Lake Analytics Scope job. (Only
    for use internally with Scope job type.).

    :param type: the job type of the current job (Hive, USql, or Scope (for
     internal use only)). Possible values include: 'USql', 'Hive', 'Scope'
    :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType
    :param properties: the job specific properties.
    :type properties:
     ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties
    :param name: the friendly name of the job to submit.
    :type name: str
    :param degree_of_parallelism: the degree of parallelism to use for this
     job. This must be greater than 0, if set to less than 0 it will default to
     1. Default value: 1 .
    :type degree_of_parallelism: int
    :param priority: the priority value to use for the current job. Lower
     numbers have a higher priority. By default, a job has a priority of 1000.
     This must be greater than 0.
    :type priority: int
    :param log_file_patterns: the list of log file name patterns to find in
     the logFolder. '*' is the only matching character allowed. Example format:
     jobExecution*.log or *mylog*.txt
    :type log_file_patterns: list[str]
    :param related: the recurring job relationship information properties.
    :type related:
     ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties
    :param tags: the key-value pairs used to add additional metadata to the
     job information. (Only for use internally with Scope job type.)
    :type tags: dict[str, str]
    """
    # Serialization metadata consumed by msrest: which fields are mandatory
    # and how each Python attribute maps onto the wire-format key/type.
    _validation = {
        'type': {'required': True},
        'properties': {'required': True},
        'name': {'required': True},
    }
    _attribute_map = {
        'type': {'key': 'type', 'type': 'JobType'},
        'properties': {'key': 'properties', 'type': 'CreateJobProperties'},
        'name': {'key': 'name', 'type': 'str'},
        'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
        'priority': {'key': 'priority', 'type': 'int'},
        'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'},
        'related': {'key': 'related', 'type': 'JobRelationshipProperties'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(self, type, properties, name, degree_of_parallelism=1, priority=None, log_file_patterns=None, related=None, tags=None):
        # Everything except `tags` is handled by the base class initializer.
        super(CreateScopeJobParameters, self).__init__(type=type, properties=properties, name=name, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related)
        self.tags = tags
| lmazuel/azure-sdk-for-python | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters.py | Python | mit | 3,232 |
from django.test import TestCase
from dolphin.middleware import LocalStoreMiddleware
class RequestStoreMiddlewareTest(TestCase):
    """Tests for LocalStoreMiddleware's thread-local request stashing."""
    def test_middleware(self):
        """process_request should make the request retrievable via request()."""
        req = "Test fake request"
        m = LocalStoreMiddleware()
        m.process_request(req)
        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12).
        self.assertEqual(m.request(), req)
| coxmediagroup/dolphin | dolphin/tests/middleware.py | Python | mit | 306 |
#requirements: selenium wget python2.7 Geckodriver
import time
import sys
import wget
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
def googlescrape(str):
browser = webdriver.Chrome()
browser.get(url)
time.sleep(3) # sleep for 5 seconds so you can see the results
results = browser.find_elements_by_css_selector('div.g')
if len(results) == 0:
print "No results found"
browser.quit()
else:
for x in range(0,len(results)):
link = results[x].find_element_by_tag_name("a")
href = link.get_attribute("href")
print href
wget.download(href)
browser.quit()
return
if len(sys.argv) == 3:
domain = sys.argv[1]
ftype = sys.argv[2]
url = "https://www.google.com/search?num=100&start=0&hl=em&meta=&q=site:"
url += domain
url += "+filetype:"
url += ftype
url += "&filter=0"
googlescrape(url)
elif len(sys.argv) == 2:
for i in range (0,3):
if i==0:
print "Checking for pdfs..."
ftype = "pdf"
elif i == 1:
print "Checking for docs..."
ftype = "doc"
elif i == 2:
print "Checking for xls..."
ftype = "xls"
domain = sys.argv[1]
url = "https://www.google.com/search?num=100&start=0&hl=em&meta=&q=site:"
url += domain
url += "+filetype:"
url += ftype
url += "&filter=0"
googlescrape(url)
else:
print "Error: Improper number of arguments. Usage: python search.py domain.com pdf"
sys.exit()
| daverstephens/The-SOC-Shop | Threat_Intel/GScraper.py | Python | gpl-2.0 | 1,824 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glob import glob
from os.path import getsize, isdir, join, abspath
from os import listdir
import re
def parse_memory(s):
    """Parse a memory-size string like '512', '64K', '128M' or '1.5G'.

    Returns the size in bytes as a float. Raises Exception when *s* does
    not match <number>[GMK]?.
    """
    m = re.search('^([0-9.]+)([GMK]?)$', s)
    if not m:
        # Bug fix: the original followed this raise with an unreachable
        # sys.exit(1) that referenced the never-imported sys module.
        raise Exception("%s does not match <number>[GMK]?" % s)
    multipliers = {'': 1,
                   'K': 1024,
                   'M': 1024 * 1024,
                   'G': 1024 * 1024 * 1024}
    return float(m.group(1)) * multipliers[m.group(2)]
def add_to_file_set(dir, file_set):
    """Recursively add *dir* and everything beneath it to *file_set*.

    Paths are stored as absolute paths; a path already in the set is not
    revisited, which guards against symlink loops.
    """
    dir = abspath(dir)
    if dir in file_set:
        # avoid symlink loops
        return
    file_set.add(dir)
    if isdir(dir):
        # os.listdir never yields '.' or '..', so the original membership
        # check against them was dead code and has been dropped.
        for entry in listdir(dir):
            add_to_file_set(join(dir, entry), file_set)
def get_files(globs):
    """Expand the glob patterns in *globs* and return the plain files
    (directories excluded) they cover, recursing into matched directories."""
    matched = set()
    for pattern in (globs or []):
        for path in glob(pattern):
            add_to_file_set(path, matched)
    return [path for path in matched if not isdir(path)]
def total_size(files):
    """Return the combined size in bytes of the regular files in *files*.

    Directories are skipped, mirroring the filtering done by get_files().
    """
    # Idiomatic sum() over a generator replaces the manual accumulator loop.
    return sum(getsize(f) for f in files if not isdir(f))
| spotify/mlockexec | mlockexec/util.py | Python | apache-2.0 | 1,664 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional free-text
    # `email` column to the staff table.
    dependencies = [
        ('staff', '0006_auto_20180411_1233'),
    ]
    operations = [
        migrations.AddField(
            model_name='staff',
            name='email',
            field=models.CharField(max_length=255, null=True, blank=True),
        ),
    ]
| awemulya/fieldsight-kobocat | onadata/apps/staff/migrations/0007_staff_email.py | Python | bsd-2-clause | 420 |
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
import sys
sys.path.append("/arxiv/source_tree/cosmo-codes/spicy")
import spice_cl as scl
def write_pcl(output_file, S_l, S_l_error=None):
    """Write a power spectrum to *output_file* as CSV.

    Columns are: multipole ell, S_l, and (when given) S_l_error.
    """
    ell = np.arange(0, np.shape(S_l)[0])
    # Bug fix: the original tested `S_l_error==None`. When S_l_error is a
    # numpy array, `== None` broadcasts elementwise, so the `if` raised
    # "truth value of an array is ambiguous" whenever an error array was
    # actually supplied. Identity comparison is the correct check.
    if S_l_error is None:
        np.savetxt(output_file, np.asarray([ell, S_l]).T, delimiter=",")
    else:
        np.savetxt(output_file, np.asarray([ell, S_l, S_l_error]).T,
                   delimiter=",")
############## files names and other inputs ####################################
data_dir="/arxiv/projects/LSS/DES_mocks_from_Marc/files_2014_09_22/data_test/"
data_file = data_dir+"filemap_base_counts.fits"
inv_noise_file = data_dir+"filemap_base_noise.fits"
beam_file = "/arxiv/projects/LSS/DES_mocks_from_Marc/window_funcs/window_func_temp_ns128.bl"
output_prefix = "/arxiv/projects/LSS/DES_mocks_from_Marc/files_2014_09_22/data_test/pcl/filemap"
# number of Monte-Carlo samples passed to the pseudo-Cl estimator
num_samps=100
######################## compute the base power spectrum #######################
C_l_base,N_l_base,S_l_base = scl.compute_pcl_estimate(data_file,inv_noise_file,beam_file,num_samps)
#write the base power spectrum
output_file = output_prefix+"_base.pcl"
write_pcl(output_file,C_l_base)#base has no noise present
######################### compute the power spectrum of models #################
#model = "fix"
models = ["fix","std"]
for model in models:
    S_l_model = []
    # mock realisations are numbered 101..125
    for ind in range(101,126):
        data_file = data_dir+"filemap_"+model+"_counts_"+str(ind)+".fits"
        inv_noise_file = data_dir+"filemap_"+model+"_noise_"+str(ind)+".fits"
        print ""
        print "computing cls for ",data_file
        print ""
        C_l_i,N_l_i,S_l_i = scl.compute_pcl_estimate(data_file,inv_noise_file,beam_file,num_samps)
        S_l_model.append(S_l_i)
    #now find the mean and std dvn
    S_l_model = np.vstack(S_l_model)
    S_l_mean = np.zeros(np.shape(S_l_base))
    S_l_sigma = np.zeros(np.shape(S_l_base))
    for l in range(0,np.shape(S_l_base)[0]):
        S_l_mean[l] = np.mean(S_l_model[:,l])
        S_l_sigma[l] = sqrt(np.var(S_l_model[:,l]))
    #write to file
    output_file = output_prefix+"_"+model+".pcl"
    write_pcl(output_file,S_l_mean,S_l_sigma)
############################### polots #########################################
# NOTE(review): this plotting section sits outside the model loop, so it only
# plots S_l_mean/S_l_sigma and `model` from the LAST iteration ("std") —
# confirm that plotting just the final model is intended.
ell=np.arange(0,np.shape(S_l_base)[0])
plt.clf()
plt.plot(ell,ell*(ell+1.)*C_l_base,label="base",color='r')# remember base has no noise present
plt.errorbar(ell,ell*(ell+1.)*S_l_mean,yerr=ell*(ell+1.)*S_l_sigma,label=model,color='gray',fmt='+')
plt.xlim(2,256)
plt.xlabel(r"$\ell$")
plt.ylabel(r"$\ell(\ell+1)C_{\ell}$")
plt.grid()
plt.legend(loc=0)
output_file = output_prefix+"_"+model+"_plots.png"
plt.savefig(output_file)
| tbs1980/cosmo-codes | mock_des_cls/source/make_cl_estimates.py | Python | mpl-2.0 | 2,749 |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Shell script tool to run puppet inside of the given container image.
# Uses the config file at /var/lib/container-puppet/container-puppet.json as a source for a JSON
# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.
import glob
import json
import logging
import os
import subprocess
import sys
import tempfile
import time
import multiprocessing
from paunch import runner as containers_runner
# Module-wide logger instance; created lazily by get_logger().
logger = None
# Helper shell script executed inside each configuration container.
sh_script = '/var/lib/container-puppet/container-puppet.sh'
# CONTAINER_CLI selects the container runtime; docker and podman are supported.
container_cli = os.environ.get('CONTAINER_CLI', 'docker')
container_log_stdout_path = os.environ.get('CONTAINER_LOG_STDOUT_PATH',
                                           '/var/log/containers/stdouts')
cli_cmd = '/usr/bin/' + container_cli
def get_logger():
    """Return the module-wide logger, configuring it on first use.

    The DEBUG environment variable ('True'/'true') switches both the logger
    and its stdout handler to DEBUG; anything else logs at INFO.
    """
    global logger
    if logger is None:
        logger = logging.getLogger()
        ch = logging.StreamHandler(sys.stdout)
        # Compute the level once instead of duplicating the env check for
        # logger and handler (also drops the stray space before ':' the
        # original condition carried).
        if os.environ.get('DEBUG') in ('True', 'true'):
            level = logging.DEBUG
        else:
            level = logging.INFO
        logger.setLevel(level)
        ch.setLevel(level)
        formatter = logging.Formatter('%(asctime)s %(levelname)s: '
                                      '%(process)s -- %(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
log = get_logger()
log.info('Running container-puppet')
# Root under which per-service configuration volumes are generated.
config_volume_prefix = os.path.abspath(os.environ.get('CONFIG_VOLUME_PREFIX',
                                                      '/var/lib/config-data'))
log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)
if not os.path.exists(config_volume_prefix):
    os.makedirs(config_volume_prefix)
# Select runtime-specific CLI flags and the paunch runner implementation.
if container_cli == 'docker':
    cli_dcmd = ['--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro']
    env = {}
    RUNNER = containers_runner.DockerRunner(
        'container-puppet', cont_cmd='docker', log=log)
elif container_cli == 'podman':
    # podman doesn't allow relabeling content in /usr and
    # doesn't support named volumes
    cli_dcmd = ['--security-opt', 'label=disable',
                '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro']
    # podman need to find dependent binaries that are in environment
    env = {'PATH': os.environ['PATH']}
    RUNNER = containers_runner.PodmanRunner(
        'container-puppet', cont_cmd='podman', log=log)
else:
    log.error('Invalid container_cli: %s' % container_cli)
    sys.exit(1)
# Controls whether puppet is bind mounted in from the host
# NOTE: we require this to support the tarball extracted (Deployment archive)
# puppet modules but our containers now also include puppet-tripleo so we
# could use either
# NOTE(review): cli_dcmd above already contains this same modules mount, so
# enabling MOUNT_HOST_PUPPET appends a duplicate --volume flag — confirm the
# runtimes tolerate the repetition.
if os.environ.get('MOUNT_HOST_PUPPET', 'true') == 'true':
    cli_dcmd.extend(['--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro'])
# this is to match what we do in deployed-server
def short_hostname():
    """Return the machine's short hostname (``hostname -s``), stripped of
    trailing whitespace; matches what deployed-server does."""
    proc = subprocess.Popen(['hostname', '-s'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _err = proc.communicate()
    return out.decode('utf-8').rstrip()
def pull_image(name):
    """Ensure the container image *name* is available locally.

    An already-present image (per "<cli> inspect") is left untouched;
    otherwise the pull is retried up to five times with a short backoff
    before giving up.
    """
    inspect = subprocess.Popen([cli_cmd, 'inspect', name],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=True)
    cmd_stdout, cmd_stderr = inspect.communicate()
    if inspect.returncode == 0:
        log.info('Image already exists: %s' % name)
        return

    log.info('Pulling image: %s' % name)
    attempts = 0
    retval = -1
    while retval != 0:
        attempts += 1
        puller = subprocess.Popen([cli_cmd, 'pull', name],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  universal_newlines=True)
        cmd_stdout, cmd_stderr = puller.communicate()
        retval = puller.returncode
        if retval != 0:
            time.sleep(3)
            log.warning('%s pull failed: %s' % (container_cli, cmd_stderr))
            log.warning('retrying pulling image: %s' % name)
            if attempts >= 5:
                log.error('Failed to pull image: %s' % name)
                break
        # Surface whatever the CLI printed for each attempt.
        if cmd_stdout:
            log.debug(cmd_stdout)
        if cmd_stderr:
            log.debug(cmd_stderr)
def match_config_volumes(prefix, config):
    """Return the sorted config-volume directories mounted under *prefix*.

    We match on the mounted volume paths rather than the service key,
    because e.g. "novacomute" consumes config-data/nova.
    """
    try:
        volumes = config.get('volumes', [])
    except AttributeError:
        log.error('Error fetching volumes. Prefix: %s - Config: %s' % (prefix, config))
        raise
    matched = []
    for volume in volumes:
        if volume.startswith(prefix):
            matched.append(os.path.dirname(volume.split(":")[0]))
    return sorted(matched)
def get_config_hash(config_volume):
    """Return the md5 checksum recorded for *config_volume*, or None.

    The checksum lives in the companion "<volume>.md5sum" file written by
    the puppet wrapper script.
    """
    checksum_path = "%s.md5sum" % config_volume
    log.debug("Looking for hashfile %s for config_volume %s" % (checksum_path, config_volume))
    if not os.path.isfile(checksum_path):
        return None
    log.debug("Got hashfile %s for config_volume %s" % (checksum_path, config_volume))
    with open(checksum_path) as f:
        return f.read().rstrip()
def rm_container(name):
    """Remove the named container, optionally logging its filesystem diff.

    With $SHOW_DIFF set, the container diff is logged first (useful to see
    what a puppet run touched).  The daemon's "no such container" error is
    deliberately not logged.
    """
    if os.environ.get('SHOW_DIFF', None):
        log.info('Diffing container: %s' % name)
        proc = subprocess.Popen([cli_cmd, 'diff', name],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        out, err = proc.communicate()
        if out:
            log.debug(out)
        if err:
            log.debug(err)

    log.info('Removing container: %s' % name)
    proc = subprocess.Popen([cli_cmd, 'rm', name],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    out, err = proc.communicate()
    if out:
        log.debug(out)
    not_found = ('Error response from daemon: '
                 'No such container: {}\n'.format(name))
    if err and err != not_found:
        log.debug(err)
# Worker-pool size and the JSON config describing which services to compile.
process_count = int(os.environ.get('PROCESS_COUNT',
                                   multiprocessing.cpu_count()))
config_file = os.environ.get('CONFIG', '/var/lib/container-puppet/container-puppet.json')
log.debug('CONFIG: %s' % config_file)
# If specified, only this config_volume will be used
config_volume_only = os.environ.get('CONFIG_VOLUME', None)

with open(config_file) as f:
    json_data = json.load(f)

# To save time we support configuring 'shared' services at the same
# time. For example configuring all of the heat services
# in a single container pass makes sense and will save some time.
# To support this we merge shared settings together here.
#
# We key off of config_volume as this should be the same for a
# given group of services. We are also now specifying the container
# in which the services should be configured. This should match
# in all instances where the volume name is also the same.
configs = {}

for service in (json_data or []):
    if service is None:
        continue
    # Newer templates pass dicts; normalize to the legacy positional list
    # [config_volume, puppet_tags, manifest, config_image, volumes, privileged].
    if isinstance(service, dict):
        service = [
            service.get('config_volume'),
            service.get('puppet_tags'),
            service.get('step_config'),
            service.get('config_image'),
            service.get('volumes', []),
            service.get('privileged', False),
        ]

    config_volume = service[0] or ''
    puppet_tags = service[1] or ''
    manifest = service[2] or ''
    config_image = service[3] or ''
    volumes = service[4] if len(service) > 4 else []
    privileged = service[5] if len(service) > 5 else False

    # Nothing to apply without a manifest and an image to run it in.
    if not manifest or not config_image:
        continue

    log.debug('config_volume %s' % config_volume)
    log.debug('puppet_tags %s' % puppet_tags)
    log.debug('manifest %s' % manifest)
    log.debug('config_image %s' % config_image)
    log.debug('volumes %s' % volumes)
    log.debug('privileged %s' % privileged)
    # We key off of config volume for all configs.
    if config_volume in configs:
        # Append puppet tags and manifest.
        log.debug("Existing service, appending puppet tags and manifest")
        if puppet_tags:
            configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
                                                   puppet_tags)
        if manifest:
            configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
                                                    manifest)
        if configs[config_volume][3] != config_image:
            log.warning("Config containers do not match even though"
                        " shared volumes are the same!")
        if volumes:
            configs[config_volume][4].extend(volumes)
    else:
        if not config_volume_only or (config_volume_only == config_volume):
            log.debug("Adding new service")
            configs[config_volume] = service
        else:
            log.debug("Ignoring %s due to $CONFIG_VOLUME=%s" %
                      (config_volume, config_volume_only))

log.info('Service compilation completed.')
# Write (once) the bash wrapper executed inside each one-shot container: it
# installs the puppet configuration, runs "puppet apply", rsyncs the results
# into /var/lib/config-data and records md5 checksums used later to detect
# config changes.  The heredoc below is the container's runtime script —
# it must be kept byte-for-byte; only edit with a deployment re-test.
if not os.path.exists(sh_script):
    with open(sh_script, 'w') as script_file:
        os.chmod(script_file.name, 0o755)
        script_file.write("""#!/bin/bash
set -ex
mkdir -p /etc/puppet
cp -dR /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker_puppet.json
TAGS=""
if [ -n "$PUPPET_TAGS" ]; then
TAGS="--tags \"$PUPPET_TAGS\""
fi
CHECK_MODE=""
if [ -d "/tmp/puppet-check-mode" ]; then
mkdir -p /etc/puppet/check-mode
cp -a /tmp/puppet-check-mode/* /etc/puppet/check-mode
CHECK_MODE="--hiera_config /etc/puppet/check-mode/hiera.yaml"
fi
# Create a reference timestamp to easily find all files touched by
# puppet. The sync ensures we get all the files we want due to
# different timestamp.
origin_of_time=/var/lib/config-data/${NAME}.origin_of_time
touch $origin_of_time
sync
export NET_HOST="${NET_HOST:-false}"
set +e
if [ "$NET_HOST" == "false" ]; then
export FACTER_hostname=$HOSTNAME
fi
# $::deployment_type in puppet-tripleo
export FACTER_deployment_type=containers
export FACTER_uuid=$(cat /sys/class/dmi/id/product_uuid | tr '[:upper:]' '[:lower:]')
/usr/bin/puppet apply --summarize \
--detailed-exitcodes \
--color=false \
--logdest syslog \
--logdest console \
--modulepath=/etc/puppet/modules:/usr/share/openstack-puppet/modules \
$TAGS \
$CHECK_MODE \
/etc/config.pp
rc=$?
set -e
if [ $rc -ne 2 -a $rc -ne 0 ]; then
exit $rc
fi
# Disables archiving
if [ -z "$NO_ARCHIVE" ]; then
archivedirs=("/etc" "/root" "/opt" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www" "/var/spool/cron" "/var/lib/nova/.ssh")
rsync_srcs=""
for d in "${archivedirs[@]}"; do
if [ -d "$d" ]; then
rsync_srcs+=" $d"
fi
done
# On stack update, if a password was changed in a config file,
# some services (e.g. mysql) must change their internal state
# (e.g. password in mysql DB) when paunch restarts them; and
# they need the old password to achieve that.
# For those services, we update the config hash to notify
# paunch that a restart is needed, but we do not update the
# password file in container-puppet if the file already existed
# before and let the service regenerate it instead.
password_files="/root/.my.cnf"
exclude_files=""
for p in $password_files; do
if [ -f "$p" -a -f "/var/lib/config-data/${NAME}$p" ]; then
exclude_files+=" --exclude=$p"
fi
done
rsync -a -R --delay-updates --delete-after $exclude_files $rsync_srcs /var/lib/config-data/${NAME}
# Also make a copy of files modified during puppet run
# This is useful for debugging
echo "Gathering files modified after $(stat -c '%y' $origin_of_time)"
mkdir -p /var/lib/config-data/puppet-generated/${NAME}
rsync -a -R -0 --delay-updates --delete-after $exclude_files \
--files-from=<(find $rsync_srcs -newer $origin_of_time -not -path '/etc/puppet*' -print0) \
/ /var/lib/config-data/puppet-generated/${NAME}
# Write a checksum of the config-data dir, this is used as a
# salt to trigger container restart when the config changes
# note: while being excluded from the output, password files
# are still included in checksum computation
additional_checksum_files=""
excluded_original_passwords=""
for p in $password_files; do
if [ -f "$p" ]; then
additional_checksum_files+=" $p"
excluded_original_passwords+=" --exclude=/var/lib/config-data/*${p}"
fi
done
# We need to exclude the swift ring backups as those change over time and
# containers do not need to restart if they change
EXCLUDE=--exclude='*/etc/swift/backups/*'\ --exclude='*/etc/libvirt/passwd.db'\ ${excluded_original_passwords}
# We need to repipe the tar command through 'tar xO' to force text
# output because otherwise the sed command cannot work. The sed is
# needed because puppet puts timestamps as comments in cron and
# parsedfile resources, hence triggering a change at every redeploy
tar -c --mtime='1970-01-01' $EXCLUDE -f - /var/lib/config-data/${NAME} $additional_checksum_files | tar xO | \
sed '/^#.*HEADER.*/d' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
tar -c --mtime='1970-01-01' $EXCLUDE -f - /var/lib/config-data/puppet-generated/${NAME} $additional_checksum_files --mtime='1970-01-01' | tar xO \
| sed '/^#.*HEADER.*/d' | md5sum | awk '{print $1}' > /var/lib/config-data/puppet-generated/${NAME}.md5sum
fi
""")
def mp_puppet_config(*args):
    """Run puppet in a one-shot container to generate one config volume.

    Takes a single tuple (config_volume, puppet_tags, manifest,
    config_image, volumes, privileged, check_mode) so it can be mapped over
    a multiprocessing.Pool.  Returns the container exit code; with puppet's
    --detailed-exitcodes, both 0 (no changes) and 2 (changes applied) mean
    success.
    """
    (config_volume, puppet_tags, manifest, config_image,
     volumes, privileged, check_mode) = args[0]
    log = get_logger()
    log.info('Starting configuration of %s using image %s' % (config_volume,
                                                              config_image))
    log.debug('config_volume %s' % config_volume)
    log.debug('puppet_tags %s' % puppet_tags)
    log.debug('manifest %s' % manifest)
    log.debug('config_image %s' % config_image)
    log.debug('volumes %s' % volumes)
    log.debug('privileged %s' % privileged)
    log.debug('check_mode %s' % check_mode)

    with tempfile.NamedTemporaryFile() as tmp_man:
        # Write the manifest the container will apply as /etc/config.pp.
        with open(tmp_man.name, 'w') as man_file:
            man_file.write('include ::tripleo::packages\n')
            man_file.write(manifest)

        uname = RUNNER.unique_container_name('container-puppet-%s' %
                                             config_volume)
        rm_container(uname)
        pull_image(config_image)

        common_dcmd = [cli_cmd, 'run',
                       '--user', 'root',
                       '--name', uname,
                       '--env', 'PUPPET_TAGS=%s' % puppet_tags,
                       '--env', 'NAME=%s' % config_volume,
                       '--env', 'HOSTNAME=%s' % short_hostname(),
                       '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
                       '--env', 'STEP=%s' % os.environ.get('STEP', '6'),
                       '--env', 'NET_HOST=%s' % os.environ.get('NET_HOST', 'false'),
                       '--log-driver', 'json-file',
                       '--volume', '/etc/localtime:/etc/localtime:ro',
                       '--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
                       '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
                       # OpenSSL trusted CA injection
                       '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
                       '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
                       '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
                       '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
                       '--volume', '%s:/var/lib/config-data/:rw' % config_volume_prefix,
                       # Syslog socket for puppet logs
                       '--volume', '/dev/log:/dev/log:rw']

        if privileged:
            # BUG FIX: was common_dcmd.push('--privileged'); Python lists
            # have no push(), so privileged runs raised AttributeError.
            common_dcmd.append('--privileged')

        if container_cli == 'podman':
            log_path = os.path.join(container_log_stdout_path, uname)
            # Named log_opts (not "logging") to avoid shadowing the stdlib
            # logging module.
            log_opts = ['--log-opt',
                        'path=%s.log' % log_path]
            common_dcmd.extend(log_opts)

        dcmd = common_dcmd + cli_dcmd

        if check_mode:
            dcmd.extend([
                '--volume',
                '/etc/puppet/check-mode:/tmp/puppet-check-mode:ro'])

        for volume in volumes:
            if volume:
                dcmd.extend(['--volume', volume])

        dcmd.extend(['--entrypoint', sh_script])

        if container_cli == 'docker':
            # NOTE(flaper87): Always copy the DOCKER_* environment variables as
            # they contain the access data for the docker daemon.
            for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
                env[k] = os.environ.get(k)

        if os.environ.get('NET_HOST', 'false') == 'true':
            log.debug('NET_HOST enabled')
            dcmd.extend(['--net', 'host', '--volume',
                         '/etc/hosts:/etc/hosts:ro'])

        # script injection as the last mount to make sure it's accessible
        # https://github.com/containers/libpod/issues/1844
        dcmd.extend(['--volume', '%s:%s:ro' % (sh_script, sh_script)])

        dcmd.append(config_image)

        # Retry the run up to 3 times before giving up: the first attempt
        # does "<cli> run", later attempts restart the created container.
        # https://github.com/containers/libpod/issues/1844
        retval = -1
        count = 0
        log.debug('Running %s command: %s' % (container_cli, ' '.join(dcmd)))
        while count < 3:
            if count == 0:
                cmd = dcmd
            else:
                cmd = [cli_cmd, 'start', '-a', uname]
            count += 1
            subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE, env=env,
                                       universal_newlines=True)
            cmd_stdout, cmd_stderr = subproc.communicate()
            retval = subproc.returncode
            # puppet with --detailed-exitcodes will return 0 for success and no changes
            # and 2 for success and resource changes. Other numbers are failures
            if retval in [0, 2]:
                if cmd_stdout:
                    log.debug('%s run succeeded: %s' % (cmd, cmd_stdout))
                if cmd_stderr:
                    log.warning(cmd_stderr)
                # only delete successful runs, for debugging
                rm_container(uname)
                break
            time.sleep(3)
            # BUG FIX: the attempt count and stderr were swapped in the
            # original format arguments.
            log.error('%s run failed after %s attempt(s): %s' % (cmd,
                                                                 count,
                                                                 cmd_stderr))
            log.warning('Retrying running container: %s' % config_volume)
        else:
            # while-else: all attempts exhausted without a successful break.
            if cmd_stdout:
                log.debug(cmd_stdout)
            if cmd_stderr:
                log.debug(cmd_stderr)
            log.error('Failed running container for %s' % config_volume)
        log.info('Finished processing puppet configs for %s' % (config_volume))
        return retval
# Holds all the information for each process to consume.
# Instead of starting them all linearly we run them using a process
# pool. This creates a list of arguments for the above function
# to consume.
process_map = []

check_mode = int(os.environ.get('CHECK_MODE', 0))
log.debug('CHECK_MODE: %s' % check_mode)

for config_volume in configs:
    service = configs[config_volume]
    puppet_tags = service[1] or ''
    manifest = service[2] or ''
    config_image = service[3] or ''
    volumes = service[4] if len(service) > 4 else []
    privileged = service[5] if len(service) > 5 else False

    # Always apply the file/cron-style resource tags; service-specific tags
    # are appended on top.
    if puppet_tags:
        puppet_tags = "file,file_line,concat,augeas,cron,%s" % puppet_tags
    else:
        puppet_tags = "file,file_line,concat,augeas,cron"

    process_map.append([config_volume, puppet_tags, manifest, config_image,
                        volumes, privileged, check_mode])

for p in process_map:
    log.debug('- %s' % p)

# Fire off processes to perform each configuration. Defaults
# to the number of CPUs on the system.
log.info('Starting multiprocess configuration steps. Using %d processes.' %
         process_count)
p = multiprocessing.Pool(process_count)
returncodes = list(p.map(mp_puppet_config, process_map))
config_volumes = [pm[0] for pm in process_map]
success = True
for returncode, config_volume in zip(returncodes, config_volumes):
    if returncode not in [0, 2]:
        log.error('ERROR configuring %s' % config_volume)
        success = False

# Update the startup configs with the config hash we generated above
startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)
infiles = glob.glob(startup_configs)
for infile in infiles:
    with open(infile) as f:
        infile_data = json.load(f)

    for k, v in iter(infile_data.items()):
        config_volumes = match_config_volumes(config_volume_prefix, v)
        config_hashes = [get_config_hash(volume_path) for volume_path in config_volumes]
        config_hashes = filter(None, config_hashes)
        config_hash = '-'.join(config_hashes)
        if config_hash:
            env = v.get('environment', [])
            env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash)
            # NOTE(review): config_volume here is the stale loop variable
            # from the zip() loop above, not this entry's volume — the
            # debug message is presumably misleading; confirm intent.
            log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash))
            infile_data[k]['environment'] = env

    # Write a "hashed-" sibling file rather than rewriting the original.
    outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile))
    with open(outfile, 'w') as out_f:
        os.chmod(out_f.name, 0o600)
        json.dump(infile_data, out_f, indent=2)

if not success:
    sys.exit(1)
| dprince/tripleo-heat-templates | docker/container-puppet.py | Python | apache-2.0 | 23,955 |
#!/usr/bin/env python3
# This file is part of textland.
#
# Copyright 2014 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Textland is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# Textland is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Textland. If not, see <http://www.gnu.org/licenses/>.
from textland import DrawingContext
from textland import EVENT_KEYBOARD
from textland import EVENT_RESIZE
from textland import Event
from textland import IApplication
from textland import Size
from textland import TextImage
from textland import get_display
class DemoApp(IApplication):
    """Textland demo that paints a 16x16 foreground/background color table."""

    def __init__(self):
        self.image = TextImage(Size(0, 0))

    def consume_event(self, event: Event):
        """Handle one event, repaint, and return the current image."""
        if event.kind == EVENT_RESIZE:
            # The resize payload is the new screen size.
            self.image = TextImage(event.data)
        elif event.kind == EVENT_KEYBOARD and event.data.key == 'q':
            # 'q' quits: StopIteration terminates the display loop.
            raise StopIteration
        self.repaint(event)
        return self.image

    def repaint(self, event: Event) -> None:
        """Redraw the whole image for the current size."""
        painter = DrawingContext(self.image)
        size = self.image.size
        if size.width < 65 or size.height < 18:
            self._paint_resize_msg(painter)
        else:
            self._paint_color_table(painter)

    def _paint_color_table(self, ctx: DrawingContext) -> None:
        """Draw one bordered cell per (foreground, background) color pair."""
        CELL_WIDTH = 4
        NUM_COLORS = 16
        for fg in range(NUM_COLORS):
            for bg in range(NUM_COLORS):
                ctx.attributes.reset()
                ctx.border(
                    0, self.image.size.width - (NUM_COLORS * CELL_WIDTH) - 1,
                    0, self.image.size.height - NUM_COLORS - 2)
                ctx.move_to(1 + fg * CELL_WIDTH, 1 + bg)
                ctx.attributes.fg = fg
                ctx.attributes.bg = bg
                ctx.print("{:X}+{:X}".format(fg, bg))

    def _paint_resize_msg(self, ctx: DrawingContext) -> None:
        """Center a hint asking the user to enlarge the terminal."""
        message = "Please enlarge this window"
        ctx.move_to(
            (self.image.size.width - len(message)) // 2,
            self.image.size.height // 2)
        ctx.print(message)
def main():
    """Entry point: obtain a display backend and run the demo app on it."""
    get_display().run(DemoApp())


if __name__ == "__main__":
    main()
| zyga/textland | demo6.py | Python | gpl-3.0 | 2,579 |
#!/usr/bin/env python
"""
"""
__author__ = "David Nolden<david.kde@art-master.de>, Ka-Ping Yee <ping@lfw.org>"
__version__ = "6 April 2006"
import sys, imp, os, stat, re, types, cgi
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rstrip
import pydoc
def cleanlinks(string):
    """Changes the links to work with the pydoc:-notation."""
    text = str(string).replace(".html", "")
    anchor = "<a href=\""
    alen = len(anchor)
    out = ""
    cursor = 0
    while cursor != -1:
        start = cursor
        cursor = text.find(anchor, cursor)
        if cursor == -1:
            # No more links: flush the remainder and stop.
            out += text[start:]
            break
        out += text[start:cursor + alen]
        cursor += alen
        # Leave local jumps (#...) and external references (scheme://) alone.
        if text[cursor] == '#' or text.find(":/", cursor, cursor + 10) != -1:
            continue
        out += "pydoc:"
        # A bare "." link points at the module index.
        if text[cursor] == "." and text[cursor + 1] == "\"":
            cursor += 1
            out += "modules"
    return out
# Maximum recursion depth for directory trees.  Without a cap, a simple
# "pydoc:." request could walk a huge file-system tree (such as a home
# directory) for a very long time, and the scan cannot be stopped from the
# browser (only by killing python).
__maxdepth = 4
def writedocs(path, pkgpath='', depth=0, notprocessed=[]):
if(path == "/."):
writedoc(path)
return
depth+=1
if os.path.isdir(path):
if(depth > __maxdepth):
notprocessed.append(path)
return
dir = path
for file in os.listdir(dir):
path = os.path.join(dir, file)
if os.path.isdir(path):
writedocs(path, file + '.' + pkgpath, depth)
if os.path.isfile(path):
writedocs(path, pkgpath, depth)
if os.path.isfile(path):
modname = pydoc.inspect.getmodulename(path)
if modname:
writedoc(pkgpath + modname)
if(depth == 1):
if(len(notprocessed) != 0):
print "<br> the following paths were not processed because they are deeper than the maximum depth of " + str(__maxdepth) + ":<br>"
for x in notprocessed:
print cgi.escape(x) + " <br>"
def writedoc(key,top=False):
    """Write HTML documentation to a file in the current directory."""
    # Special keys "modules" and "/." render the index of all modules.
    if(type(key) == str and (key == "modules" or key == "/.")):
        heading = pydoc.html.heading(
            '<br><big><big><strong> '
            'Python: Index of Modules'
            '</strong></big></big>',
            '#ffffff', '#7799ee')
        builtins = []
        for name in sys.builtin_module_names:
            builtins.append('<a href="%s">%s</a>' % (cgi.escape(name,quote=True), cgi.escape(name)))
        indices = ['<p>Built-in modules: ' + cgi.escape(join(builtins, ', '))]
        # Collect one index section per sys.path directory, de-duplicated
        # through the shared 'seen' dict.
        seen = {}
        for dir in pydoc.pathdirs():
            indices.append(pydoc.html.index(dir, seen))
        print cleanlinks(heading + join(indices))
        return
    # Anything else: resolve the dotted name unless a module object was
    # passed in directly.
    if(type(key) != types.ModuleType):
        object = pydoc.locate(key)
        if(object == None and top):
            print "could not locate module/object for key " + \
                  cgi.escape(key) + "<br><a href=\"pydoc:modules\">go to index</a>";
    else:
        object = key
    if object:
        print cleanlinks(pydoc.html.page(pydoc.describe(object), pydoc.html.document(object)))
if __name__ == '__main__':
    import getopt

    # Sentinel exception used to fall through to the usage message.
    class BadUsage: pass

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'k:p:w')
        # Emit the HTML page skeleton before any documentation output.
        print "<html>"
        print "<head><meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">"
        print "</head><body>"
        if args:
            for arg in args:
                try:
                    # Directories are walked recursively; single files are
                    # imported and documented directly.
                    if os.path.isdir(arg): writedocs(arg)
                    if os.path.isfile(arg):
                        arg = pydoc.importfile(arg)
                        writedoc(arg, True)
                except pydoc.ErrorDuringImport, value:
                    print 'problem in %s - %s' % (
                        cgi.escape(value.filename), cgi.escape(value.exc))
        else:
            raise BadUsage
    except (getopt.error, BadUsage):
        print "need parameters\n"
| iegor/kdevelop | languages/python/kde_pydoc.py | Python | gpl-2.0 | 4,348 |
#!/usr/bin/env python
"""Unit tests for M2Crypto.Engine."""
from M2Crypto import Engine
from tests import unittest
class EngineTestCase(unittest.TestCase):
    """Exercise engine lookup, loading, and key/certificate handling."""

    privkey = 'tests/rsa.priv.pem'
    bad_id = '1bea1edfeb97'

    def tearDown(self):
        # Release any engines loaded during the test.
        Engine.cleanup()

    def test_by_id_junk(self):
        """An unknown or missing engine id must raise ValueError."""
        with self.assertRaises(ValueError):
            Engine.Engine(self.bad_id)
        with self.assertRaises(ValueError):
            Engine.Engine()

    def test_by_id_openssl(self):
        """The built-in openssl engine reports its name and id."""
        Engine.load_openssl()
        engine = Engine.Engine('openssl')
        self.assertEqual(engine.get_name(), 'Software engine support')
        self.assertEqual(engine.get_id(), 'openssl')

    def test_by_id_dynamic(self):
        """The dynamic engine can be loaded and looked up."""
        Engine.load_dynamic()
        Engine.Engine('dynamic')

    def test_engine_ctrl_cmd_string(self):
        """String control commands are accepted by the dynamic engine."""
        Engine.load_dynamic()
        engine = Engine.Engine('dynamic')
        engine.ctrl_cmd_string('ID', 'TESTID')

    def test_load_private(self):
        """A PEM private key loads through the default openssl engine."""
        Engine.load_openssl()
        engine = Engine.Engine('openssl')
        engine.set_default()
        engine.load_private_key(self.privkey)

    def test_load_certificate(self):
        """Loading a non-certificate fails with EngineError."""
        Engine.load_openssl()
        engine = Engine.Engine('openssl')
        engine.set_default()
        try:
            with self.assertRaises(Engine.EngineError):
                engine.load_certificate('/dev/null')
        except SystemError:
            # Some OpenSSL builds surface this as SystemError instead.
            pass
def suite():
    """Build the test suite for this module."""
    return unittest.makeSuite(EngineTestCase)


# Allow running this module's tests directly.
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
| Edzvu/Edzvu.github.io | M2Crypto-0.35.2/tests/test_engine.py | Python | mit | 1,518 |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
from django.utils.html import format_html
# Represents a list type: BDE, BDA, BDS...
# nom: name of the type
# deux_tours: whether this list type's election requires two rounds
class TypeListe(models.Model):
    nom = models.CharField(max_length=10, verbose_name='Nom du type')
    deux_tours = models.BooleanField(default=False, verbose_name='Deux tours requis ?')

    def __str__(self):
        return self.nom
# Represents a candidate list (including the blank-vote pseudo-list).
class Liste(models.Model):
    type = models.ForeignKey(TypeListe, verbose_name='Type de liste')
    nom = models.CharField(max_length=50, verbose_name='Nom')
    liste_logo = models.ImageField(upload_to='logos', blank=True, verbose_name='Logo')
    liste_couleur = models.CharField(max_length=7, default='#000', verbose_name='Couleur (hexa)')
    est_vote_blanc = models.BooleanField(default=False, verbose_name='Vote blanc')

    class Meta:
        ordering = ['type', 'nom']

    def __str__(self):
        return "{} ({})".format(self.nom, self.type)

    # Renders a rectangle in the list's color in the admin (CSS).
    def color_box(self):
        # BUG FIX: the color was interpolated with str.format() before
        # calling format_html(), which bypasses HTML escaping; pass it as a
        # format_html() argument instead so it is escaped properly.
        return format_html(
            '<div id="rectange_couleur" style="background-color: {}; '
            'width:80px; height:15px; border-radius:5px;"></div>',
            self.liste_couleur)
    color_box.short_description = 'Couleur'

    # Returns the uploaded logo URL, or the default placeholder logo.
    def logo(self):
        if self.liste_logo and hasattr(self.liste_logo, 'url'):
            return self.liste_logo.url
        else:
            return '/media/logos/blank.png'
    logo.short_description = 'Logo'

    # Number of first-round votes, computed with a COUNT query.
    def get_nombre_votes_1(self):
        return Vote.objects.filter(liste=self, est_second_tour=False).count()
    get_nombre_votes_1.short_description = '# votes tour 1'

    # Number of second-round votes, computed with a COUNT query.
    def get_nombre_votes_2(self):
        return Vote.objects.filter(liste=self, est_second_tour=True).count()
    get_nombre_votes_2.short_description = '# votes tour 2'
get_nombre_votes_2.short_description = '# votes tour 2'
# Represents an eligible voter.
class Votant(models.Model):
    prenom = models.CharField(max_length=100, verbose_name='Prénom')
    nom = models.CharField(max_length=100, verbose_name='Nom')
    annee = models.CharField(max_length=2, verbose_name='Année')
    login = models.CharField(max_length=8, verbose_name='Login')
    apprenti = models.BooleanField(default=False, verbose_name='Apprenti')
    phelmag = models.BooleanField(default=False, verbose_name='Phelmag')
    a_vote = models.BooleanField(default=False, verbose_name='A voté')
    # Link to the Django auth user; not editable from the admin.
    user = models.OneToOneField(User, null=True, blank=True, verbose_name='Utilisateur associé', editable=False)

    class Meta:
        ordering = ['annee', 'nom']

    def __str__(self):
        return self.login
# A ballot: links a list and a voter, and records the IP address and date.
# est_second_tour flags second-round ballots.
class Vote(models.Model):
    liste = models.ForeignKey(Liste, verbose_name='Liste')
    votant = models.ForeignKey(Votant, verbose_name='Votant', null=True)
    # NOTE(review): IPAddressField is deprecated in later Django versions in
    # favor of GenericIPAddressField; switching would require a migration.
    ip = models.IPAddressField(verbose_name='Adresse IP')
    date = models.DateTimeField(verbose_name='Date et heure')
    est_second_tour = models.BooleanField(default=False, verbose_name='Second tour')

    def __str__(self):
        return '({0} -> {1} on {2} with IP {3})'.format(self.votant, self.liste, self.date, self.ip)

    # Renders the list name in the list's color in the admin.
    def color_liste(self):
        # BUG FIX: the values were interpolated with str.format() before
        # calling format_html(), which left the user-controlled list name
        # unescaped (stored-XSS in the admin); let format_html() escape them.
        return format_html('<span style="color: {};">{}</span>',
                           self.liste.liste_couleur, self.liste.nom)
    color_liste.short_description = 'Nom'

    # Shortcut to the list's type, for admin columns.
    def vote_type(self):
        return self.liste.type
    vote_type.short_description = 'Type de vote'
import cProfile
import json
from darglint.docstring.docstring import Docstring
if __name__ == '__main__':
    # Load the single golden fixture and profile parsing its docstring.
    with open('integration_tests/max_golden.json', 'r') as golden_file:
        entries = json.load(golden_file)
    assert len(entries) == 1
    golden = entries[0]
    docstring = golden['docstring']
    print(docstring)
    print()
    assert isinstance(docstring, str)
    # cProfile evaluates the statement string in this module's globals,
    # so 'golden' must remain a module-level name.
    cProfile.run('Docstring.from_google(golden["docstring"])')
| terrencepreilly/darglint | integration_tests/max_golden_profile.py | Python | mit | 402 |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from oslo_log import log as logging
from oslo_serialization import jsonutils
from neutron._i18n import _LE
from neutron.agent.linux import async_process
from neutron.agent.ovsdb import api as ovsdb
LOG = logging.getLogger(__name__)
OVSDB_ACTION_INITIAL = 'initial'
OVSDB_ACTION_INSERT = 'insert'
OVSDB_ACTION_DELETE = 'delete'
OVSDB_ACTION_NEW = 'new'
class OvsdbMonitor(async_process.AsyncProcess):
    """Manages an invocation of 'ovsdb-client monitor'.

    Streams updates for *table_name*; *columns* optionally restricts the
    output columns and *format* selects the output format (e.g. 'json').
    """

    def __init__(self, table_name, columns=None, format=None,
                 respawn_interval=None):
        # Assemble the ovsdb-client invocation; the column list and output
        # format are only appended when requested.
        monitor_cmd = ['ovsdb-client', 'monitor', table_name]
        if columns:
            monitor_cmd.append(','.join(columns))
        if format:
            monitor_cmd.append('--format=%s' % format)
        super(OvsdbMonitor, self).__init__(monitor_cmd,
                                           run_as_root=True,
                                           respawn_interval=respawn_interval,
                                           log_output=True,
                                           die_on_error=True)
class SimpleInterfaceMonitor(OvsdbMonitor):
    """Monitors the Interface table of the local host's ovsdb for changes.

    The has_updates() method indicates whether changes to the ovsdb
    Interface table have been detected since the monitor started or
    since the previous access.
    """

    def __init__(self, respawn_interval=None):
        super(SimpleInterfaceMonitor, self).__init__(
            'Interface',
            columns=['name', 'ofport', 'external_ids'],
            format='json',
            respawn_interval=respawn_interval,
        )
        # Buffered port events, drained by get_events().
        self.new_events = {'added': [], 'removed': []}

    @property
    def has_updates(self):
        """Indicate whether the ovsdb Interface table has been updated.

        If the monitor process is not active an error will be logged since
        it won't be able to communicate any update. This situation should be
        temporary if respawn_interval is set.
        """
        if not self.is_active():
            LOG.error(_LE("Interface monitor is not active"))
        else:
            self.process_events()
        return bool(self.new_events['added'] or self.new_events['removed'])

    def get_events(self):
        # Drain and return the buffered events; the buffer is reset so the
        # next call only sees events that arrived afterwards.
        self.process_events()
        events = self.new_events
        self.new_events = {'added': [], 'removed': []}
        return events

    def process_events(self):
        # Parse pending ovsdb-client output lines into add/remove events.
        devices_added = []
        devices_removed = []
        dev_to_ofport = {}
        for row in self.iter_stdout():
            # Each monitor row is a JSON object whose 'data' entry lists
            # (row-uuid, action, name, ofport, external_ids) tuples.
            json = jsonutils.loads(row).get('data')
            for ovs_id, action, name, ofport, external_ids in json:
                if external_ids:
                    external_ids = ovsdb.val_to_py(external_ids)
                if ofport:
                    ofport = ovsdb.val_to_py(ofport)
                device = {'name': name,
                          'ofport': ofport,
                          'external_ids': external_ids}
                if action in (OVSDB_ACTION_INITIAL, OVSDB_ACTION_INSERT):
                    devices_added.append(device)
                elif action == OVSDB_ACTION_DELETE:
                    devices_removed.append(device)
                elif action == OVSDB_ACTION_NEW:
                    # 'new' rows carry the ofport assigned after an insert.
                    dev_to_ofport[name] = ofport

        self.new_events['added'].extend(devices_added)
        self.new_events['removed'].extend(devices_removed)
        # update any events with ofports received from 'new' action
        for event in self.new_events['added']:
            event['ofport'] = dev_to_ofport.get(event['name'], event['ofport'])

    def start(self, block=False, timeout=5):
        # Optionally wait (up to *timeout* seconds) for the monitor process
        # to become active before returning.
        super(SimpleInterfaceMonitor, self).start()
        if block:
            with eventlet.timeout.Timeout(timeout):
                while not self.is_active():
                    eventlet.sleep()
| wolverineav/neutron | neutron/agent/linux/ovsdb_monitor.py | Python | apache-2.0 | 4,444 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA
from tuskar_ui import api as tuskar
from tuskar_ui.test import helpers as test
class ResourceManagementTests(test.BaseAdminViewTests):
    """Admin-view tests for the resource management dashboard index page."""

    def setUp(self):
        super(ResourceManagementTests, self).setUp()

    @test.create_stubs({
        tuskar.ResourceClass: (
            'get',
            'list',
            'list_racks',
            'nodes'),
        tuskar.FlavorTemplate: (
            'list',),
        tuskar.Node: (
            'list',),
        tuskar.Rack: (
            'list',)})
    def test_index(self):
        # Verify the index view renders and its tables are populated from
        # the stubbed tuskar API calls.  NOTE: mox replays the stubbed
        # calls in the order recorded below; keep recording order in sync
        # with the view's call order.
        # ResourceClass stubs
        resource_classes = self.tuskar_resource_classes.list()
        resource_class = self.tuskar_resource_classes.first()
        nodes = []
        racks = []
        # NOTE(review): these assign plain lists over the stubbed class
        # attributes/properties — presumably to fake empty collections;
        # confirm this matches how the view accesses them.
        tuskar.ResourceClass.nodes = nodes
        tuskar.ResourceClass.list_racks = racks
        tuskar.ResourceClass.list(
            IsA(http.HttpRequest)).\
            AndReturn(resource_classes)
        tuskar.ResourceClass.get(
            IsA(http.HttpRequest), resource_class.id).\
            AndReturn(resource_class)
        # ResourceClass stubs end

        # Rack stubs
        racks = self.tuskar_racks.list()
        tuskar.Rack.list(IsA(http.HttpRequest)).AndReturn(racks)
        tuskar.Node.list(IsA(http.HttpRequest)).AndReturn(nodes)
        # Rack stubs end

        # FlavorTemplate stubs
        flavors = self.tuskar_flavors.list()
        tuskar.FlavorTemplate.list(IsA(http.HttpRequest)).AndReturn(
            flavors)
        # FlavorTemplate stubs end

        self.mox.ReplayAll()

        url = reverse('horizon:infrastructure:resource_management:index')
        res = self.client.get(url)
        self.assertTemplateUsed(
            res, 'infrastructure/resource_management/index.html')

        # FlavorTemplate asserts
        self.assertItemsEqual(res.context['flavor_templates_table'].data,
                              flavors)
        # FlavorTemplate asserts end
        # ResourceClass asserts
        self.assertItemsEqual(res.context['resource_classes_table'].data,
                              resource_classes)
        # ResourceClass asserts end
        # Rack asserts
        self.assertItemsEqual(res.context['racks_table'].data, racks)
        # Rack asserts end
| jtomasek/tuskar-ui-1 | tuskar_ui/infrastructure/resource_management/tests.py | Python | apache-2.0 | 2,961 |
# subSystemBonusMinmatarElectronicScanProbeStrength
#
# Used by:
# Subsystem: Loki Electronics - Emergent Locus Analyzer
type = "passive"
def handler(fit, module, context):
    """Boost the base sensor strength of fitted scanner probes by the
    subsystem's Minmatar Electronic bonus (scaled per level of the
    Minmatar Electronic Systems skill).
    """
    bonus = module.getModifiedItemAttr("subsystemBonusMinmatarElectronic")
    fit.modules.filteredChargeBoost(
        lambda mod: mod.charge.group.name == "Scanner Probe",
        "baseSensorStrength",
        bonus,
        skill="Minmatar Electronic Systems")
| Ebag333/Pyfa | eos/effects/subsystembonusminmatarelectronicscanprobestrength.py | Python | gpl-3.0 | 496 |
from languages import languages
from client import Client
def main():
    """Package entry point; currently a no-op placeholder."""
    return None
if __name__ == '__main__':
main() | scruwys/easy_translate | easy_translate/__init__.py | Python | mit | 116 |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
hashlib backwards-compatibility module for older (pre-2.5) Python versions
This does not not NOT (repeat, *NOT*) provide complete hashlib
functionality. It only wraps the portions of MD5 functionality used
by SCons, in an interface that looks like hashlib (or enough for our
purposes, anyway). In fact, this module will raise an ImportError if
the underlying md5 module isn't available.
"""
__revision__ = "src/engine/SCons/compat/_scons_hashlib.py 4577 2009/12/27 19:43:56 scons"
import md5
import string
class md5obj:
md5_module = md5
def __init__(self, name, string=''):
if not name in ('MD5', 'md5'):
raise ValueError, "unsupported hash type"
self.name = 'md5'
self.m = self.md5_module.md5()
def __repr__(self):
return '<%s HASH object @ %#x>' % (self.name, id(self))
def copy(self):
import copy
result = copy.copy(self)
result.m = self.m.copy()
return result
def digest(self):
return self.m.digest()
def update(self, arg):
return self.m.update(arg)
if hasattr(md5.md5(), 'hexdigest'):
def hexdigest(self):
return self.m.hexdigest()
else:
# Objects created by the underlying md5 module have no native
# hexdigest() method (*cough* 1.5.2 *cough*), so provide an
# equivalent lifted from elsewhere.
def hexdigest(self):
h = string.hexdigits
r = ''
for c in self.digest():
i = ord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
# hashlib-compatible module-level aliases: hashlib exposes new() and md5()
new = md5obj
def md5(string=''):
    # factory matching hashlib.md5(); note this shadows the imported `md5`
    # module name at module level (the class keeps its own reference via
    # md5obj.md5_module)
    return md5obj('md5', string)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| barnone/EigenD | tools/packages/SCons/compat/_scons_hashlib.py | Python | gpl-3.0 | 2,950 |
# -*- coding: utf-8 -*-
"""
GIS_TOOLS
==================
This module contains tools to help project between coordinate systems. The
module will first use GDAL if installed. If GDAL is not installed then
pyproj is used. A test has been made for new versions of GDAL which swap the
input lat and lon when using transferPoint, so the user should not have to
worry about which version they have.
Main functions are:
* project_point_ll2utm
* project_point_utm2ll
These can take in a point or an array or list of points to project.
latitude and longitude can be input as:
* 'DD:mm:ss.ms'
* 'DD.decimal_degrees'
* float(DD.decimal_degrees)
Created on Fri Apr 14 14:47:48 2017
Revised: 5/2020 JP
@author: jrpeacock
"""
# ==============================================================================
# Imports
# ==============================================================================
import numpy as np
from mtpy.utils.mtpylog import MtPyLog
from mtpy.utils import HAS_GDAL, EPSG_DICT, NEW_GDAL
if HAS_GDAL:
from osgeo import osr
from osgeo.ogr import OGRERR_NONE
else:
import pyproj
_logger = MtPyLog.get_mtpy_logger(__name__)
if NEW_GDAL:
_logger.info('INFO: GDAL version 3 detected')
# =============================================================================
# GIS Error container
# =============================================================================
class GISError(Exception):
    """Raised for invalid coordinates, datums, UTM zones, or EPSG codes."""
    pass
# ==============================================================================
# Make sure lat and lon are in decimal degrees
# ==============================================================================
def _assert_minutes(minutes):
    """Return *minutes* unchanged after asserting it lies in [0, 60)."""
    assert 0 <= minutes < 60., \
        'minutes needs to be <60 and >0, currently {0:.0f}'.format(minutes)
    return minutes
def _assert_seconds(seconds):
    """Return *seconds* unchanged after asserting it lies in [0, 60)."""
    assert 0 <= seconds < 60., \
        'seconds needs to be <60 and >0, currently {0:.3f}'.format(seconds)
    return seconds
def convert_position_str2float(position_str):
    """Convert a position string to decimal degrees.

    Accepts either 'DD:MM:SS.ms' or a plain 'DD.decimal' string; returns
    None for None / 'None' input.

    :param position_str: degrees of latitude or longitude
    :type position_str: string [ 'DD:MM:SS.ms' | 'DD.degrees' ]
    :return: latitude or longitude in decimal degrees
    :rtype: float

    :Example: ::

        >>> from mtpy.utils import gis_tools
        >>> gis_tools.convert_position_str2float('-118:34:56.3')
        -118.58230555555555
    """
    if position_str in [None, 'None']:
        return None

    # plain decimal-degree string: just parse it
    if ':' not in position_str:
        try:
            return float(position_str)
        except ValueError:
            raise GISError('{0} not correct format.\n'.format(position_str) +
                           'Position needs to be DD.decimal_degrees')

    # DD:MM:SS form must have exactly two colons
    if position_str.count(':') != 2:
        raise GISError('{0} not correct format.\n'.format(position_str) +
                       'Position needs to be DD:MM:SS.ms')

    deg_str, min_str, sec_str = position_str.split(':')
    deg = float(deg_str)
    minutes = _assert_minutes(float(min_str))
    sec = _assert_seconds(float(sec_str))

    # carry the degree sign onto the whole value
    return np.sign(deg) * (abs(deg) + minutes / 60. + sec / 3600.)
def assert_lat_value(latitude):
    """Coerce *latitude* to decimal degrees.

    Returns None for None / 'None' / non-castable types; parses
    'DD:MM:SS.ms' strings via convert_position_str2float; raises GISError
    when the magnitude is 90 degrees or more.
    """
    if latitude in [None, 'None']:
        return None
    try:
        value = float(latitude)
    except TypeError:
        return None
    except ValueError:
        # not a plain number -- try the DD:MM:SS string form
        value = convert_position_str2float(latitude)

    if abs(value) >= 90:
        raise GISError('|Latitude = {0:.5f}| > 90, unacceptable!'.format(value))
    return value
def assert_lon_value(longitude):
    """Coerce *longitude* to decimal degrees.

    Returns None for None / 'None' / non-castable types; parses
    'DD:MM:SS.ms' strings via convert_position_str2float; raises GISError
    when the magnitude is 180 degrees or more.
    """
    if longitude in [None, 'None']:
        return None
    try:
        value = float(longitude)
    except TypeError:
        return None
    except ValueError:
        # not a plain number -- try the DD:MM:SS string form
        value = convert_position_str2float(longitude)

    if abs(value) >= 180:
        raise GISError('|Longitude = {0:.5f}| > 180, unacceptable!'.format(value))
    return value
def assert_elevation_value(elevation):
    """Return *elevation* as a float; log a warning and return 0.0 when the
    value cannot be cast to a number.
    """
    try:
        return float(elevation)
    except (ValueError, TypeError):
        _logger.warn('{0} is not a number, setting elevation to 0'.format(elevation))
        return 0.0
def convert_position_float2str(position):
    """Convert a decimal-degree float to a 'DD:MM:SS.ss' string.

    :param position: decimal degrees of latitude or longitude
    :type position: float
    :return: latitude or longitude as 'DD:MM:SS.ss'
    :rtype: string
    :raises GISError: if *position* is not a float

    :Example: ::

        >>> import mtpy.utils.gis_tools as gis_tools
        >>> gis_tools.convert_position_float2str(-118.34563)
        '-118:20:44.27'
    """
    if not isinstance(position, float):
        raise GISError('Given value is not a float')

    # Work on the magnitude and re-attach the sign at the end.  Using
    # int(position) for the degrees loses the sign for values in (-1, 0)
    # and makes the seconds-rollover correction below move negative degrees
    # the wrong way.
    sign = '-' if position < 0 else ''
    deg = int(abs(position))
    minutes = (abs(position) - deg) * 60.
    # need to round seconds to 4 decimal places otherwise machine precision
    # keeps the 60 second roll over and the string is incorrect.
    sec = np.round((minutes - int(minutes)) * 60., 4)
    if sec >= 60.:
        minutes += 1
        sec = 0

    if int(minutes) == 60:
        deg += 1
        minutes = 0

    return '{0}{1:.0f}:{2:02.0f}:{3:05.2f}'.format(sign, deg, int(minutes), sec)
# ==============================================================================
# Project a point
# ==============================================================================
def get_utm_zone(latitude, longitude):
    """Return (zone_number, is_northern, utm_zone_string) for a lat/lon pair.

    :param latitude: latitude in [ 'DD:mm:ss.ms' | 'DD.decimal' | float ]
    :param longitude: longitude in [ 'DD:mm:ss.ms' | 'DD.decimal' | float ]
    :return: zone number (int), is_northern (bool), zone string e.g. '55H'

    :Example: ::

        >>> get_utm_zone('-34:17:57.99', 149.2010301)
        (55, False, '55H')
    """
    lat = assert_lat_value(latitude)
    lon = assert_lon_value(longitude)

    # 6-degree longitudinal zones, zone 1 starting at 180W
    zone_number = int(1 + (lon + 180.0) / 6.0)
    is_northern = bool(lat >= 0)
    band_letter = utm_letter_designator(lat)

    return zone_number, is_northern, '{0:02.0f}{1}'.format(zone_number,
                                                           band_letter)
def utm_letter_designator(latitude):
    """Return the UTM latitude-band letter for *latitude*.

    :param latitude: latitude in [ 'DD:mm:ss.ms' | 'DD.decimal' | float ]
    :return: one-letter band designator, 'Z' if outside the 80S-84N range

    :Example: ::

        >>> utm_letter_designator('-34:17:57.99')
        'H'
    """
    latitude = assert_lat_value(latitude)

    # 8-degree bands from 80S to 84N (letters I and O are skipped by the UTM
    # convention).  Checked in order with inclusive bounds on both edges,
    # matching the original dict-based lookup exactly.
    bands = [('C', -80, -72), ('D', -72, -64), ('E', -64, -56),
             ('F', -56, -48), ('G', -48, -40), ('H', -40, -32),
             ('J', -32, -24), ('K', -24, -16), ('L', -16, -8),
             ('M', -8, 0), ('N', 0, 8), ('P', 8, 16),
             ('Q', 16, 24), ('R', 24, 32), ('S', 32, 40),
             ('T', 40, 48), ('U', 48, 56), ('V', 56, 64),
             ('W', 64, 72), ('X', 72, 84)]
    for letter, south_edge, north_edge in bands:
        if north_edge >= latitude >= south_edge:
            return letter
    return 'Z'
def split_utm_zone(utm_zone):
    """
    Split utm zone into zone number and is northing

    :param utm_zone: utm zone string as {0-9}{0-9}{C-X} or {+,-}{0-9}{0-9}
    :type utm_zone: [ string | int ]
    :return: utm zone number
    :rtype: int
    :return: is_northern
    :rtype: boolean [ True | False ]

    :Example: ::

        >>> gis_tools.split_utm_zone('11S')
        11, True
    """
    utm_zone = validate_utm_zone(utm_zone)

    if isinstance(utm_zone, int):
        # std UTM code returned by gdal: sign encodes the hemisphere
        is_northern = False if utm_zone < 0 else True
        zone_number = abs(utm_zone)
    elif isinstance(utm_zone, str):
        zone_number = int(utm_zone[0:-1])
        # Latitude bands N-X are the northern hemisphere, C-M the southern.
        # The comparison must include 'n' itself (>=), otherwise band 'N'
        # (0-8 degrees north) is misclassified as southern.
        is_northern = True if utm_zone[-1].lower() >= 'n' else False
    else:
        msg = "utm_zone type {0}, {1} not supported".format(type(utm_zone),
                                                            str(utm_zone))
        raise NotImplementedError(msg)

    return zone_number, is_northern
def utm_zone_to_epsg(zone_number, is_northern):
    """Return the WGS84-datum EPSG code for a UTM zone, or None if no entry
    in EPSG_DICT matches.

    :param zone_number: UTM zone number
    :type zone_number: int
    :param is_northern: True for the northern hemisphere
    :type is_northern: bool

    :Example: ::

        >>> utm_zone_to_epsg(55, False)
        32755
    """
    zone_tag = '+zone={:<2}'.format(zone_number)
    for epsg_key, proj_str in EPSG_DICT.items():
        if zone_tag not in proj_str or '+datum=WGS84' not in proj_str:
            continue
        # northern entries lack '+south'; southern entries carry it
        has_south = '+south' in proj_str
        if is_northern != has_south:
            return epsg_key
def get_epsg(latitude, longitude):
    """Return the EPSG code of the WGS84 UTM projection covering a point.

    :param latitude: latitude in [ 'DD:mm:ss.ms' | 'DD.decimal' | float ]
    :param longitude: longitude in [ 'DD:mm:ss.ms' | 'DD.decimal' | float ]
    :return: EPSG number
    :rtype: int

    :Example: ::

        >>> get_epsg(-34.299442, '149:12:03.71')
        32755
    """
    zone_number, is_northern, _utm_str = get_utm_zone(latitude, longitude)
    return utm_zone_to_epsg(zone_number, is_northern)
def _get_gdal_coordinate_system(datum):
    """Build an osr.SpatialReference from an EPSG int or a well-known datum.

    :param datum: EPSG number (int) or well-known datum name (str)
    :return: spatial reference coordinate system
    :rtype: osr.SpatialReference
    :raises GISError: on unsupported input type or a GDAL/ogr error code
    """
    cs = osr.SpatialReference()
    if isinstance(datum, int):
        ogrerr = cs.ImportFromEPSG(datum)
    elif isinstance(datum, str):
        ogrerr = cs.SetWellKnownGeogCS(datum)
    else:
        raise GISError("""datum {0} not understood, needs to be EPSG as int
                       or a well known datum as a string""".format(datum))
    # both GDAL entry points report failure the same way
    if ogrerr != OGRERR_NONE:
        raise GISError("GDAL/osgeo ogr error code: {}".format(ogrerr))
    return cs
def validate_epsg(epsg):
    """Return *epsg* as an int (None passes through unchanged).

    :param epsg: EPSG number
    :type epsg: [ int | str | None ]
    :raises GISError: when the value cannot be converted to an integer
    """
    if isinstance(epsg, int):
        return epsg
    if epsg is None:
        return None
    try:
        return int(epsg)
    except ValueError:
        raise GISError('EPSG must be an integer')
def validate_utm_zone(utm_zone):
    """Normalize a UTM zone designator.

    bytes -> decoded str, float/int -> int, anything else -> str;
    None passes through unchanged.

    :param utm_zone: UTM zone as {0-9}{0-9}{C-X} or {+, -}{0-9}{0-9}
    :type utm_zone: [ int | float | str | bytes | None ]
    """
    if utm_zone is None:
        return None
    # JP: if its unicode then its already a valid string in python 3
    if isinstance(utm_zone, (np.bytes_, bytes)):
        return utm_zone.decode('UTF-8')
    if isinstance(utm_zone, (float, int)):
        return int(utm_zone)
    return str(utm_zone)
def validate_input_values(values, location_type=None):
    """
    make sure the input values for lat, lon, easting, northing will be an
    numpy array with a float data type

    can input a string as a comma separated list

    :param values: values to project, can be given as:
        * float
        * string of a single value or a comma separate string '34.2, 34.5'
        * list of floats or string
        * numpy.ndarray
    :type values: [ float | string | list | numpy.ndarray ]
    :param location_type: 'lat'/'latitude' or 'lon'/'longitude' to validate
        each value against the corresponding bounds; None skips validation
    :return: flattened 1D array of floats
    :rtype: numpy.ndarray(dtype=float)
    """
    # NOTE: np.float was removed in NumPy 1.24; it was a deprecated alias of
    # the builtin float, which is used here instead.
    if isinstance(values, (int, float)):
        values = np.array([values], dtype=float)
    elif isinstance(values, (list, tuple)):
        values = np.array(values, dtype=float)
    elif isinstance(values, str):
        values = [ss.strip() for ss in values.strip().split(',')]
        # NOTE(review): without a location_type this stays a string array,
        # despite the documented float return -- confirm callers always pass
        # location_type for string input.
        values = np.array(values)
    elif isinstance(values, np.ndarray):
        values = values.astype(float)

    # Flatten to 1D
    values = values.flatten()

    if location_type in ['lat', 'latitude']:
        for ii, value in enumerate(values):
            try:
                values[ii] = assert_lat_value(value)
            except GISError as error:
                raise GISError('{0}\n Bad input value at index {1}'.format(
                               error, ii))
        values = values.astype(float)

    if location_type in ['lon', 'longitude']:
        for ii, value in enumerate(values):
            try:
                values[ii] = assert_lon_value(value)
            except GISError as error:
                raise GISError('{0}\n Bad input value at index {1}'.format(
                               error, ii))
        values = values.astype(float)

    return values
def _get_gdal_projection_ll2utm(datum, utm_zone, epsg):
    """
    Get the GDAL transfrom point function for given datum, utm_zone, epsg to
    transform a latitude and longitude point to UTM coordinates.

    ..note:: Have to input either UTM zone or EPSG number

    :param datum: well known datum
    :type datum: string
    :param utm_zone: utm_zone {0-9}{0-9}{C-X} or {+, -}{0-9}{0-9}
    :type utm_zone: [ string | int ]
    :param epsg: EPSG number
    :type epsg: [ int | string ]
    :return: tranform point function
    :rtype: osr.TransformPoint function
    """
    if utm_zone is None and epsg is None:
        raise GISError('Need to input either UTM zone or EPSG number')
    # source coordinate system: geographic lat/lon on the given datum
    ll_cs = _get_gdal_coordinate_system(datum)
    # project point on to EPSG coordinate system if given
    if isinstance(epsg, int):
        utm_cs = _get_gdal_coordinate_system(validate_epsg(epsg))
    # otherwise project onto given datum
    elif epsg is None:
        utm_cs = _get_gdal_coordinate_system(datum)
        zone_number, is_northern = split_utm_zone(utm_zone)
        # SetUTM mutates the datum CS into the requested UTM projection
        utm_cs.SetUTM(zone_number, is_northern)
    # NOTE(review): if epsg is neither int nor None, utm_cs is never bound
    # and the next line raises NameError -- confirm callers always pass
    # int or None (validate_epsg upstream should guarantee this).
    return osr.CoordinateTransformation(ll_cs, utm_cs).TransformPoint
def _get_gdal_projection_utm2ll(datum, utm_zone, epsg):
    """
    Get the GDAL transfrom point function for given datum, utm_zone, epsg to
    transform a UTM point to latitude and longitude.

    ..note:: Have to input either UTM zone or EPSG number

    :param datum: well known datum
    :type datum: string
    :param utm_zone: utm_zone {0-9}{0-9}{C-X} or {+, -}{0-9}{0-9}
    :type utm_zone: [ string | int ]
    :param epsg: EPSG number
    :type epsg: [ int | string ]
    :return: tranform point function
    :rtype: osr.TransformPoint function
    """
    if utm_zone is None and epsg is None:
        raise GISError('Need to input either UTM zone or EPSG number')
    # zone_number, is_northern = split_utm_zone(utm_zone)
    # EPSG (when given) fully defines the source UTM coordinate system
    if epsg is not None:
        utm_cs = _get_gdal_coordinate_system(validate_epsg(epsg))
    else:
        zone_number, is_northern = split_utm_zone(utm_zone)
        utm_cs = _get_gdal_coordinate_system(datum)
        utm_cs.SetUTM(zone_number, is_northern)
    # target CS is the geographic (lat/lon) system underlying the UTM CS
    ll_cs = utm_cs.CloneGeogCS()
    return osr.CoordinateTransformation(utm_cs, ll_cs).TransformPoint
def _get_pyproj_projection(datum, utm_zone, epsg):
    """
    Get the pyproj transfrom point function for given datum, utm_zone, epsg to
    transform either a UTM point to latitude and longitude, or latitude
    and longitude point to UTM.

    ..note:: Have to input either UTM zone or EPSG number

    :param datum: well known datum
    :type datum: string
    :param utm_zone: utm_zone {0-9}{0-9}{C-X} or {+, -}{0-9}{0-9}
    :type utm_zone: [ string | int ]
    :param epsg: EPSG number
    :type epsg: [ int | string ]
    :return: pyproj transform function
    :rtype: pyproj.Proj function
    """
    if utm_zone is None and epsg is None:
        raise GISError('Need to input either UTM zone or EPSG number')

    if isinstance(epsg, int):
        # NOTE(review): the '+init=EPSG:n' syntax is deprecated in pyproj>=2
        # ('EPSG:n' is the modern spelling) -- confirm the pinned pyproj
        # version before changing.
        pp = pyproj.Proj('+init=EPSG:%d' % (epsg))
    elif epsg is None:
        zone_number, is_northern = split_utm_zone(utm_zone)
        zone = 'north' if is_northern else 'south'
        proj_str = '+proj=utm +zone=%d +%s +datum=%s' % (zone_number, zone,
                                                         datum)
        pp = pyproj.Proj(proj_str)
    # NOTE(review): if epsg is neither int nor None, pp is unbound and the
    # return raises NameError -- callers are expected to pass int or None.
    return pp
def project_point_ll2utm(lat, lon, datum='WGS84', utm_zone=None, epsg=None):
    """
    Project a point that is in latitude and longitude to the specified
    UTM coordinate system.

    :param lat: latitude in [ 'DD:mm:ss.ms' | 'DD.decimal' | float ]
    :type lat: [ string | float ]
    :param lon: longitude in [ 'DD:mm:ss.ms' | 'DD.decimal' | float ]
    :type lon: [ string | float ]
    :param datum: well known datum
    :type datum: string
    :param utm_zone: utm_zone {0-9}{0-9}{C-X} or {+, -}{0-9}{0-9}
    :type utm_zone: [ string | int ]
    :param epsg: EPSG number defining projection
                 (see http://spatialreference.org/ref/ for moreinfo)
                 Overrides utm_zone if both are provided
    :type epsg: [ int | string ]
    :return: project point(s); tuple (easting, northing, utm_zone) for a
             single point, np.recarray with fields (easting, northing,
             elev, utm_zone) for multiple points

    :Single Point: ::

        >>> gis_tools.project_point_ll2utm('-34:17:57.99', '149.2010301')
        (702562.6911014864, 6202448.5654573515, '55H')
    """
    if lat is None or lon is None:
        return None, None, None

    # make sure the lat and lon are in decimal degrees
    lat = validate_input_values(lat, location_type='lat')
    lon = validate_input_values(lon, location_type='lon')

    if utm_zone in [None, 'none', 'None']:
        # get the UTM zone in the datum coordinate system, otherwise
        zone_number, is_northern, utm_zone = get_utm_zone(lat.mean(),
                                                          lon.mean())
    epsg = validate_epsg(epsg)

    if HAS_GDAL:
        ll2utm = _get_gdal_projection_ll2utm(datum, utm_zone, epsg)
    else:
        ll2utm = _get_pyproj_projection(datum, utm_zone, epsg)

    # return different results depending on if lat/lon are iterable
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was just a deprecated alias for it).
    projected_point = np.zeros_like(lat, dtype=[('easting', float),
                                                ('northing', float),
                                                ('elev', float),
                                                ('utm_zone', 'U3')])
    for ii in range(lat.size):
        if NEW_GDAL:
            # GDAL 3+ swapped TransformPoint to (lat, lon) axis order
            point = ll2utm(lat[ii], lon[ii])
        else:
            point = ll2utm(lon[ii], lat[ii])
        projected_point['easting'][ii] = point[0]
        projected_point['northing'][ii] = point[1]
        if HAS_GDAL:
            projected_point['elev'][ii] = point[2]
        projected_point['utm_zone'][ii] = utm_zone

    # if just projecting one point, then return as a tuple so as not to break
    # anything. In the future we should adapt to just return a record array
    if len(projected_point) == 1:
        return (projected_point['easting'][0],
                projected_point['northing'][0],
                projected_point['utm_zone'][0])
    else:
        return np.rec.array(projected_point)
def project_point_utm2ll(easting, northing, utm_zone, datum='WGS84', epsg=None):
    """
    Project a point that is in UTM to the specified geographic coordinate
    system.

    :param easting: easting in meters
    :type easting: float
    :param northing: northing in meters
    :type northing: float
    :param datum: well known datum
    :type datum: string
    :param utm_zone: utm_zone {0-9}{0-9}{C-X} or {+, -}{0-9}{0-9}
    :type utm_zone: [ string | int ]
    :param epsg: EPSG number defining projection
                 (see http://spatialreference.org/ref/ for moreinfo)
                 Overrides utm_zone if both are provided
    :type epsg: [ int | string ]
    :return: project point(s); tuple (latitude, longitude) for a single
             point, np.recarray with fields (latitude, longitude) for
             multiple points

    :Single Point: ::

        >>> gis_tools.project_point_utm2ll(670804.18810336,
        ...                                4429474.30215206,
        ...                                datum='WGS84',
        ...                                utm_zone='11T',
        ...                                epsg=26711)
        (40.000087, -114.999128)
    """
    easting = validate_input_values(easting)
    northing = validate_input_values(northing)
    epsg = validate_epsg(epsg)

    if HAS_GDAL:
        utm2ll = _get_gdal_projection_utm2ll(datum, utm_zone, epsg)
    else:
        utm2ll = _get_pyproj_projection(datum, utm_zone, epsg)

    # return different results depending on if lat/lon are iterable
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was just a deprecated alias for it).
    projected_point = np.zeros_like(easting,
                                    dtype=[('latitude', float),
                                           ('longitude', float)])
    for ii in range(easting.size):
        if HAS_GDAL:
            point = utm2ll(easting[ii], northing[ii], 0.0)
            # GDAL's axis order differs between versions; use the latitude
            # bounds check to decide which element is which
            try:
                assert_lat_value(point[0])
                projected_point['latitude'][ii] = round(point[0], 6)
                projected_point['longitude'][ii] = round(point[1], 6)
            except GISError:
                projected_point['latitude'][ii] = round(point[1], 6)
                projected_point['longitude'][ii] = round(point[0], 6)
        else:
            point = utm2ll(easting[ii], northing[ii], inverse=True)
            projected_point['latitude'][ii] = round(point[1], 6)
            projected_point['longitude'][ii] = round(point[0], 6)

    # if just projecting one point, then return as a tuple so as not to break
    # anything. In the future we should adapt to just return a record array
    if len(projected_point) == 1:
        return (projected_point['latitude'][0],
                projected_point['longitude'][0])
    else:
        return np.rec.array(projected_point)
def epsg_project(x, y, epsg_from, epsg_to, proj_str=None):
    """
    project some xy points using the pyproj modules

    Parameters
    ----------
    x : integer or float
        x coordinate of point
    y : integer or float
        y coordinate of point
    epsg_from : int
        epsg code of x, y points provided. To provide custom projection, set
        to 0 and provide proj_str
    epsg_to : TYPE
        epsg code to project to. To provide custom projection, set
        to 0 and provide proj_str
    proj_str : str
        Proj4 string to provide to pyproj if using custom projection. This proj
        string will be applied if epsg_from or epsg_to == 0.
        The default is None.

    Returns
    -------
    xp, yp
        x and y coordinates of projected point.

    """
    try:
        import pyproj
    except ImportError:
        print("please install pyproj")
        return

    # option to add custom projection
    # NOTE: this mutates the module-level EPSG_DICT (key 0) so later calls
    # with epsg 0 see the most recently supplied proj_str.
    # print("epsg",epsg_from,epsg_to,"proj_str",proj_str)
    if 0 in [epsg_from, epsg_to]:
        EPSG_DICT[0] = proj_str

    try:
        p1 = pyproj.Proj(EPSG_DICT[epsg_from])
        p2 = pyproj.Proj(EPSG_DICT[epsg_to])
    except KeyError:
        print("Surface or data epsg either not in dictionary or None")
        return

    # NOTE(review): pyproj.transform is deprecated (removed in pyproj 3's
    # preferred API in favour of Transformer) -- confirm pinned pyproj
    # version before modernizing.
    return pyproj.transform(p1, p2, x, y)
def utm_wgs84_conv(lat, lon):
    """
    Bidirectional UTM-WGS84 converter https://github.com/Turbo87/utm/blob/master/utm/conversion.py

    :param lat: latitude in decimal degrees
    :param lon: longitude in decimal degrees
    :return: tuple(e, n, zone, lett)
    """

    import utm  # pip install utm
    tup = utm.from_latlon(lat, lon)

    (new_lat, new_lon) = utm.to_latlon(tup[0], tup[1], tup[2], tup[3])
    # print (new_lat,new_lon)  # should be same as the input param

    # Round-trip consistency check.  The original compared against
    # `1.0 * np.e - 10` (~ -7.28, i.e. Euler's number minus ten), which is
    # negative, so the warnings fired on every single call; the intended
    # tolerance is the literal 1.0e-10.
    if abs(lat - new_lat) > 1.0e-10:
        print("Warning: lat and new_lat should be equal!")

    if abs(lon - new_lon) > 1.0e-10:
        print("Warning: lon and new_lon should be equal!")

    return tup
#################################################################
# Example usages of this script/module
# python gis_tools.py
# =================================================================
if __name__ == "__main__":
    # smoke test: project a single south-eastern Australian point to UTM
    my_lat = -35.0
    my_lon = 149.5
    utm_point = project_point_ll2utm(my_lat, my_lon)
    print("project_point_ll2utm(mylat, mylon) =: ", utm_point)
| MTgeophysics/mtpy | mtpy/utils/gis_tools.py | Python | gpl-3.0 | 26,855 |
# import resource
import resource
import numpy as np
import helpers.Time as t
import helpers.load_data as data
import helpers.SplitMatrix as splitmatrix
from sklearn import linear_model
import pandas as pd
import helpers.NumbersRounder as rounder
# Module-level experiment state shared by all approaches below:
# the full ratings matrix and a fixed assignment of each rating to one of
# `nfolds` cross-validation folds.
ratings = data.load_ratings()
nfolds = 5
np.random.seed(17)  # fixed seed so the fold assignment is reproducible
seqs = [x % nfolds for x in range(len(ratings))]  # fold label per rating row
np.random.shuffle(seqs)
def global_average():
    """Naive approach 1: predict every rating with the global train mean.

    Runs nfolds-fold cross-validation over the module-level `ratings`/`seqs`
    split and prints per-fold and mean RMSE plus elapsed time and peak
    memory usage.
    """
    # allocate memory for results:
    err_train = np.zeros(nfolds)
    err_test = np.zeros(nfolds)

    print("Naiv Approach_1_:_Global_Average")
    print("_________________________________")
    print("\n")
    # NOTE(review): t.start() is presumably a current-timestamp getter from
    # helpers.Time -- confirm.
    start = t.start()
    # for each fold:
    for fold in range(nfolds):
        train_set = np.array([x != fold for x in seqs])
        test_set = np.array([x == fold for x in seqs])
        train = ratings[train_set]
        test = ratings[test_set]

        # First naiv approach... global
        # calculate model parameters: mean rating over the training set:
        gmr = np.mean(train[:, 2])

        # apply the model to the train set:
        err_train[fold] = np.sqrt(np.mean((train[:, 2] - gmr) ** 2))

        # apply the model to the test set:
        err_test[fold] = np.sqrt(np.mean((test[:, 2] - gmr) ** 2))

        # print errors:
        print("Fold " + str(fold) + ": RMSE_train=" + str(err_train[fold]) + "; RMSE_test=" + str(err_test[fold]))

    elapsed = t.start() - start
    mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    # print the final conclusion:
    print("\n")
    print("Mean error on TRAIN: " + str(np.mean(err_train)))
    print("Mean error on TEST: " + str(np.mean(err_test)))
    # NOTE(review): `elapsed % 60` discards whole minutes, so runs longer
    # than a minute are reported incorrectly.
    print("Time: " + str(elapsed % 60) + " seconds")
    print("Memory: " + str(mem_usage) + " kilobytes")
    print("\n")
    # NOTE(review): gmr here is the train mean of the LAST fold only, not a
    # global average over all ratings.
    print("Global Average :" + str(gmr))
    print("=============================================================")
    print("=============================================================")
    print("\n")
def user_average():
    """Naive approach 2: predict each rating with the user's train-set mean.

    Runs nfolds-fold cross-validation and prints per-fold and mean RMSE plus
    elapsed time and peak memory usage.
    """
    ratings_df = pd.DataFrame(ratings, columns=['user_id', 'movie_id', 'rating'], dtype=int)
    # implement the means for each user
    mean_user_all = np.mean(ratings_df.groupby(['user_id'])['rating'].mean())
    # allocate memory for results:
    err_train = np.zeros(nfolds)
    err_test = np.zeros(nfolds)

    print("Naiv Approach_2_:_User_Average")
    print("_________________________________")
    print("\n")
    start = t.start()
    # for each fold:
    for fold in range(nfolds):
        train_sel = np.array([x != fold for x in seqs])
        test_sel = np.array([x == fold for x in seqs])
        # make DataFrames for train and test
        train_df = pd.DataFrame(ratings_df.iloc[train_sel],
                                columns=['user_id', 'movie_id', 'rating'],
                                dtype=int)  # .iloc : indexing with np.array in pd.DataFrame)
        test_df = pd.DataFrame(ratings_df.iloc[test_sel],
                               columns=['user_id', 'movie_id', 'rating'],
                               dtype=int)
        # Count the occur frequency of each User in the train & test.
        # NOTE(review): times_u_*[1:] assumes user ids start at 1 and are
        # contiguous -- confirm against the ratings file.
        times_u_train = np.bincount(train_df['user_id'])
        times_u_test = np.bincount(test_df['user_id'])
        # Vector of means Implementation for each User
        mean_u_train = np.array(train_df.groupby(['user_id'])['rating'].mean())
        # After the vector of means Implementation we make equal vectors.
        # NOTE(review): aligning by np.repeat assumes the DataFrame rows are
        # ordered by user_id and that every user appears in both splits --
        # verify, otherwise predictions are matched to the wrong rows.
        m_utrain_rep = np.repeat(mean_u_train, times_u_train[1:len(times_u_train)])
        m_utest_rep = np.repeat(mean_u_train, times_u_test[1:len(times_u_test)])

        # apply the model to the train set:
        err_train[fold] = np.sqrt(np.mean((train_df.iloc[:, 2] - m_utrain_rep) ** 2))

        # apply the model to the test set:
        err_test[fold] = np.sqrt(np.mean((test_df.iloc[:, 2] - m_utest_rep) ** 2))

        # print errors for each fold:
        print("Fold " + str(fold) + ": RMSE_train=" + str(err_train[fold]) + "; RMSE_test=" + str(err_test[fold]))

    elapsed = t.start() - start
    mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    # print the final conclusion:
    print("\n")
    print("Mean error on TRAIN: " + str(np.mean(err_train)))
    print("Mean error on TEST: " + str(np.mean(err_test)))
    # NOTE(review): `elapsed % 60` discards whole minutes (same issue as
    # global_average).
    print("Time: " + str(elapsed % 60) + " seconds")
    print("Memory: " + str(mem_usage) + " kilobytes")
    print("\n")
    print("Mean of all user ratings is : " + str(mean_user_all))
    print("=============================================================")
    print("=============================================================")
    print("\n")
def item_average():
    """Naive approach 3: predict every rating with the movie's mean rating.

    Runs ``nfolds``-fold cross validation over the global ``ratings`` array
    (columns: user_id, movie_id, rating) and prints the train/test RMSE of
    each fold plus summary statistics.

    Bug fix: the original body was copy-pasted from the user-average model
    and aggregated by ``user_id`` (so it reproduced the user-average
    results); every aggregation below now uses ``movie_id``.
    """
    ratings_df = pd.DataFrame(ratings, columns=['user_id', 'movie_id', 'rating'], dtype=int)
    # Sort by movie so that np.repeat below lines the per-movie means up with rows.
    ratings_df = ratings_df.sort(['movie_id'])  # NOTE: legacy pandas .sort API
    # Mean of the per-movie mean ratings, reported in the summary below.
    mean_movie_all = np.mean(ratings_df.groupby(['movie_id'])['rating'].mean())
    # allocate memory for results:
    err_train = np.zeros(nfolds)
    err_test = np.zeros(nfolds)
    # Random fold label for every rating row.
    seqs = [x % nfolds for x in range(len(ratings))]
    np.random.shuffle(seqs)
    print("Naiv Approach_3_:_Movie_Average")
    print("_________________________________")
    print("\n")
    start = t.start()
    for fold in range(nfolds):
        train_sel = np.array([x != fold for x in seqs])
        test_sel = np.array([x == fold for x in seqs])
        # make DataFrames for train and test
        train_df = pd.DataFrame(ratings_df.iloc[train_sel],
                                columns=['user_id', 'movie_id', 'rating'],
                                dtype=int)  # .iloc : indexing with np.array in pd.DataFrame
        test_df = pd.DataFrame(ratings_df.iloc[test_sel],
                               columns=['user_id', 'movie_id', 'rating'],
                               dtype=int)
        # Occurrence count of each movie in train & test (bug fix: was user_id).
        times_m_train = np.bincount(train_df['movie_id'])
        times_m_test = np.bincount(test_df['movie_id'])
        # Per-movie mean rating on the train split (bug fix: was user_id).
        mean_m_train = np.array(train_df.groupby(['movie_id'])['rating'].mean())
        # Expand the per-movie means to one value per rating row.
        # NOTE(review): assumes every movie id occurs in both splits — TODO confirm.
        m_mtrain_rep = np.repeat(mean_m_train, times_m_train[1:len(times_m_train)])
        m_mtest_rep = np.repeat(mean_m_train, times_m_test[1:len(times_m_test)])
        # apply the model to the train set:
        err_train[fold] = np.sqrt(np.mean((train_df.iloc[:, 2] - m_mtrain_rep) ** 2))
        # apply the model to the test set:
        err_test[fold] = np.sqrt(np.mean((test_df.iloc[:, 2] - m_mtest_rep) ** 2))
        # print errors for each fold:
        print("Fold " + str(fold) + ": RMSE_train=" + str(err_train[fold]) + "; RMSE_test=" + str(err_test[fold]))
    elapsed = t.start() - start
    mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    # print the final conclusion:
    print("\n")
    print("Mean error on TRAIN: " + str(np.mean(err_train)))
    print("Mean error on TEST: " + str(np.mean(err_test)))
    print("Time: " + str(elapsed % 60) + " seconds")
    print("Memory: " + str(mem_usage) + " kilobytes")
    print("\n")
    print("Mean of all movies ratings is : " + str(mean_movie_all))
    print("=============================================================")
    print("\n")
def user_item_average():
    """Naive approach 4: linear regression on per-user and per-movie means.

    For every rating the two features are the mean train rating of its user
    and of its movie; a linear regression combines them.  Prints the
    (rounded) regression RMSE on train and test.

    Bug fixes versus the original:
    * ``seqs`` was never defined here (NameError at runtime); the random
      fold assignment is now built exactly like in the sibling approaches.
    * the feature matrices were left all-zero, so the regression could only
      learn the intercept; they are now filled with the actual means.
    * ``np.random.shuffle(ratings)`` inside the fold loop reshuffled the
      data while the fold labels stayed fixed; removed.
    """
    # allocate memory for results:
    err_train = np.zeros(nfolds)
    err_test = np.zeros(nfolds)
    seqs = [x % nfolds for x in range(len(ratings))]
    np.random.shuffle(seqs)
    start = t.start()
    # for each fold:
    for fold in range(nfolds):
        train_set = np.array([x != fold for x in seqs])
        test_set = np.array([x == fold for x in seqs])
        train = ratings[train_set]
        test = ratings[test_set]
        # Per-user / per-movie mean ratings on the train split; the global
        # mean is the fallback for ids unseen in training.
        global_mean = np.mean(train[:, 2])
        user_mean = {uid: np.mean(train[train[:, 0] == uid, 2])
                     for uid in np.unique(train[:, 0])}
        movie_mean = {mid: np.mean(train[train[:, 1] == mid, 2])
                      for mid in np.unique(train[:, 1])}
        def make_features(data):
            # Column 0: user mean, column 1: movie mean.
            feats = np.zeros((len(data), 2))
            for i in range(len(data)):
                feats[i, 0] = user_mean.get(data[i, 0], global_mean)
                feats[i, 1] = movie_mean.get(data[i, 1], global_mean)
            return feats
        train_avg_rating = make_features(train)
        test_avg_rating = make_features(test)
        regr = linear_model.LinearRegression()
        regr.fit(train_avg_rating, train[:, 2])
        train_reg_pre = rounder.rounder(regr.coef_[0] * train_avg_rating[:, 0] + regr.coef_[1] * train_avg_rating[:, 1] + regr.intercept_)
        test_reg_pre = rounder.rounder(regr.coef_[0] * test_avg_rating[:, 0] + regr.coef_[1] * test_avg_rating[:, 1] + regr.intercept_)
        regr_error_train = np.sqrt(np.mean((train[:, 2] - train_reg_pre) ** 2))
        regr_error_test = np.sqrt(np.mean((test[:, 2] - test_reg_pre) ** 2))
        print("Coefficients:", regr.coef_, regr.intercept_)
    elapsed = t.start() - start
    mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    # print errors (of the last fold, as in the original):
    print("Error on Training set:", regr_error_train)
    print("Error on Test set:", regr_error_test)
    print("Time: " + str(elapsed % 60) + " seconds")
    print("Memory: " + str(mem_usage) + " kilobytes")
def mf_gradient_descent():
    """Matrix factorization (U x M) trained with stochastic gradient descent.

    Cross-validates over ``nfolds`` folds of the global ``ratings`` array and
    prints per-iteration and final train/test RMSE for each fold.

    Bug fix: ``np.zeros(users, movies)`` passes ``movies`` as the *dtype*
    argument and raises TypeError — shapes must be a tuple.  Those two
    pre-allocations were also dead (immediately overwritten by ``np.dot``),
    so they are simply removed.
    """
    num_factors = 10
    steps = 75
    learn_rate = 0.005
    regularization = 0.05  # lambda
    users = np.max(ratings[:, 0])
    movies = np.max(ratings[:, 1])
    start = t.start()
    for fold in range(nfolds):
        print("fold", fold)
        train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x % nfolds) != fold])
        test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x % nfolds) == fold])
        # Convert the data set to the IxJ matrix
        # (presumably NaN where no rating exists, given the isnan masks below).
        x_data = splitmatrix.split_matrix(train_set, users, movies)
        # initialize to random matrices
        U = np.random.rand(users, num_factors)
        M = np.random.rand(num_factors, movies)
        elapsed = 0
        for step in np.arange(steps):
            start = t.start()
            for idx in np.arange(len(train_set)):
                # ids are 1-based in the data, 0-based in the matrices.
                user_id = train_set[idx, 0] - 1
                item_id = train_set[idx, 1] - 1
                actual = train_set[idx, 2]
                error = actual - np.sum(U[user_id, :] * M[:, item_id])
                # Update U and M
                for k in np.arange(num_factors):
                    U[user_id, k] += learn_rate * (2 * error * M[k, item_id] - regularization * U[user_id, k])
                    M[k, item_id] += learn_rate * (2 * error * U[user_id, k] - regularization * M[k, item_id])
            elapsed += t.start() - start
            x_hat = np.dot(U, M)
            E = x_data - x_hat
            # RMSE over the cells that actually hold a rating.
            intermediate_error = np.sqrt(np.mean(E[np.where(np.isnan(E) == False)] ** 2))
            print("Iteration", step, "out of", steps, "done. Error:", intermediate_error)
        mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        # Apply U and M one last time and return the result
        x_hat = np.dot(U, M)
        x_train = splitmatrix.split_matrix(train_set, users, movies)
        x_test = splitmatrix.split_matrix(test_set, users, movies)
        e_train = x_train - x_hat
        e = x_test - x_hat
        MF_error_train = np.sqrt(np.mean(e_train[np.where(np.isnan(e_train) == False)] ** 2))
        MF_error_test = np.sqrt(np.mean(e[np.where(np.isnan(e) == False)] ** 2))
        print('Error on MF-GD training set :', MF_error_train)
        print('Error on MF-GD test set:', MF_error_test)
        print("Time: " + str(elapsed % 60) + " seconds")
        print("Memory: " + str(mem_usage) + " kilobytes")
if __name__ == "__main__":
    # Run every recommender variant in sequence when executed as a script.
    global_average()
    user_average()
    item_average()
    user_item_average()
    mf_gradient_descent()
| Kyziridis/recommender_system | recommender.py | Python | gpl-3.0 | 11,740 |
from ..utils import type_from_ast, is_valid_literal_value
from ..error import GraphQLError
from ..type.definition import is_composite_type, is_input_type, is_leaf_type, GraphQLNonNull
from ..language import ast
from ..language.visitor import Visitor, visit
from ..language.printer import print_ast
class ValidationRule(Visitor):
    """Base class for all validation rules; holds the shared validation context."""
    def __init__(self, context):
        self.context = context
class UniqueOperationNames(ValidationRule):
    """Validates that no two operations in a document share the same name."""
    def __init__(self, context):
        super(UniqueOperationNames, self).__init__(context)
        # Maps operation name -> the name node where it was first seen.
        self.known_operation_names = {}
    def enter_OperationDefinition(self, node, *args):
        name_node = node.name
        if not name_node:
            return
        previous = self.known_operation_names.get(name_node.value)
        if previous is not None:
            return GraphQLError(
                self.duplicate_operation_name_message(name_node.value),
                [previous, name_node]
            )
        self.known_operation_names[name_node.value] = name_node
    @staticmethod
    def duplicate_operation_name_message(operation_name):
        return 'There can only be one operation named "{}".'.format(operation_name)
class LoneAnonymousOperation(ValidationRule):
    """Validates that an anonymous operation is the only operation defined."""
    operation_count = 0
    def __init__(self, context):
        super(LoneAnonymousOperation, self).__init__(context)
    def enter_Document(self, node, *args):
        # Count how many operations the document defines up front.
        operations = [definition for definition in node.definitions
                      if isinstance(definition, ast.OperationDefinition)]
        self.operation_count = len(operations)
    def enter_OperationDefinition(self, node, *args):
        if self.operation_count > 1 and not node.name:
            return GraphQLError(self.anonymous_operation_not_alone_message(), [node])
    @staticmethod
    def anonymous_operation_not_alone_message():
        return 'This anonymous operation must be the only defined operation.'
class KnownTypeNames(ValidationRule):
    """Validates that every named type referenced exists in the schema."""
    def enter_NamedType(self, node, *args):
        name = node.name.value
        if self.context.get_schema().get_type(name):
            return
        return GraphQLError(self.unknown_type_message(name), [node])
    @staticmethod
    def unknown_type_message(type):
        return 'Unknown type "{}".'.format(type)
class FragmentsOnCompositeTypes(ValidationRule):
    """Validates that fragments only condition on composite types."""
    def enter_InlineFragment(self, node, *args):
        fragment_type = self.context.get_type()
        if not fragment_type or is_composite_type(fragment_type):
            return
        return GraphQLError(
            self.inline_fragment_on_non_composite_error_message(
                print_ast(node.type_condition)),
            [node.type_condition]
        )
    def enter_FragmentDefinition(self, node, *args):
        fragment_type = self.context.get_type()
        if not fragment_type or is_composite_type(fragment_type):
            return
        return GraphQLError(
            self.fragment_on_non_composite_error_message(
                node.name.value, print_ast(node.type_condition)),
            [node.type_condition]
        )
    @staticmethod
    def inline_fragment_on_non_composite_error_message(type):
        return 'Fragment cannot condition on non composite type "{}".'.format(type)
    @staticmethod
    def fragment_on_non_composite_error_message(frag_name, type):
        return 'Fragment "{}" cannot condition on non composite type "{}".'.format(frag_name, type)
class VariablesAreInputTypes(ValidationRule):
    """Validates that every declared variable has an input type."""
    def enter_VariableDefinition(self, node, *args):
        variable_type = type_from_ast(self.context.get_schema(), node.type)
        if not variable_type or is_input_type(variable_type):
            return
        return GraphQLError(
            self.non_input_type_on_variable_message(
                node.variable.name.value, print_ast(node.type)),
            [node.type]
        )
    @staticmethod
    def non_input_type_on_variable_message(variable_name, type_name):
        return 'Variable "${}" cannot be non-input type "{}".'.format(variable_name, type_name)
class ScalarLeafs(ValidationRule):
    """Validates that leaf fields have no sub selection and composite fields do."""
    def enter_Field(self, node, *args):
        field_type = self.context.get_type()
        if not field_type:
            return
        leaf = is_leaf_type(field_type)
        has_selection = bool(node.selection_set)
        if leaf and has_selection:
            # Scalars and enums cannot be selected into.
            return GraphQLError(
                self.no_subselection_allowed_message(node.name.value, field_type),
                [node.selection_set]
            )
        if not leaf and not has_selection:
            # Composite types must say which of their fields to fetch.
            return GraphQLError(
                self.required_subselection_message(node.name.value, field_type),
                [node]
            )
    @staticmethod
    def no_subselection_allowed_message(field, type):
        return 'Field "{}" of type "{}" must not have a sub selection.'.format(field, type)
    @staticmethod
    def required_subselection_message(field, type):
        return 'Field "{}" of type "{}" must have a sub selection.'.format(field, type)
class FieldsOnCorrectType(ValidationRule):
    """Validates that each queried field is defined on its parent type."""
    def enter_Field(self, node, *args):
        parent_type = self.context.get_parent_type()
        if not parent_type:
            return
        field_def = self.context.get_field_def()
        if field_def:
            return
        return GraphQLError(
            self.undefined_field_message(node.name.value, parent_type.name),
            [node]
        )
    @staticmethod
    def undefined_field_message(field_name, type):
        return 'Cannot query field "{}" on "{}".'.format(field_name, type)
class UniqueFragmentNames(ValidationRule):
    """Validates that no two fragments in a document share a name."""
    def __init__(self, context):
        super(UniqueFragmentNames, self).__init__(context)
        # Maps fragment name -> the name node where it was first defined.
        self.known_fragment_names = {}
    def enter_FragmentDefinition(self, node, *args):
        name = node.name.value
        previous = self.known_fragment_names.get(name)
        if previous is not None:
            return GraphQLError(
                self.duplicate_fragment_name_message(name),
                [previous, node.name]
            )
        self.known_fragment_names[name] = node.name
    @staticmethod
    def duplicate_fragment_name_message(field):
        return 'There can only be one fragment named "{}".'.format(field)
class KnownFragmentNames(ValidationRule):
    """Validates that every spread fragment is defined in the document."""
    def enter_FragmentSpread(self, node, *args):
        name = node.name.value
        if self.context.get_fragment(name):
            return
        return GraphQLError(
            self.unknown_fragment_message(name),
            [node.name]
        )
    @staticmethod
    def unknown_fragment_message(fragment_name):
        return 'Unknown fragment "{}".'.format(fragment_name)
class NoUnusedFragments(ValidationRule):
    """Validates that every fragment defined in the document is spread at
    least once, directly or transitively, from some operation."""
    def __init__(self, context):
        super(NoUnusedFragments, self).__init__(context)
        # All FragmentDefinition nodes seen in the document.
        self.fragment_definitions = []
        # One set of directly-spread fragment names per operation.
        self.spreads_within_operation = []
        # fragment name -> names of fragments it spreads directly.
        self.fragment_adjacencies = {}
        # The set currently being filled (belongs to the enclosing
        # operation or fragment being visited).
        self.spread_names = set()
    def enter_OperationDefinition(self, *args):
        # Start collecting the spreads of this operation.
        self.spread_names = set()
        self.spreads_within_operation.append(self.spread_names)
    def enter_FragmentDefinition(self, node, *args):
        self.fragment_definitions.append(node)
        # Start collecting the spreads of this fragment.
        self.spread_names = set()
        self.fragment_adjacencies[node.name.value] = self.spread_names
    def enter_FragmentSpread(self, node, *args):
        self.spread_names.add(node.name.value)
    def leave_Document(self, *args):
        fragment_names_used = set()
        # Depth-first walk over the spread graph, marking every fragment
        # reachable from an operation as used.
        def reduce_spread_fragments(spreads):
            for fragment_name in spreads:
                if fragment_name in fragment_names_used:
                    continue
                fragment_names_used.add(fragment_name)
                if fragment_name in self.fragment_adjacencies:
                    reduce_spread_fragments(self.fragment_adjacencies[fragment_name])
        for spreads in self.spreads_within_operation:
            reduce_spread_fragments(spreads)
        # Any defined fragment that was never reached is unused.
        errors = [
            GraphQLError(
                self.unused_fragment_message(fragment_definition.name.value),
                [fragment_definition]
            )
            for fragment_definition in self.fragment_definitions
            if fragment_definition.name.value not in fragment_names_used
        ]
        if errors:
            return errors
    @staticmethod
    def unused_fragment_message(fragment_name):
        return 'Fragment "{}" is never used.'.format(fragment_name)
class PossibleFragmentSpreads(ValidationRule):
    # TODO: not implemented — should validate that a fragment spread's type
    # condition can possibly apply at the location where it is spread.
    pass
class NoFragmentCycles(ValidationRule):
    """Validates that fragment spreads form no cycles (a fragment may not
    spread itself, directly or transitively)."""
    def __init__(self, context):
        super(NoFragmentCycles, self).__init__(context)
        # fragment name -> list of FragmentSpread nodes it contains.
        self.spreads_in_fragment = {
            node.name.value: self.gather_spreads(node)
            for node in context.get_ast().definitions
            if isinstance(node, ast.FragmentDefinition)
        }
        # Hashable ids of spread nodes already reported as part of a cycle,
        # so the same cycle is not reported again from another entry point.
        self.known_to_lead_to_cycle = set()
    def enter_FragmentDefinition(self, node, *args):
        errors = []
        initial_name = node.name.value
        spread_path = []
        # This will convert the ast.FragmentDefinition to something that we can add
        # to a set. Otherwise we get a `unhashable type: dict` error.
        # This makes it so that we can define a way to uniquely identify a FragmentDefinition
        # within a set.
        fragment_node_to_hashable = lambda fs: (fs.loc['start'], fs.loc['end'], fs.name.value)
        def detect_cycle_recursive(fragment_name):
            spread_nodes = self.spreads_in_fragment[fragment_name]
            for spread_node in spread_nodes:
                if fragment_node_to_hashable(spread_node) in self.known_to_lead_to_cycle:
                    # This spread is part of an already-reported cycle.
                    continue
                if spread_node.name.value == initial_name:
                    # Found a path back to the starting fragment: report once.
                    cycle_path = spread_path + [spread_node]
                    self.known_to_lead_to_cycle |= set(map(fragment_node_to_hashable, cycle_path))
                    errors.append(GraphQLError(
                        self.cycle_error_message(initial_name, [s.name.value for s in spread_path]),
                        cycle_path
                    ))
                    continue
                if any(spread is spread_node for spread in spread_path):
                    # Already on the current path; avoid infinite recursion.
                    continue
                spread_path.append(spread_node)
                detect_cycle_recursive(spread_node.name.value)
                spread_path.pop()
        detect_cycle_recursive(initial_name)
        if errors:
            return errors
    @staticmethod
    def cycle_error_message(fragment_name, spread_names):
        # Mention the chain of spreads when the cycle is indirect.
        via = ' via {}'.format(', '.join(spread_names)) if spread_names else ''
        return 'Cannot spread fragment "{}" within itself{}.'.format(fragment_name, via)
    @classmethod
    def gather_spreads(cls, node):
        # Collect every FragmentSpread node anywhere below `node`.
        visitor = cls.CollectFragmentSpreadNodesVisitor()
        visit(node, visitor)
        return visitor.collect_fragment_spread_nodes()
    class CollectFragmentSpreadNodesVisitor(Visitor):
        # Helper visitor that records each FragmentSpread it encounters.
        def __init__(self):
            self.spread_nodes = []
        def enter_FragmentSpread(self, node, *args):
            self.spread_nodes.append(node)
        def collect_fragment_spread_nodes(self):
            return self.spread_nodes
class NoUndefinedVariables(ValidationRule):
    """Validates that every variable used is declared by the operation in
    which it is used (following spread fragments)."""
    # Tell the visitor to descend into spread fragments as well.
    visit_spread_fragments = True
    # The operation currently being visited.
    operation = None
    def __init__(self, context):
        self.visited_fragment_names = set()
        self.defined_variable_names = set()
        super(NoUndefinedVariables, self).__init__(context)
    @staticmethod
    def undefined_var_message(var_name):
        return 'Variable "${}" is not defined.'.format(var_name)
    @staticmethod
    def undefined_var_by_op_message(var_name, op_name):
        return 'Variable "${}" is not defined by operation "{}".'.format(
            var_name, op_name
        )
    def enter_OperationDefinition(self, node, *args):
        # Reset the per-operation state.
        self.operation = node
        self.visited_fragment_names = set()
        self.defined_variable_names = set()
    def enter_VariableDefinition(self, node, *args):
        self.defined_variable_names.add(node.variable.name.value)
    def enter_Variable(self, variable, key, parent, path, ancestors):
        var_name = variable.name.value
        if var_name not in self.defined_variable_names:
            # Report differently when the usage sits inside a fragment.
            within_fragment = any(isinstance(node, ast.FragmentDefinition) for node in ancestors)
            if within_fragment and self.operation and self.operation.name:
                return GraphQLError(
                    self.undefined_var_by_op_message(var_name, self.operation.name.value),
                    [variable, self.operation]
                )
            return GraphQLError(
                self.undefined_var_message(var_name),
                [variable]
            )
    def enter_FragmentSpread(self, spread_ast, *args):
        # Visit each fragment only once per operation.
        if spread_ast.name.value in self.visited_fragment_names:
            return False
        self.visited_fragment_names.add(spread_ast.name.value)
class NoUnusedVariables(ValidationRule):
    """Validates that every variable defined by an operation is used at
    least once within it (following spread fragments)."""
    visited_fragment_names = None
    variable_definitions = None
    variable_name_used = None
    # Descend into spread fragments so usages inside them count.
    visit_spread_fragments = True
    def __init__(self, context):
        super(NoUnusedVariables, self).__init__(context)
    def enter_OperationDefinition(self, *args):
        # Reset the per-operation state.
        self.visited_fragment_names = set()
        self.variable_definitions = []
        self.variable_name_used = set()
    def leave_OperationDefinition(self, *args):
        # Every defined variable that was never referenced is an error.
        errors = [
            GraphQLError(
                self.unused_variable_message(variable_definition.variable.name.value),
                [variable_definition]
            )
            for variable_definition in self.variable_definitions
            if variable_definition.variable.name.value not in self.variable_name_used
        ]
        if errors:
            return errors
    def enter_VariableDefinition(self, node, *args):
        if self.variable_definitions is not None:
            self.variable_definitions.append(node)
        # Do not descend into the definition: its Variable node is not a usage.
        return False
    def enter_Variable(self, node, *args):
        if self.variable_name_used is not None:
            self.variable_name_used.add(node.name.value)
    def enter_FragmentSpread(self, node, *args):
        # Visit each fragment only once per operation.
        if self.visited_fragment_names is not None:
            spread_name = node.name.value
            if spread_name in self.visited_fragment_names:
                return False
            self.visited_fragment_names.add(spread_name)
    @staticmethod
    def unused_variable_message(variable_name):
        return 'Variable "${}" is never used.'.format(variable_name)
class KnownDirectives(ValidationRule):
    """Validates that directives exist and appear in a supported location."""
    def enter_Directive(self, node, key, parent, path, ancestors):
        directive_def = None
        for candidate in self.context.get_schema().get_directives():
            if candidate.name == node.name.value:
                directive_def = candidate
                break
        if directive_def is None:
            return GraphQLError(
                self.unknown_directive_message(node.name.value),
                [node]
            )
        applied_to = ancestors[-1]
        # (AST node types, allowed flag on the directive definition, label)
        placements = (
            ((ast.OperationDefinition,), directive_def.on_operation, 'operation'),
            ((ast.Field,), directive_def.on_field, 'field'),
            ((ast.FragmentSpread, ast.InlineFragment, ast.FragmentDefinition),
             directive_def.on_fragment, 'fragment'),
        )
        for node_types, allowed, label in placements:
            if isinstance(applied_to, node_types) and not allowed:
                return GraphQLError(
                    self.misplaced_directive_message(node.name.value, label),
                    [node]
                )
    @staticmethod
    def unknown_directive_message(directive_name):
        return 'Unknown directive "{}".'.format(directive_name)
    @staticmethod
    def misplaced_directive_message(directive_name, placement):
        return 'Directive "{}" may not be used on "{}".'.format(directive_name, placement)
class KnownArgumentNames(ValidationRule):
    """Validates that arguments are defined on their field or directive."""
    def enter_Argument(self, node, key, parent, path, ancestors):
        # The direct parent tells us whether this argument belongs to a
        # field or to a directive.
        argument_of = ancestors[-1]
        if isinstance(argument_of, ast.Field):
            field_def = self.context.get_field_def()
            if not field_def:
                return
            field_arg_def = next((arg for arg in field_def.args if arg.name == node.name.value), None)
            if not field_arg_def:
                parent_type = self.context.get_parent_type()
                assert parent_type
                return GraphQLError(
                    self.unknown_arg_message(node.name.value, field_def.name, parent_type.name),
                    [node]
                )
        elif isinstance(argument_of, ast.Directive):
            directive = self.context.get_directive()
            if not directive:
                return
            directive_arg_def = next((arg for arg in directive.args if arg.name == node.name.value), None)
            if not directive_arg_def:
                return GraphQLError(
                    self.unknown_directive_arg_message(node.name.value, directive.name),
                    [node]
                )
    @staticmethod
    def unknown_arg_message(arg_name, field_name, type):
        return 'Unknown argument "{}" on field "{}" of type "{}".'.format(arg_name, field_name, type)
    @staticmethod
    def unknown_directive_arg_message(arg_name, directive_name):
        return 'Unknown argument "{}" on directive "@{}".'.format(arg_name, directive_name)
class UniqueArgumentNames(ValidationRule):
    """Validates that no field or directive repeats an argument name."""
    def __init__(self, context):
        super(UniqueArgumentNames, self).__init__(context)
        self.known_arg_names = {}
    def enter_Field(self, *args):
        # Argument names only need to be unique per field...
        self.known_arg_names = {}
    def enter_Directive(self, *args):
        # ...and per directive.
        self.known_arg_names = {}
    def enter_Argument(self, node, *args):
        name = node.name.value
        previous = self.known_arg_names.get(name)
        if previous is not None:
            return GraphQLError(
                self.duplicate_arg_message(name),
                [previous, node.name]
            )
        self.known_arg_names[name] = node.name
    @staticmethod
    def duplicate_arg_message(field):
        return 'There can only be one argument named "{}".'.format(field)
class ArgumentsOfCorrectType(ValidationRule):
    """Validates that literal argument values match the argument's type."""
    def enter_Argument(self, node, *args):
        arg_def = self.context.get_argument()
        if not arg_def:
            return
        if is_valid_literal_value(arg_def.type, node.value):
            return
        return GraphQLError(
            self.bad_value_message(node.name.value, arg_def.type,
                                   print_ast(node.value)),
            [node.value]
        )
    @staticmethod
    def bad_value_message(arg_name, type, value):
        return 'Argument "{}" expected type "{}" but got: {}.'.format(arg_name, type, value)
class ProvidedNonNullArguments(ValidationRule):
    """Validates that every non-null (required) argument declared by a field
    or directive definition is provided in the document.

    Refactor: ``leave_Field`` and ``leave_Directive`` contained two verbatim
    copies of the same checking loop; the shared logic now lives in
    ``_missing_arg_errors``.
    """
    def leave_Field(self, node, *args):
        field_def = self.context.get_field_def()
        if not field_def:
            return False
        return self._missing_arg_errors(node, field_def.args,
                                        self.missing_field_arg_message)
    def leave_Directive(self, node, *args):
        directive_def = self.context.get_directive()
        if not directive_def:
            return False
        return self._missing_arg_errors(node, directive_def.args,
                                        self.missing_directive_arg_message)
    @staticmethod
    def _missing_arg_errors(node, arg_defs, message):
        """Return GraphQLErrors for every required argument in ``arg_defs``
        that ``node.arguments`` does not supply, or None when all are given."""
        arg_asts = node.arguments or []
        arg_ast_map = {arg.name.value: arg for arg in arg_asts}
        errors = [
            GraphQLError(message(node.name.value, arg_def.name, arg_def.type),
                         [node])
            for arg_def in arg_defs
            if not arg_ast_map.get(arg_def.name) and
            isinstance(arg_def.type, GraphQLNonNull)
        ]
        if errors:
            return errors
    @staticmethod
    def missing_field_arg_message(name, arg_name, type):
        return 'Field "{}" argument "{}" of type "{}" is required but not provided.'.format(name, arg_name, type)
    @staticmethod
    def missing_directive_arg_message(name, arg_name, type):
        return 'Directive "{}" argument "{}" of type "{}" is required but not provided.'.format(name, arg_name, type)
class DefaultValuesOfCorrectType(ValidationRule):
    """Validates variable defaults: non-null variables may not have one, and
    any default must be a valid literal for the variable's type."""
    def enter_VariableDefinition(self, node, *args):
        name = node.variable.name.value
        default_value = node.default_value
        variable_type = self.context.get_input_type()
        if default_value and isinstance(variable_type, GraphQLNonNull):
            return GraphQLError(
                self.default_for_non_null_arg_message(
                    name, variable_type, variable_type.of_type),
                [default_value]
            )
        if default_value and variable_type and not is_valid_literal_value(variable_type, default_value):
            return GraphQLError(
                self.bad_value_for_default_arg_message(
                    name, variable_type, print_ast(default_value)),
                [default_value]
            )
    @staticmethod
    def default_for_non_null_arg_message(var_name, type, guess_type):
        return 'Variable "${}" of type "{}" is required and will not use the default value. ' \
               'Perhaps you meant to use type "{}".'.format(var_name, type, guess_type)
    @staticmethod
    def bad_value_for_default_arg_message(var_name, type, value):
        return 'Variable "${}" of type "{}" has invalid default value: {}.'.format(var_name, type, value)
class VariablesInAllowedPosition(ValidationRule):
    # TODO: not implemented — should validate that variables are only used in
    # positions whose type they can satisfy.
    pass
class OverlappingFieldsCanBeMerged(ValidationRule):
    # TODO: not implemented — should validate that selections which resolve
    # to the same response key are mergeable.
    pass
| elastic-coders/graphqllib | graphql/core/validation/rules.py | Python | mit | 22,135 |
import os
import unittest
from os.path import abspath
from coalib.results.Diff import Diff
from coalib.results.Result import RESULT_SEVERITY, Result
from coalib.results.ResultFilter import (
ensure_files_present,
filter_results,
remove_range,
remove_result_ranges_diffs)
from coalib.results.SourceRange import SourceRange
class ResultFilterTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
result_filter_test_dir = os.path.join(os.path.split(__file__)[0],
'ResultFilterTestFiles')
self.original_file_name = os.path.join(result_filter_test_dir,
'original_file.txt')
self.modified_file_name = os.path.join(result_filter_test_dir,
'modified_file.txt')
def test_simple_cases(self):
class Origin:
pass
origin_instance = Origin()
original_result = Result.from_values(origin=origin_instance,
message="original",
file="original",
severity=RESULT_SEVERITY.NORMAL,
debug_msg="original")
clone_result = Result.from_values(origin="Origin",
message="original",
file="original",
severity=RESULT_SEVERITY.NORMAL,
debug_msg="original")
wrong_origin_result = Result.from_values(
origin="AnotherOrigin",
message="original",
file="original",
severity=RESULT_SEVERITY.NORMAL,
debug_msg="original")
wrong_message_result = Result.from_values(
origin="Origin",
message="another message",
file="original",
severity=RESULT_SEVERITY.NORMAL,
debug_msg="original")
wrong_severity_result = Result.from_values(
origin="Origin",
message="original",
file="original",
severity=RESULT_SEVERITY.INFO,
debug_msg="original")
wrong_debug_msg_result = Result.from_values(
origin="Origin",
message="original",
file="original",
severity=RESULT_SEVERITY.NORMAL,
debug_msg="another debug message")
file_dict = {abspath("original"): []}
self.assertEqual(sorted(filter_results(original_file_dict=file_dict,
modified_file_dict=file_dict,
original_results=[
original_result],
modified_results=[
clone_result,
wrong_origin_result,
wrong_message_result,
wrong_severity_result,
wrong_debug_msg_result])),
sorted([wrong_origin_result,
wrong_message_result,
wrong_severity_result,
wrong_debug_msg_result]))
def test_affected_code(self):
# ORIGINAL SOURCE RANGES:
sr0_pre_change = SourceRange.from_values("file_name",
start_line=4,
start_column=1,
end_line=4,
end_column=6)
sr0_change = SourceRange.from_values("file_name",
start_line=4,
start_column=8,
end_line=4,
end_column=13)
sr0_post_change = SourceRange.from_values("file_name",
start_line=4,
start_column=15,
end_line=4,
end_column=19)
sr0_pre_remove = SourceRange.from_values("file_name",
start_line=6,
start_column=1,
end_line=6,
end_column=6)
sr0_post_remove = SourceRange.from_values("file_name",
start_line=8,
start_column=1,
end_line=8,
end_column=5)
sr0_pre_addition = SourceRange.from_values("file_name",
start_line=10,
start_column=1,
end_line=10,
end_column=6)
sr0_post_addition = SourceRange.from_values("file_name",
start_line=11,
start_column=1,
end_line=11,
end_column=5)
# ORIGINAL RESULTS:
res0_pre_change = Result(origin="origin",
message="message",
affected_code=(sr0_pre_change,))
res0_change = Result(origin="origin",
message="message",
affected_code=(sr0_change,))
res0_post_change = Result(origin="origin",
message="message",
affected_code=(sr0_post_change,))
res0_around_change = Result(origin="origin",
message="message",
affected_code=(sr0_pre_change,
sr0_post_change))
res0_with_change = Result(origin="origin",
message="message",
affected_code=(sr0_pre_change,
sr0_change,
sr0_post_change))
res0_whole_change = Result.from_values(origin="origin",
message="message",
file="file_name",
line=4,
column=1,
end_line=4,
end_column=19)
res0_pre_remove = Result(origin="origin",
message="message",
affected_code=(sr0_pre_remove,))
res0_post_remove = Result(origin="origin",
message="message",
affected_code=(sr0_post_remove,))
res0_around_remove = Result(origin="origin",
message="message",
affected_code=(sr0_pre_remove,
sr0_post_remove))
res0_whole_remove = Result.from_values(origin="origin",
message="message",
file="file_name",
line=6,
column=1,
end_line=8,
end_column=5)
res0_pre_addition = Result(origin="origin",
message="message",
affected_code=(sr0_pre_addition,))
res0_post_addition = Result(origin="origin",
message="message",
affected_code=(sr0_post_addition,))
res0_around_addition = Result(origin="origin",
message="message",
affected_code=(sr0_pre_addition,
sr0_post_addition))
res0_whole_addition = Result.from_values(origin="origin",
message="message",
file="file_name",
line=10,
column=1,
end_line=11,
end_column=5)
# NEW SOURCE RANGES:
sr1_pre_change = SourceRange.from_values("file_name",
start_line=4,
start_column=1,
end_line=4,
end_column=6)
sr1_change = SourceRange.from_values("file_name",
start_line=4,
start_column=8,
end_line=4,
end_column=13)
sr1_post_change = SourceRange.from_values("file_name",
start_line=4,
start_column=15,
end_line=4,
end_column=19)
sr1_pre_remove = SourceRange.from_values("file_name",
start_line=6,
start_column=1,
end_line=6,
end_column=6)
sr1_post_remove = SourceRange.from_values("file_name",
start_line=7,
start_column=1,
end_line=7,
end_column=5)
sr1_pre_addition = SourceRange.from_values("file_name",
start_line=9,
start_column=1,
end_line=9,
end_column=6)
sr1_addition = SourceRange.from_values("file_name",
start_line=10,
start_column=1,
end_line=10,
end_column=8)
sr1_post_addition = SourceRange.from_values("file_name",
start_line=11,
start_column=1,
end_line=11,
end_column=5)
# NEW RESULTS:
res1_pre_change = Result(origin="origin",
message="message",
affected_code=(sr1_pre_change,))
res1_change = Result(origin="origin",
message="message",
affected_code=(sr1_change,))
res1_post_change = Result(origin="origin",
message="message",
affected_code=(sr1_post_change,))
res1_around_change = Result(origin="origin",
message="message",
affected_code=(sr1_pre_change,
sr1_post_change))
res1_with_change = Result(origin="origin",
message="message",
affected_code=(sr1_pre_change,
sr1_change,
sr1_post_change))
res1_whole_change = Result.from_values(origin="origin",
message="message",
file="file_name",
line=4,
column=1,
end_line=4,
end_column=19)
res1_pre_remove = Result(origin="origin",
message="message",
affected_code=(sr1_pre_remove,))
res1_post_remove = Result(origin="origin",
message="message",
affected_code=(sr1_post_remove,))
res1_around_remove = Result(origin="origin",
message="message",
affected_code=(sr1_pre_remove,
sr1_post_remove))
res1_whole_remove = Result.from_values(origin="origin",
message="message",
file="file_name",
line=6,
column=1,
end_line=7,
end_column=5)
res1_pre_addition = Result(origin="origin",
message="message",
affected_code=(sr1_pre_addition,))
res1_addition = Result(origin="origin",
message="message",
affected_code=(sr1_addition,))
res1_post_addition = Result(origin="origin",
message="message",
affected_code=(sr1_post_addition,))
res1_around_addition = Result(origin="origin",
message="message",
affected_code=(sr1_pre_addition,
sr1_post_addition))
res1_with_addition = Result(origin="origin",
message="message",
affected_code=(sr1_pre_addition,
sr1_addition,
sr1_post_addition))
res1_whole_addition = Result.from_values(origin="origin",
message="message",
file="file_name",
line=9,
column=1,
end_line=11,
end_column=5)
original_result_list = [res0_pre_change,
res0_change,
res0_post_change,
res0_around_change,
res0_with_change,
res0_whole_change,
res0_pre_remove,
res0_post_remove,
res0_around_remove,
res0_whole_remove,
res0_pre_addition,
res0_post_addition,
res0_around_addition,
res0_whole_addition]
new_result_list = [res1_pre_change, # FALSE POSITIVE (in-line)
res1_change, # correctly kept
res1_post_change, # FALSE POSITIVE (in-line)
res1_around_change, # FALSE POSITIVE (in-line)
res1_with_change, # correctly kept
res1_whole_change, # correctly kept
res1_pre_remove, # correctly filtered out
res1_post_remove, # FALSE POSITIVE (in-line)
res1_around_remove, # correctly filtered out
res1_whole_remove, # correctly kept
res1_pre_addition, # correctly filtered out
res1_addition, # correctly kept
res1_post_addition, # correctly filtered out
res1_around_addition, # FALSE POSITIVE (close-line)
res1_with_addition, # correctly kept
res1_whole_addition] # correctly kept
unique_new_result_list = [res1_pre_change, # WRONG: line-wise diff
res1_change, # correct
res1_post_change, # WRONG: line-wise diff
res1_around_change, # WRONG: line-wise diff
res1_with_change, # correct
res1_whole_change, # correct
res1_addition, # correct
res1_around_addition, # WRONG: line-wise diff
res1_with_addition, # correct
res1_whole_addition] # correct
with open(self.original_file_name, "r") as original_file:
original_file_dict = {
abspath("file_name"): original_file.readlines()}
with open(self.modified_file_name, "r") as modified_file:
modified_file_dict = {
abspath("file_name"): modified_file.readlines()}
# 'TIS THE IMPORTANT PART
self.assertEqual(sorted(filter_results(original_file_dict,
modified_file_dict,
original_result_list,
new_result_list)),
sorted(unique_new_result_list))
def test_unrelated_file_change(self):
testfile_1 = ['1\n', '2\n']
testfile_2 = ['1\n', '2\n']
testfile_2_new = ['0\n', '1\n', '2\n']
old_result = Result.from_values('origin', 'message', 'tf1', 1)
new_result = Result.from_values('origin', 'message', 'tf1', 1)
tf1 = abspath('tf1')
original_file_dict = {tf1: testfile_1, 'tf2': testfile_2}
modified_file_dict = {tf1: testfile_1, 'tf2': testfile_2_new}
new_results = filter_results(original_file_dict, modified_file_dict,
[old_result], [new_result])
self.assertEqual(new_results, [])
def test_result_range(self):
test_file = ["123456789", "123456789", "123456789", "123456789"]
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
1,
1,
1,
1)),
["23456789", "123456789", "123456789", "123456789"])
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
1,
9,
1,
9)),
["12345678", "123456789", "123456789", "123456789"])
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
1,
3,
1,
7)),
["1289", "123456789", "123456789", "123456789"])
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
1,
3,
2,
7)),
["12", "89", "123456789", "123456789"])
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
1,
3,
3,
7)),
["12", "89", "123456789"])
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
1,
3,
4,
7)),
["12", "89"])
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
None,
None,
None,
None)),
[])
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
None,
None,
3,
None)),
["123456789"])
self.assertEqual(remove_range(test_file,
SourceRange.from_values("file",
3,
None,
3,
None)),
["123456789", "123456789", "123456789"])
    def test_result_range_inline_overlap(self):
        # Three overlapping single-line ranges (cols 1-4, 2-3, 3-6): their
        # union covers columns 1-6, so only "789\n" should remain.
        test_file = ["123456789\n"]
        test_file_dict = {abspath("test_file"): test_file}
        source_range1 = SourceRange.from_values("test_file", 1, 1, 1, 4)
        source_range2 = SourceRange.from_values("test_file", 1, 2, 1, 3)
        source_range3 = SourceRange.from_values("test_file", 1, 3, 1, 6)
        test_result = Result("origin",
                             "message",
                             (source_range1, source_range2, source_range3))
        result_diff = remove_result_ranges_diffs(
            [test_result],
            test_file_dict)[test_result][abspath("test_file")]
        expected_diff = Diff.from_string_arrays(test_file, ["789\n"])
        self.assertEqual(result_diff, expected_diff)
    def test_result_range_line_wise_overlap(self):
        # Two overlapping multi-line ranges (2:2-5:1 and 3:1-4:1): lines 3-4
        # disappear entirely, lines 2 and 5 are trimmed at the boundaries.
        test_file = ["11", "22", "33", "44", "55", "66"]
        test_file_dict = {abspath("test_file"): test_file}
        source_range1 = SourceRange.from_values("test_file", 2, 2, 5, 1)
        source_range2 = SourceRange.from_values("test_file", 3, 1, 4, 1)
        test_result = Result("origin",
                             "message",
                             (source_range1, source_range2))
        result_diff = remove_result_ranges_diffs(
            [test_result],
            test_file_dict)[test_result][abspath("test_file")]
        expected_diff = Diff.from_string_arrays(test_file,
                                                ["11", "2", "5", "66"])
        self.assertEqual(result_diff, expected_diff)
def test_no_range(self):
test_file = ["abc"]
test_file_dict = {abspath("test_file"): test_file}
test_result = Result("origin",
"message")
result_diff = remove_result_ranges_diffs(
[test_result],
test_file_dict)[test_result][abspath("test_file")]
expected_diff = Diff.from_string_arrays(test_file, ["abc"])
self.assertEqual(result_diff, expected_diff)
def test_removed_file(self):
test_file = ["abc"]
test_file_dict = {"test_file": test_file}
test_mod_file_dict = {}
ensure_files_present(test_file_dict, test_mod_file_dict)
self.assertEqual(
test_mod_file_dict,
{"test_file": []})
def test_added_file(self):
test_file = ["abc"]
test_file_dict = {}
test_mod_file_dict = {"test_file": test_file}
ensure_files_present(test_file_dict, test_mod_file_dict)
self.assertEqual(
test_file_dict,
{"test_file": []})
def test_new_file_with_result(self):
testfile_1 = ['1\n', '2\n']
testfile_2_new = ['0\n', '1\n', '2\n']
tf1 = abspath('tf1')
tf2 = abspath('tf2')
old_result = Result.from_values('origin', 'message', 'tf1', 1)
new_result = Result.from_values('origin', 'message', 'tf2', 1)
original_file_dict = {tf1: testfile_1}
modified_file_dict = {tf1: testfile_1, tf2: testfile_2_new}
new_results = filter_results(original_file_dict, modified_file_dict,
[old_result], [new_result])
self.assertEqual(new_results, [new_result])
    def test_delete_file_with_result(self):
        # tf2 disappears from the modified dict; its old result must not
        # prevent the (modified) tf1 result from being reported.
        testfile_1 = ['1\n', '2\n']
        testfile_2 = ['0\n', '1\n', '2\n']
        testfile_1_new = ['0\n', '1\n', '2\n']
        tf1 = abspath('tf1')
        tf2 = abspath('tf2')
        old_result_tf1 = Result.from_values('origin', 'message', 'tf1', 1)
        old_result_tf2 = Result.from_values('origin', 'message', 'tf2', 1)
        new_result = Result.from_values('origin', 'message', 'tf1', 1)
        original_file_dict = {tf1: testfile_1, tf2: testfile_2}
        modified_file_dict = {tf1: testfile_1_new}
        new_results = filter_results(original_file_dict,
                                     modified_file_dict,
                                     [old_result_tf1, old_result_tf2],
                                     [new_result])
        self.assertEqual(new_results, [new_result])
| svsn2117/coala | tests/results/ResultFilterTest.py | Python | agpl-3.0 | 28,171 |
"""
sentry.web.urls
~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from django.conf.urls import *
from django.views.defaults import page_not_found
from sentry.conf.settings import KEY
from sentry.web import views, feeds
handler404 = lambda x: page_not_found(x, template_name='sentry/404.html')
def handler500(request):
    """Render the Sentry-branded page for unhandled server errors.

    Templates: `500.html`
    Context: None
    """
    # Imported lazily so merely importing this urlconf stays cheap.
    from django.http import HttpResponseServerError
    from django.template import Context, loader

    template = loader.get_template('sentry/500.html')
    body = template.render(Context({'request': request}))
    return HttpResponseServerError(body)
urlpatterns = patterns('',
    url(r'^_static/(?P<path>.*)$', views.static_media, name='sentry-media'),
    # Feeds -- KEY is embedded in the URL as a shared secret.
    url(r'^feeds/%s/messages.xml$' % re.escape(KEY), feeds.MessageFeed(), name='sentry-feed-messages'),
    url(r'^feeds/%s/summaries.xml$' % re.escape(KEY), feeds.SummaryFeed(), name='sentry-feed-summaries'),
    # JS and API
    url(r'^jsapi/$', views.ajax_handler, name='sentry-ajax'),
    url(r'^store/$', views.store, name='sentry-store'),
    # Normal views
    url(r'^login$', views.login, name='sentry-login'),
    url(r'^logout$', views.logout, name='sentry-logout'),
    url(r'^group/(\d+)$', views.group, name='sentry-group'),
    url(r'^group/(\d+)/messages$', views.group_message_list, name='sentry-group-messages'),
    url(r'^group/(\d+)/messages/(\d+)$', views.group_message_details, name='sentry-group-message'),
    url(r'^group/(\d+)/actions/([\w_-]+)', views.group_plugin_action, name='sentry-group-plugin-action'),
    url(r'^search$', views.search, name='sentry-search'),
    url(r'^$', views.index, name='sentry'),
)
| primepix/django-sentry | sentry/web/urls.py | Python | bsd-3-clause | 1,832 |
from pageobjects.base import PageObject
from pageobjects.settings import SettingsFooter
class DisksSettings(PageObject, SettingsFooter):
    """Page object for the node disks configuration tab."""

    @property
    def disks(self):
        # One Disk wrapper per top-level disk row on the page.
        disk_elements = self.parent.find_elements_by_css_selector(
            'div.node-disks > div')
        return [Disk(disk_el) for disk_el in disk_elements]
class Disk(PageObject):
    """Page object wrapping a single disk row and its volume allocations."""
    # XPath template: given a detail name (e.g. 'model'), locate the matching
    # value cell in the disk details panel.
    XPATH_INFORMATION_ITEM = './/div[@class="disk-map-details-item" and ' \
                             'div[@class="disk-map-details-name"]="{}"]' \
                             '/div[@class="disk-map-details-parameter"]'
    def __init__(self, element):
        PageObject.__init__(self, element)
    # --- allocated volume bars within the disk map -------------------------
    @property
    def volume_os(self):
        return Volume(self.parent.find_element_by_css_selector(
            'div.volume-group.os > .toggle-volume'))
    @property
    def volume_image(self):
        return Volume(self.parent.find_element_by_css_selector(
            'div.volume-group.image > .toggle-volume'))
    @property
    def volume_storage(self):
        # 'vm' is the CSS class used for virtual-machine storage volumes.
        return Volume(self.parent.find_element_by_css_selector(
            'div.volume-group.vm > .toggle-volume'))
    @property
    def volume_unallocated(self):
        return Volume(self.parent.find_element_by_css_selector(
            'div.volume-group.unallocated > .toggle-volume'))
    # --- editable volume group boxes (with size inputs) --------------------
    @property
    def volume_group_os(self):
        return VolumeGroup(self.parent.find_element_by_css_selector(
            'div.volume-group-box[data-volume=os]'
        ))
    @property
    def volume_group_image(self):
        return VolumeGroup(self.parent.find_element_by_css_selector(
            'div.volume-group-box[data-volume=image]'
        ))
    @property
    def volume_group_storage(self):
        return VolumeGroup(self.parent.find_element_by_css_selector(
            'div.volume-group-box[data-volume=vm]'
        ))
    # --- disk details panel ------------------------------------------------
    @property
    def details_panel(self):
        return self.parent.find_element_by_css_selector('.disk-map-details')
    @property
    def name(self):
        return self.parent.find_element_by_xpath(
            self.XPATH_INFORMATION_ITEM.format('name'))
    @property
    def model(self):
        return self.parent.find_element_by_xpath(
            self.XPATH_INFORMATION_ITEM.format('model'))
    @property
    def disk(self):
        return self.parent.find_element_by_xpath(
            self.XPATH_INFORMATION_ITEM.format('disk'))
    @property
    def size(self):
        return self.parent.find_element_by_xpath(
            self.XPATH_INFORMATION_ITEM.format('size'))
class Volume(PageObject):
    """Page object for one allocated volume bar inside a disk map."""
    def __init__(self, element):
        PageObject.__init__(self, element)
    @property
    def name(self):
        return self.parent.find_element_by_css_selector('.volume-group-name')
    @property
    def size(self):
        return self.parent.find_element_by_css_selector('.volume-group-size')
    @property
    def close_cross(self):
        # The close button lives in a sibling of the toggle element, hence
        # the relative XPath via the parent node.
        return self.parent.\
            find_element_by_xpath('./../div[contains(@class, "close-btn")]')
class VolumeGroup(PageObject):
    """Page object for an editable volume group box (name, size input)."""
    def __init__(self, element):
        PageObject.__init__(self, element)
    @property
    def name(self):
        return self.parent.\
            find_element_by_css_selector('.volume-group-box-name')
    @property
    def use_all(self):
        # The "use all allowed space" shortcut control.
        return self.parent.find_element_by_css_selector('.use-all-allowed')
    @property
    def input(self):
        return self.parent.find_element_by_tag_name('input')
| ddepaoli3/fuel-main-dev | fuelweb_ui_test/pageobjects/node_disks_settings.py | Python | apache-2.0 | 3,464 |
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Device classes to interact with targets via RPC."""
import datetime
import logging
from pathlib import Path
from types import ModuleType
from typing import Any, Callable, List, Union, Optional
from pw_hdlc.rpc import HdlcRpcClient, default_channels
import pw_log_tokenized
from pw_log.proto import log_pb2
from pw_rpc import callback_client, console_tools
from pw_status import Status
from pw_tokenizer.detokenize import Detokenizer
from pw_tokenizer.proto import decode_optionally_tokenized
# Internal log for troubleshooting this tool (the console).
_LOG = logging.getLogger('tools')
DEFAULT_DEVICE_LOGGER = logging.getLogger('rpc_device')
class Device:
    """Represents an RPC Client for a device running a Pigweed target.
    The target must have RPC support and RPC logging.
    Note: use this class as a base for specialized device representations.
    """
    def __init__(self,
                 channel_id: int,
                 read,
                 write,
                 proto_library: List[Union[ModuleType, Path]],
                 detokenizer: Optional[Detokenizer],
                 timestamp_decoder: Optional[Callable[[int], str]],
                 rpc_timeout_s=5):
        # `read`/`write` are the transport callables handed to the HDLC RPC
        # client (read for incoming bytes, write for the default channels).
        self.channel_id = channel_id
        self.protos = proto_library
        self.detokenizer = detokenizer
        self.logger = DEFAULT_DEVICE_LOGGER
        self.logger.setLevel(logging.DEBUG)  # Allow all device logs through.
        self.timestamp_decoder = timestamp_decoder
        self._expected_log_sequence_id = 0
        callback_client_impl = callback_client.Impl(
            default_unary_timeout_s=rpc_timeout_s,
            default_stream_timeout_s=None,
        )
        # Non-RPC HDLC frames are routed to the device logger verbatim.
        self.client = HdlcRpcClient(
            read,
            self.protos,
            default_channels(write),
            lambda data: self.logger.info("%s", str(data)),
            client_impl=callback_client_impl)
        # Start listening to logs as soon as possible.
        self.listen_to_log_stream()
    def info(self) -> console_tools.ClientInfo:
        return console_tools.ClientInfo('device', self.rpcs,
                                        self.client.client)
    @property
    def rpcs(self) -> Any:
        """Returns an object for accessing services on the specified channel."""
        return next(iter(self.client.client.channels())).rpcs
    def listen_to_log_stream(self):
        """Opens a log RPC for the device's unrequested log stream.
        The RPCs remain open until the server cancels or closes them, either
        with a response or error packet.
        """
        self.rpcs.pw.log.Logs.Listen.open(
            on_next=lambda _, log_entries_proto: self.
            _log_entries_proto_parser(log_entries_proto),
            on_completed=lambda _, status: _LOG.info(
                'Log stream completed with status: %s', status),
            on_error=lambda _, error: self._handle_log_stream_error(error))
    def _handle_log_stream_error(self, error: Status):
        """Resets the log stream RPC on error to avoid losing logs."""
        _LOG.error('Log stream error: %s', error)
        # Only re-request logs if the RPC was not cancelled by the client.
        if error != Status.CANCELLED:
            self.listen_to_log_stream()
    def _handle_log_drop_count(self, drop_count: int):
        # Emit a synthetic warning log entry to surface dropped messages.
        message = f'Dropped {drop_count} log'
        if drop_count > 1:
            message += 's'
        self._emit_device_log(logging.WARNING, '', '', '', message)
    def _check_for_dropped_logs(self, log_entries_proto: log_pb2.LogEntries):
        # Count log messages received that don't use the dropped field.
        messages_received = sum(1 if not log_proto.dropped else 0
                                for log_proto in log_entries_proto.entries)
        # A gap between the expected and reported sequence id means entries
        # were lost somewhere between the device and this client.
        dropped_log_count = (log_entries_proto.first_entry_sequence_id -
                             self._expected_log_sequence_id)
        self._expected_log_sequence_id = (
            log_entries_proto.first_entry_sequence_id + messages_received)
        if dropped_log_count > 0:
            self._handle_log_drop_count(dropped_log_count)
        elif dropped_log_count < 0:
            _LOG.error('Log sequence ID is smaller than expected')
    def _log_entries_proto_parser(self, log_entries_proto: log_pb2.LogEntries):
        """Decodes a batch of log entries and emits them to the device logger."""
        self._check_for_dropped_logs(log_entries_proto)
        for log_proto in log_entries_proto.entries:
            decoded_timestamp = self.decode_timestamp(log_proto.timestamp)
            # Parse level and convert to logging module level number.
            level = (log_proto.line_level & 0x7) * 10
            if self.detokenizer:
                message = str(
                    decode_optionally_tokenized(self.detokenizer,
                                                log_proto.message))
            else:
                message = log_proto.message.decode("utf-8")
            log = pw_log_tokenized.FormatStringWithMetadata(message)
            # Handle dropped count.
            if log_proto.dropped:
                self._handle_log_drop_count(log_proto.dropped)
                # NOTE(review): this `return` abandons any remaining entries
                # in the batch after a drop marker -- confirm `continue` was
                # not intended here.
                return
            self._emit_device_log(level, '', decoded_timestamp, log.module,
                                  log.message, **dict(log.fields))
    def _emit_device_log(self, level: int, source_name: str, timestamp: str,
                         module_name: str, message: str, **metadata_fields):
        # Fields used for console table view
        fields = metadata_fields
        fields['source_name'] = source_name
        fields['timestamp'] = timestamp
        fields['msg'] = message
        fields['module'] = module_name
        # Format used for file or stdout logging.
        self.logger.log(level,
                        '[%s] %s %s%s',
                        source_name,
                        timestamp,
                        f'{module_name} '.lstrip(),
                        message,
                        extra=dict(extra_metadata_fields=fields))
    def decode_timestamp(self, timestamp: int) -> str:
        """Decodes timestamp to a human-readable value.
        Defaults to interpreting the input timestamp as nanoseconds since boot.
        Devices can override this to match their timestamp units.
        """
        if self.timestamp_decoder:
            return self.timestamp_decoder(timestamp)
        return str(datetime.timedelta(seconds=timestamp / 1e9))[:-3]
| google/pigweed | pw_system/py/pw_system/device.py | Python | apache-2.0 | 7,016 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Fall back to the project settings unless the environment overrides them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bheemboy.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| s-gv/bheemboy | manage.py | Python | mit | 252 |
from js_helper import _do_test_raw
def test_pref_innocuous_branch():
    """
    Tests that innocuous preferences created outside of the "extensions."
    branch from defaults/preferences/*.js files throw warnings, and that ones
    created in proper branches don't.
    """
    assert _do_test_raw("""
    pref("foo.bar", true);
    """, path='defaults/preferences/prefs.js').failed()
    assert _do_test_raw("""
    user_pref("foo.bar", true);
    """, path='defaults/preferences/prefs.js').failed()
    # A pref directly at the "extensions." root (no sub-branch) is flagged too.
    assert _do_test_raw("""
    pref("extensions.foo-bar", true);
    """, path='defaults/preferences/prefs.js').failed()
    # Only a properly namespaced "extensions.<addon>.<key>" pref passes.
    assert not _do_test_raw("""
    pref("extensions.foo-bar.baz", true);
    """, path='defaults/preferences/prefs.js').failed()
def test_unicode_pref():
    """Tests that non-ASCII preferences do not cause errors."""
    # Literal non-ASCII character in the pref name.
    assert not _do_test_raw("""
    pref("extensions.foo-bar.\u263a", true);
    """, path='defaults/preferences/prefs.js').failed()
    # Escaped \\uXXXX sequence in the pref name.
    assert not _do_test_raw("""
    pref("extensions.foo-bar.\\u263a", true);
    """, path='defaults/preferences/prefs.js').failed()
def test_pref_dangerous_branch():
    """
    Test that preferences created in dangerous branches from
    defaults/preferences/*.js files throw warnings.
    """
    # Both pref() and user_pref() forms must be flagged.
    assert _do_test_raw("""
    pref("extensions.getAddons.get.url", "http://evil.com/");
    """, path='defaults/preferences/prefs.js').failed()
    assert _do_test_raw("""
    user_pref("extensions.getAddons.get.url", "http://evil.com/");
    """, path='defaults/preferences/prefs.js').failed()
| kmaglione/amo-validator | tests/test_js_prefs.py | Python | bsd-3-clause | 1,581 |
'''
Created on Jun 22, 2012
@author: eric
'''
import unittest
from testbundle.bundle import Bundle
from databundles.identity import * #@UnusedWildImport
from test_base import TestBase
class Test(TestBase):
def setUp(self):
self.copy_or_build_bundle()
self.bundle = Bundle()
self.bundle_dir = self.bundle.bundle_dir
def save_bundle(self):
pass
def restore_bundle(self):
pass
def test_objectnumber(self):
values = ['a17PY5','c17PY50a','d17PY50a0a','b17PY500a']
for v in values:
x = ObjectNumber.parse(v)
self.assertEquals(v, str(x))
dn = DatasetNumber()
base = str(dn)[1:]
tn = TableNumber(dn, 10)
self.assertEquals('c'+base+'0a',str(tn))
cn = ColumnNumber(tn, 20)
self.assertEquals('d'+base+'0a0k',str(cn))
pn = PartitionNumber(dn, 30)
self.assertEquals('b'+base+'00u',str(pn))
return True
self.assertEquals('a1',str(ObjectNumber(1)))
self.assertEquals('b101',str(ObjectNumber(1,1)))
self.assertEquals('c10101',str(ObjectNumber(1,1,1)))
with self.assertRaises(ValueError):
self.assertEquals('aFooBar',str(ObjectNumber('FooBar')))
self.assertEquals('aFooBar',str(ObjectNumber('aFooBar')))
self.assertEquals('aFooBar',str(ObjectNumber(ObjectNumber('aFooBar'))))
on = ObjectNumber('aFooBar')
self.assertEquals('bFooBar00',str(ObjectNumber(on,0)))
self.assertEquals('cFooBar0000',str(ObjectNumber(on,0,0)))
self.assertEquals('bFooBarZZ',str(ObjectNumber(on,3843)))
self.assertEquals('cFooBarZZZZ',str(ObjectNumber(on,3843,3843)))
with self.assertRaises(ValueError):
on = ObjectNumber(on,3844)
print str(on)
with self.assertRaises(ValueError):
on = ObjectNumber(on,3844,3844)
print str(on)
o = ObjectNumber('aFooBar')
self.assertIsNone(o.table);
self.assertIsNone(o.column);
o = ObjectNumber('bFooBar03')
self.assertEquals(3,o.table);
self.assertIsNone(o.column);
o = ObjectNumber('cFooBar0302')
self.assertEquals(3,o.table);
self.assertEquals(2,o.column);
o = ObjectNumber('cFooBar0302',20)
o.type = ObjectNumber.TYPE.TABLE
self.assertEquals(20,o.table);
self.assertEquals('bFooBar0k',str(o))
def test_identity(self):
self.assertEqual('source', self.bundle.identity.source)
self.assertEqual('dataset', self.bundle.identity.dataset)
self.assertEqual('subset', self.bundle.identity.subset)
self.assertEqual('variation', self.bundle.identity.variation)
self.assertEqual('creator', self.bundle.identity.creator)
self.assertEqual(1, int(self.bundle.identity.revision))
self.assertEqual('source-dataset-subset-variation-ca0d',
self.bundle.identity.name)
def test_db_bundle(self):
from databundles.bundle import BuildBundle, DbBundle
b = BuildBundle(self.bundle_dir)
b.clean()
self.assertTrue(b.identity.id_ is not None)
self.assertEquals('source-dataset-subset-variation-ca0d', b.identity.name)
self.assertEquals('source-dataset-subset-variation-ca0d-r1', b.identity.vname)
b.database.create()
db_path = b.database.path
dbb = DbBundle(db_path)
self.assertEqual("source-dataset-subset-variation-ca0d", dbb.identity.name)
self.assertEqual("source-dataset-subset-variation-ca0d-r1", dbb.identity.vname)
self.assertEqual("source-dataset-subset-variation-ca0d", dbb.config.identity.name)
def test_paths(self):
from databundles.bundle import BuildBundle, DbBundle
b = self.bundle
db =DbBundle(b.database.path)
self.assertEqual(b.path, db.path)
self.assertTrue(os.path.exists(b.path))
self.assertEqual( b.database.path, db.database.path)
self.assertTrue(os.path.exists(b.database.path))
self.assertEqual( b.identity.path, db.identity.path)
for p in zip(b.partitions, db.partitions):
self.assertEqual(p[0].path, p[1].path)
self.assertTrue(p[0].path)
def test_schema_direct(self):
'''Test adding tables directly to the schema'''
# If we don't explicitly set the id_, it will change for every run.
self.bundle.config.identity.id_ = 'aTest'
s = self.bundle.schema
s.add_table('table 1', altname='alt name a')
s.add_table('table 2', altname='alt name b')
self.assertRaises(Exception, s.add_table, ('table 1', ))
self.assertIn('c1DxuZ01', [t.id_ for t in self.bundle.schema.tables])
self.assertIn('c1DxuZ02', [t.id_ for t in self.bundle.schema.tables])
self.assertNotIn('cTest03', [t.id_ for t in self.bundle.schema.tables])
t = s.add_table('table 3', altname='alt name')
s.add_column(t,'col 1',altname='altname1')
s.add_column(t,'col 2',altname='altname2')
s.add_column(t,'col 3',altname='altname3')
self.bundle.database.session.commit()
self.assertIn('d1DxuZ0701', [c.id_ for c in t.columns])
self.assertIn('d1DxuZ0702', [c.id_ for c in t.columns])
self.assertIn('d1DxuZ0703', [c.id_ for c in t.columns])
def test_generate_schema(self):
'''Uses the generateSchema method in the bundle'''
from databundles.orm import Column
s = self.bundle.schema
s.clean()
t1 = s.add_table('table1')
s.add_column(t1,name='col1', datatype=Column.DATATYPE_REAL )
s.add_column(t1,name='col2', datatype=Column.DATATYPE_INTEGER )
s.add_column(t1,name='col3', datatype=Column.DATATYPE_TEXT )
t2 = s.add_table('table2')
s.add_column(t2,name='col1' )
s.add_column(t2,name='col2' )
s.add_column(t2,name='col3' )
t3 = s.add_table('table3')
s.add_column(t3,name='col1', datatype=Column.DATATYPE_REAL )
s.add_column(t3,name='col2', datatype=Column.DATATYPE_INTEGER )
s.add_column(t3,name='col3', datatype=Column.DATATYPE_TEXT )
self.bundle.database.session.commit()
def test_names(self):
print self.bundle.identity.path
print self.bundle.database.path
for p in self.bundle.partitions:
print "Part: ",p.identity.path, p.database.path
def test_column_processor(self):
from databundles.orm import Column
from databundles.transform import BasicTransform, CensusTransform
s = self.bundle.schema
s.clean()
t = s.add_table('table3')
s.add_column(t,name='col1', datatype=Column.DATATYPE_INTEGER, default=-1, illegal_value = '999' )
s.add_column(t,name='col2', datatype=Column.DATATYPE_TEXT )
s.add_column(t,name='col3', datatype=Column.DATATYPE_REAL )
self.bundle.database.session.commit()
c1 = t.column('col1')
self.assertEquals(1, BasicTransform(c1)({'col1': ' 1 '}))
with self.assertRaises(ValueError):
print "PROCESSOR '{}'".format(CensusTransform(c1)({'col1': ' B '}))
self.assertEquals(1, CensusTransform(c1)({'col1': ' 1 '}))
self.assertEquals(-1, CensusTransform(c1)({'col1': ' 999 ' }))
self.assertEquals(-3, CensusTransform(c1)({'col1': ' # '}))
self.assertEquals(-2, CensusTransform(c1)({'col1': ' ! '}))
def test_validator(self):
#
# Validators
#
tests =[
( 'tone',True, (None,'VALUE',0,0) ),
( 'tone',True, (None,'VALUE',-1,0) ),
( 'tone',False, (None,'DEFAULT',0,0) ),
( 'tone',False, (None,'DEFAULT',-1,0) ),
( 'ttwo',True, (None,'DEFAULT',0,0) ),
( 'ttwo',True, (None,'DEFAULT',0,3.14) ),
( 'ttwo',False, (None,'DEFAULT',-1,0) ),
( 'tthree',True, (None,'DEFAULT',0,0) ),
( 'tthree',False, (None,'DEFAULT',0,3.14) ),
( 'all',True, (None,'text1','text2',1,2,3,3.14)),
( 'all',False, (None,'text1','text2',-1,-1,3,3.14)),
( 'all',False, (None,'text1','text2',-1,2,3,3.14)),
( 'all',False, (None,'text1','text2',1,-1,3,3.14)),
]
for test in tests:
table_name, truth, row = test
table = self.bundle.schema.table(table_name);
vd =table._get_validator()
if truth:
self.assertTrue(vd(row), "Test not 'true' for table '{}': {}".format(table_name,row))
else:
self.assertFalse(vd(row), "Test not 'false' for table '{}': {}".format(table_name,row))
# Testing the "OR" join of multiple columns.
tests =[
( 'tone',True, (None,'VALUE',0,0) ), #1
( 'tone',True, (None,'VALUE',-1,0) ),
( 'tone',False, (None,'DEFAULT',0,0) ),
( 'tone',False, (None,'DEFAULT',-1,0) ),
( 'ttwo',True, (None,'DEFAULT',0,0) ), #5
( 'ttwo',True, (None,'DEFAULT',0,3.14) ),
( 'ttwo',False, (None,'DEFAULT',-1,0) ),
( 'tthree',True, (None,'DEFAULT',0,0) ), #8
( 'tthree',False, (None,'DEFAULT',0,3.14) ),
( 'all',True, (None,'text1','text2',1,2,3,3.14)), #10
( 'all',False, (None,'text1','text2',-1,-1,3,3.14)), #11
( 'all',True, (None,'text1','text2',-1,2,3,3.14)), #12
( 'all',True, (None,'text1','text2',1,-1,3,3.14)), #13
]
for i, test in enumerate(tests):
table_name, truth, row = test
table = self.bundle.schema.table(table_name);
vd =table._get_validator(and_join=False)
if truth:
self.assertTrue(vd(row), "Test {} not 'true' for table '{}': {}".format(i+1, table_name,row))
else:
self.assertFalse(vd(row), "Test {} not 'false' for table '{}': {}".format(i+1, table_name,row))
# Test the hash functions. This test depends on the d_test values in geoschema.csv
tests =[
( 'tone','A|1|', (None,'A',1,2) ),
( 'ttwo','1|2|', (None,'B',1,2) ),
( 'tthree','C|2|', (None,'C',1,2) )]
import hashlib
for i, test in enumerate(tests):
table_name, hashed_str, row = test
table = self.bundle.schema.table(table_name);
m = hashlib.md5()
m.update(hashed_str)
self.assertEquals(int(m.hexdigest()[:14], 16), table.row_hash(row))
def test_partition(self):
from databundles.partition import PartitionIdentity
## TODO THis does does not test the 'table' parameter of the ParitionId
pid1 = PartitionIdentity(self.bundle.identity, time=1, space=1)
pid2 = PartitionIdentity(self.bundle.identity, time=2, space=2)
pid3 = PartitionIdentity(self.bundle.identity, space=3,)
self.bundle.partitions.new_partition(pid1, data={'pid':'pid1'})
self.bundle.partitions.new_partition(pid2, data={'pid':'pid2'})
self.bundle.partitions.new_partition(pid3, data={'pid':'pid3'})
self.bundle.partitions.new_partition(pid1, data={'pid':'pid1'})
self.bundle.partitions.new_partition(pid2, data={'pid':'pid21'})
self.bundle.partitions.new_partition(pid3, data={'pid':'pid31'})
self.bundle.database.session.commit()
# 4 partitions from the build ( defined in meta/geoschema.csv),
# three we just created.
self.assertEqual(7, len(self.bundle.partitions.all))
p = self.bundle.partitions.new_partition(pid1)
self.assertEquals('pid1',p.data['pid'] )
p = self.bundle.partitions.new_partition(pid2)
self.assertEquals('pid2',p.data['pid'] )
p = self.bundle.partitions.new_partition(pid3)
self.assertEquals('pid3',p.data['pid'] )
p = self.bundle.partitions.find(pid1)
self.assertEquals('pid1',p.data['pid'] )
p = self.bundle.partitions.find(pid2)
self.assertEquals('pid2',p.data['pid'] )
p = self.bundle.partitions.find(pid3)
self.assertEquals('pid3',p.data['pid'] )
p = self.bundle.partitions.find_orm(pid3)
s = self.bundle.database.session
p.data['foo'] = 'bar'
s.commit()
p = self.bundle.partitions.find(pid3)
self.assertEquals('bar',p.data['foo'] )
s.commit()
p.database.create()
p = self.bundle.partitions.find('source-dataset-subset-variation-ca0d-3')
self.assertTrue(p is not None)
self.assertEquals(pid3.name, p.identity.name)
def x_test_tempfile(self):
self.test_generate_schema()
table = self.bundle.schema.tables[0]
print "TABLE", table.name
tf = self.bundle.database.tempfile(table)
print "PATH",tf.path
w = tf.writer
for i in range(10):
w.writerow([i,i,i])
def suite():
    """Build the test suite containing all tests of this module."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(Test))
    return tests
if __name__ == "__main__":
unittest.TextTestRunner().run(suite()) | treyhunner/databundles | test/test_bundle.py | Python | bsd-3-clause | 14,223 |
try:
from pylibdmtx import pylibdmtx
except ImportError:
pylibdmtx = None
__all__ = ()
else:
__all__=('DataMatrix',)
from reportlab.graphics.barcode.common import Barcode
from reportlab.lib.utils import asBytes
from reportlab.platypus.paraparser import _num as paraparser_num
from reportlab.graphics.widgetbase import Widget
from reportlab.lib.validators import isColor, isString, isColorOrNone, isNumber, isBoxAnchor
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.lib.colors import toColor
from reportlab.graphics.shapes import Group, Rect
def _numConv(x):
return x if isinstance(x,(int,float)) else paraparser_num(x)
class _DMTXCheck(object):
    """Mixin that verifies the optional pylibdmtx dependency is importable."""
    @classmethod
    def pylibdmtx_check(cls):
        # Fail loudly at construction time rather than with an obscure
        # error later during encoding.
        if pylibdmtx is None:
            raise ValueError('The %s class requires package pylibdmtx' % cls.__name__)
class DataMatrix(Barcode,_DMTXCheck):
def __init__(self, value='', **kwds):
self.pylibdmtx_check()
self._recalc = True
self.value = value
self.cellSize = kwds.pop('cellSize','5x5')
self.size = kwds.pop('size','SquareAuto')
self.encoding = kwds.pop('encoding','Ascii')
self.anchor = kwds.pop('anchor','sw')
self.color = kwds.pop('color',(0,0,0))
self.bgColor = kwds.pop('bgColor',None)
self.x = kwds.pop('x',0)
self.y = kwds.pop('y',0)
self.border = kwds.pop('border',5)
@property
def value(self):
return self._value
@value.setter
def value(self,v):
self._value = asBytes(v)
self._recalc = True
@property
def size(self):
return self._size
@size.setter
def size(self,v):
self._size = self._checkVal('size', v, pylibdmtx.ENCODING_SIZE_NAMES)
self._recalc = True
@property
def border(self):
return self._border
@border.setter
def border(self,v):
self._border = _numConv(v)
self._recalc = True
@property
def x(self):
return self._x
@x.setter
def x(self,v):
self._x = _numConv(v)
self._recalc = True
@property
def y(self):
return self._y
@y.setter
def y(self,v):
self._y = _numConv(v)
self._recalc = True
@property
def cellSize(self):
return self._cellSize
@size.setter
def cellSize(self,v):
self._cellSize = v
self._recalc = True
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self,v):
self._encoding = self._checkVal('encoding', v, pylibdmtx.ENCODING_SCHEME_NAMES)
self._recalc = True
@property
def anchor(self):
return self._anchor
@anchor.setter
def anchor(self,v):
self._anchor = self._checkVal('anchor', v, ('n','ne','e','se','s','sw','w','nw','c'))
self._recalc = True
def recalc(self):
if not self._recalc: return
data = self._value
size = self._size
encoding = self._encoding
e = pylibdmtx.encode(data, size=size, scheme=encoding)
iW = e.width
iH = e.height
p = e.pixels
iCellSize = 5
bpp = 3 #bytes per pixel
rowLen = iW*bpp
cellLen = iCellSize*bpp
assert len(p)//rowLen == iH
matrix = list(filter(None,
(''.join(
(('x' if p[j:j+bpp] != b'\xff\xff\xff' else ' ')
for j in range(i,i+rowLen,cellLen))).strip()
for i in range(0,iH*rowLen,rowLen*iCellSize))))
self._nRows = len(matrix)
self._nCols = len(matrix[-1])
self._matrix = '\n'.join(matrix)
cellWidth = self._cellSize
if cellWidth:
cellWidth = cellWidth.split('x')
if len(cellWidth)>2:
raise ValueError('cellSize needs to be distance x distance not %r' % self._cellSize)
elif len(cellWidth)==2:
cellWidth, cellHeight = cellWidth
else:
cellWidth = cellHeight = cellWidth[0]
cellWidth = _numConv(cellWidth)
cellHeight = _numConv(cellHeight)
else:
cellWidth = cellHeight = iCellSize
self._cellWidth = cellWidth
self._cellHeight = cellHeight
self._recalc = False
self._bord = max(self.border,cellWidth,cellHeight)
self._width = cellWidth*self._nCols + 2*self._bord
self._height = cellHeight*self._nRows + 2*self._bord
@property
def matrix(self):
    """Text matrix of 'x'/' ' rows (recomputed lazily)."""
    self.recalc()
    return self._matrix
@property
def width(self):
    """Total drawn width in points, borders included."""
    self.recalc()
    return self._width
@property
def height(self):
    """Total drawn height in points, borders included."""
    self.recalc()
    return self._height
@property
def cellWidth(self):
    """Effective width of one module cell."""
    self.recalc()
    return self._cellWidth
@property
def cellHeight(self):
    """Effective height of one module cell."""
    self.recalc()
    return self._cellHeight
def draw(self):
    """Render the symbol onto self.canv honouring anchor, colors and border."""
    self.recalc()
    canv = self.canv
    w = self.width
    h = self.height
    x = self.x
    y = self.y
    b = self._bord
    anchor = self.anchor
    # Shift the origin so that (x, y) refers to the requested anchor point
    # rather than the default corner.
    if anchor in ('nw','n','ne'):
        y -= h
    elif anchor in ('c','e','w'):
        y -= h//2
    if anchor in ('ne','e','se'):
        x -= w
    elif anchor in ('n','c','s'):
        x -= w//2
    canv.saveState()
    if self.bgColor:
        # Paint the background rectangle first, below the modules.
        canv.setFillColor(toColor(self.bgColor))
        canv.rect(x, y-h, w, h, fill=1, stroke=0)
    canv.setFillColor(toColor(self.color))
    canv.setStrokeColor(None)
    cellWidth = self.cellWidth
    cellHeight = self.cellHeight
    yr = y - b - cellHeight
    x += b
    # One filled rect per 'x' cell, scanning rows top to bottom.
    for row in self.matrix.split('\n'):
        xr = x
        for c in row:
            if c=='x':
                canv.rect(xr, yr, cellWidth, cellHeight, fill=1, stroke=0)
            xr += cellWidth
        yr -= cellHeight
    canv.restoreState()
class DataMatrixWidget(Widget,_DMTXCheck):
    """Drawing-framework wrapper around DataMatrix.

    It renders into a Group by impersonating a canvas: the rect /
    setFillColor / saveState methods below implement just enough of the
    canvas protocol for DataMatrix.draw() to call back into this widget.
    """
    codeName = "DataMatrix"
    _attrMap = AttrMap(
        BASE = Widget,
        value = AttrMapValue(isString, desc='Datamatrix data'),
        x = AttrMapValue(isNumber, desc='x-coord'),
        y = AttrMapValue(isNumber, desc='y-coord'),
        color = AttrMapValue(isColor, desc='foreground color'),
        bgColor = AttrMapValue(isColorOrNone, desc='background color'),
        encoding = AttrMapValue(isString, desc='encoding'),
        size = AttrMapValue(isString, desc='size'),
        cellSize = AttrMapValue(isString, desc='cellSize'),
        anchor = AttrMapValue(isBoxAnchor, desc='anchor pooint for x,y'),
        )
    # Map attribute name -> (default value, optional converter callable).
    _defaults = dict(
        x = ('0',_numConv),
        y = ('0',_numConv),
        color = ('black',toColor),
        bgColor = (None,lambda _: toColor(_) if _ is not None else _),
        encoding = ('Ascii',None),
        size = ('SquareAuto',None),
        cellSize = ('5x5',None),
        anchor = ('sw', None),
        )
    def __init__(self,value='Hello Cruel World!', **kwds):
        self.pylibdmtx_check()
        self.value = value
        # Apply defaults, converting through the paired callable when given.
        for k,(d,c) in self._defaults.items():
            v = kwds.pop(k,d)
            if c: v = c(v)
            setattr(self,k,v)
    def rect(self, x, y, w, h, fill=1, stroke=0):
        # Canvas-protocol stand-in: record the rect instead of painting it.
        self._gadd(Rect(x,y,w,h,strokeColor=None,fillColor=self._fillColor))
    def saveState(self,*args,**kwds):
        # Canvas-protocol no-op (shared by restoreState/setStrokeColor below).
        pass
    restoreState = setStrokeColor = saveState
    def setFillColor(self,c):
        # Remember the current fill colour for subsequent rect() calls.
        self._fillColor = c
    def draw(self):
        """Build and return a Group of Rects for the current value."""
        m = DataMatrix(value=self.value,**{k: getattr(self,k) for k in self._defaults})
        m.canv = self
        m.y += m.height
        g = Group()
        self._gadd = g.add
        m.draw()
        return g
| piMoll/SEILAPLAN | lib/reportlab/graphics/barcode/dmtx.py | Python | gpl-2.0 | 7,880 |
#! /usr/bin/python
# Generator script: builds the JNI C wrapper for the Z3 native API from the
# z3_api.h / z3_interp.h headers (Python 2 era script).
DEBUG=False  # when True, dump the intermediate transformation stages to files
import sys
import os.path
if len(sys.argv) != 2:
    sys.exit("Z3 directory required as single command-line argument.")
# Fixed preamble written verbatim at the top of the generated C file.
HEADER = '''// THIS FILE IS BUILD AUTOMATICALLY, DO NOT CHANGE!!!
#include<jni.h>
#include<stdlib.h>
#include"z3.h"
// include CPAchecker-specific parts
#include"includes/function.h"
#include"includes/arguments.h"
#include"includes/types.h"
#include"includes/error_handling.h"
'''
def process_text(text):
    """Translate a Z3 C header into the DEFINE_FUNC macro language.

    :param text: String containing the API text (contents of z3_api.h or
        z3_interp.h).
    :return: String with one DEFINE_FUNC(...) block per exported function.
    """
    # filter comments and stuff
    header_1 = 'interface Z3 {'
    header_2 = 'extern "C"'
    if header_1 in text:
        text = text[text.find(header_1)+len(header_1)+1:]
    elif header_2 in text:
        text = text[text.find(header_2)+len(header_2)+2:]
    deprecatedBlock = False
    comment=False
    out1 = []
    for i,c in enumerate(text):
        # skip deprecated functions
        if text[i:i+17] == "@name Deprecated ": deprecatedBlock = True
        if text[i:i+5] == "/*@}*": deprecatedBlock = False
        if text[i] == "/" and text[i+1] == "*": comment=True
        if text[i-1] == "/" and text[i-2] == "*": comment=False
        if not comment and not deprecatedBlock: out1.append(c)
    tmp1 = "".join(out1)
    if DEBUG: open("out1","w").write(tmp1)
    # make one function per line
    out2=[]
    for line in tmp1.splitlines():
        line = line.strip()
        if not line or line == "}" or line == "};": continue
        if line.startswith("#") or line.startswith("//"): continue
        if line in ["BEGIN_MLAPI_EXCLUDE", "END_MLAPI_EXCLUDE"]: continue
        out2.append(line)
    tmp2 = "".join(out2)
    tmp2 = tmp2.replace(";", ";\n")
    if DEBUG: open("out2","w").write(tmp2)
    # remove unused information
    extBlock = False
    out3=[]
    for line in tmp2.splitlines():
        assert line.endswith(");")
        # skip ext-functions
        if "Z3_reduce_eq_callback_fptr" in line:
            extBlock = True
        if "Z3_theory_get_app" in line:
            extBlock = False
            continue
        if extBlock: continue
        if "fptr" in line: continue # functionpointers currently unsupported
        if "set_error_handler" in line: continue # not supported
        if "_interpolation_problem" in line: continue # not supported
        if "_check_interpolant" in line: continue # not supported
        if line.startswith("typedef"): continue
        line = line.replace(" Z3_API ", " ")
        line = line.replace("const ", " ")
        line = line.replace("const*", " * ")
        line = line.replace("*", " * ")
        out3.append(line)
        # if "__" in line: print line
    tmp3 = "\n".join(out3)
    if DEBUG: open("out3","w").write(tmp3)
    # now do the real replacing and build the new language...
    out4=[]
    for line in tmp3.splitlines():
        spl = line.split()
        retval = spl[0]
        name0 = spl[1]
        assert "(" in name0
        name = name0[:name0.find("(")]
        #print retval, "\t", name
        assert spl[-1].endswith(");")
        paramStr = name0[name0.find("(")+1:] + " " + " ".join(spl[2:])
        params = [p for p in paramStr.replace(");", "").strip().split(",") if p]
        #print "\t\t", paramStr, params
        # '_' must be escaped as '_1' in JNI function names.
        x = "DEFINE_FUNC(" + getType(retval) + ", " + name.replace("Z3_","").replace("_", "_1") + ") "
        l = len(params)
        isVoidArgCall = (l==0 or (l==1 and params[0] == "void"))
        if isVoidArgCall:
            x += "WITHOUT_ARGS"
        else:
            x += "WITH_" + str(l) + "_ARGS("
        inputs = []
        cleanups = []
        typs = []
        def checkAndClean(inp, typ, i):
            # Emit the conversion macro and (in reverse order) its cleanup.
            inputs.append( inp + "_" + typ + "(" + str(i+1) + ")")
            cleanups.insert(0, "CLEAN_" + inp + "_" + typ + "(" + str(i+1) + ")")
        for i, param in enumerate(params):
            parts = param.split()
            # f(void) --> nothing to do
            if parts[0] == "void":
                assert len(parts) == 1
                continue
            # modifier available
            if parts[0].startswith("__"):
                mod = parts[0]
                if mod == "__in":
                    assert "[" not in param
                    # mod + type + * + pname
                    if "*" in param:
                        assert parts[2] == "*"
                        typ = parts[1]
                        typs.append(getType(typ) + "_pointer")
                        inp = typ.replace("Z3_", "").upper()
                        checkAndClean(inp, "POINTER_ARG", i)
                    # mod + type... + pname
                    else:
                        assert len(parts) >= 3
                        typ = "_".join(parts[1:-1]).replace("__","") # unsigned + __int64
                        typs.append(getType(typ))
                        inp = typ.replace("Z3_", "").upper()
                        checkAndClean(inp, "ARG", i)
                # mod + type + pname[]
                elif mod.startswith("__in_ecount(") \
                        and len(parts) == 3 and parts[2].endswith("[]"):
                    typ = parts[1]
                    typs.append(getType(typ) + "_array")
                    inp = typ.replace("Z3_", "").upper()
                    checkAndClean(inp, "ARRAY_ARG", i)
                # mod + type + * + pname --> in java we use an array
                elif mod.startswith("__in_ecount("):
                    # Bug fix: was `parts[2] is "*"`, which compared string
                    # identity and only worked through CPython interning.
                    assert len(parts) == 4 and parts[2] == "*"
                    typ = parts[1]
                    typs.append(getType(typ) + "_array")
                    inp = typ.replace("Z3_", "").upper()
                    checkAndClean(inp, "ARRAY_ARG", i)
                # special case: string_ptr == string*
                elif mod == "__out_opt" and not "*" in param:
                    assert len(parts) == 3 and parts[1] == "Z3_string_ptr"
                    typ = "Z3_string"
                    typs.append(getType(typ) + "_pointer")
                    inp = typ.replace("Z3_", "").upper()
                    checkAndClean(inp, "POINTER_ARG", i)
                    cleanups.insert(0, "SET_" + inp + "_POINTER_ARG(" + str(i+1) + ")")
                # mod + type... + * + pname
                elif mod == "__out" or mod == "__out_opt":
                    assert "*" in param and len(parts) >= 4 and parts[-2] == "*"
                    typ = "_".join(parts[1:-2]).replace("__","") # unsigned + __int64
                    typs.append(getType(typ) + "_pointer")
                    inp = typ.replace("Z3_", "").upper()
                    checkAndClean(inp, "POINTER_ARG", i)
                    cleanups.insert(0, "SET_" + inp + "_POINTER_ARG(" + str(i+1) + ")")
                # "__inout unsigned * core_size"
                elif mod.startswith("__inout") and "*" in param:
                    assert len(parts) == 4 and parts[2] == "*"
                    typ = parts[1]
                    typs.append(getType(typ) + "_pointer")
                    inp = typ.replace("Z3_", "").upper()
                    checkAndClean(inp, "POINTER_ARG", i)
                    cleanups.insert(0, "SET_" + inp + "_POINTER_ARG(" + str(i+1) + ")")
                # "__inout_ecount(num_constructors) Z3_constructor constructors[]"
                elif mod.startswith("__inout_ecount("):
                    assert len(parts) == 3 and parts[2].endswith("[]")
                    #lenParam = mod[15 : -1] # value of __inout_ecount()
                    #pnames = [p.split()[-1] for p in params]
                    #numLenParam = pnames.index(lenParam)
                    typ = parts[1]
                    typs.append(getType(typ) + "_array")
                    inp = typ.replace("Z3_", "").upper()
                    checkAndClean(inp, "OUT_ARRAY_ARG", i)
                    cleanups.insert(0, "SET_" + inp + "_OUT_ARRAY_ARG(" + str(i+1) + ")")
                # "__out_ecount(num_sorts) Z3_sort sorts[]"
                elif mod.startswith("__out_ecount(") \
                        and len(parts) == 3 and parts[2].endswith("[]"):
                    #lenParam = mod[13 : -1] # value of __out_ecount()
                    #pnames = [p.split()[-1] for p in params]
                    #numLenParam = pnames.index(lenParam)
                    typ = parts[1]
                    typs.append(getType(typ) + "_array")
                    inp = typ.replace("Z3_", "").upper()
                    checkAndClean(inp, "OUT_ARRAY_ARG", i)
                    cleanups.insert(0, "SET_" + inp + "_OUT_ARRAY_ARG(" + str(i+1) + ")")
                # "__out_ecount(num_sorts) Z3_sort * sorts"
                elif mod.startswith("__out_ecount("):
                    # Bug fix: was `parts[2] is "*"` (identity comparison).
                    assert len(parts) == 4 and parts[2] == "*"
                    #lenParam = mod[13 : -1] # value of __out_ecount()
                    #pnames = [p.split()[-1] for p in params]
                    #numLenParam = pnames.index(lenParam)
                    typ = parts[1]
                    typs.append(getType(typ) + "_array")
                    inp = typ.replace("Z3_", "").upper()
                    checkAndClean(inp, "OUT_ARRAY_ARG", i)
                    cleanups.insert(0, "SET_" + inp + "_OUT_ARRAY_ARG(" + str(i+1) + ")")
                else:
                    # Unknown modifier: report it so the generator can be
                    # extended (parenthesized for py2/py3 compatibility).
                    pass
                    print(parts)
            # normal param: [type, pname]
            else:
                try:
                    assert len(parts) == 2, parts
                except AssertionError:
                    # Debugging aid left from development; only AssertionError
                    # is possible here, so the former bare `except:` was
                    # narrowed accordingly.
                    import pdb; pdb.set_trace()
                typ, pname = parts
                typs.append(getType(typ))
                inp = typ.replace("Z3_", "").upper()
                checkAndClean(inp, "ARG", i)
        x += ", ".join(typs)
        if isVoidArgCall:
            x += "\n"
        else:
            x += ")\n"
        # INPUT_ARG
        if inputs: x += "\n".join(inputs) + "\n"
        # CALL()
        if retval == "void":
            if isVoidArgCall:
                x += "VOID_CALL0("
            else:
                x += "VOID_CALL" + str(l) + "("
        else:
            if isVoidArgCall:
                x += "CALL0(" + retval + ", "
            else:
                x += "CALL" + str(l) + "(" + retval + ", "
        x += name.replace("Z3_", "") + ")\n"
        # FREE_ARG, SET_ARG
        if cleanups: x += "\n".join(cleanups) + "\n"
        # RETURN_VALUE
        simpleRetvals = ["void", "Z3_bool", "Z3_lbool", "Z3_bool_opt", "unsigned", "int", "double", "Z3_error_code", "Z3_goal_prec"]
        if retval not in simpleRetvals and not retval.endswith("kind") \
                and typs and typs[0] == "J_context":
            x += retval.replace("Z3_", "").upper() + "_RETURN_WITH_CONTEXT\n"
        else:
            x += retval.replace("Z3_", "").upper() + "_RETURN\n"
        out4.append(x)
    tmp4 = "\n".join(out4)
    if DEBUG: open("out4","w").write(tmp4)
    return tmp4
def main():
    """Read z3_api.h and z3_interp.h from the directory named on the command
    line and write the generated JNI wrapper C file into the current
    directory."""
    # Bug fix: the original leaked the two input file handles and could leak
    # the output handle if a write failed; context managers close them all.
    with open(os.path.join(sys.argv[1], "z3_api.h")) as f:
        api = f.read()
    with open(os.path.join(sys.argv[1], "z3_interp.h")) as f:
        interp_api = f.read()
    result_text = process_text(api)
    result_interp = process_text(interp_api)
    # Write result
    with open("org_sosy_lab_cpachecker_util_predicates_z3_Z3NativeApi.c","w") as out_f:
        out_f.write(HEADER)
        out_f.write(result_text)
        out_f.write('\n\n// INTERPOLATION\n\n')
        out_f.write(result_interp)
def getType(typ):
    """Map a Z3 C type name onto the corresponding J* wrapper type name."""
    return "J".join(typ.split("Z3"))
if __name__ == "__main__":
main()
| nishanttotla/predator | cpachecker/lib/native/source/libz3j/buildZ3wrapper.py | Python | gpl-3.0 | 10,950 |
#!/bin/python3.5
# Program based on a HackerRank exercise: it receives a list of 0s and 1s
# representing doors (0 = open, 1 = closed). The goal is to open all doors;
# opening one door may open adjacent ones that are not yet open. The program
# returns, for a list of 0s and 1s, the minimum and maximum number of doors
# that must be opened following this pattern.
import sys
def puertas(doors):
    """Count door openings for a row of doors (0 = open, 1 = closed).

    Scans positions 1 .. len(doors)-3 and returns ``[minimum, maximum]``
    following the original rule: a run of three closed doors can be cleared
    with one opening (best case) or two (worst case).

    Behaviour is unchanged; the locals that shadowed the builtins ``min``
    and ``max`` were renamed and the redundant ``elif`` (always true inside
    the ``doors[i] == 1`` branch) was folded into ``else``.
    """
    min_opens = 0
    max_opens = 0
    i = 1
    while i < len(doors) - 2:
        if doors[i] == 1:
            if doors[i - 1:i + 2] == [1, 1, 1]:
                # Three closed doors in a row: one opening may suffice,
                # two at most; skip past the run.
                min_opens += 1
                max_opens += 2
                i += 2
            else:
                min_opens += 1
                max_opens += 1
                i += 1
        else:
            min_opens += 1
            max_opens += 1
            i += 1
    return [min_opens, max_opens]
def prueba():
    """Print the integers 0 through 9 (demo helper).

    The original body also executed ``i += i`` inside the loop; the ``for``
    statement rebinds ``i`` on every iteration, so that statement had no
    observable effect and was removed.
    """
    for i in range(10):
        print(i)
if __name__ == "__main__":
doors = list ( map( int, input().strip().split(' ')))
print ("La puerta creada: " , doors)
result = puertas (doors)
print( " ".join( map(str , result )))
prueba();
| BlancaCC/cultutrilla | python_aprendizaje/ejemplos_básicos/puertas.py | Python | gpl-3.0 | 1,203 |
# coding=utf-8
#########################################
# kNN: k Nearest Neighbors
# Input: newInput: vector to compare to existing dataset (1xN)
# dataSet: size m data set of known vectors (NxM)
# labels: data set labels (1xM vector)
# k: number of neighbors to use for comparison
# Output: the most popular class label
#########################################
from numpy import *
import operator
# create a dataset which contains 4 samples with 2 classes
def createDataSet():
# create a matrix: each row as a sample
group = array([[1.0, 0.9], [1.0, 1.0], [0.1, 0.2], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B'] # four samples and two classes
return group, labels
# classify using kNN
def kNNClassify(newInput, dataSet, labels, k):
numSamples = dataSet.shape[0] # shape[0] stands for the num of row
## step 1: calculate Euclidean distance
# tile(A, reps): Construct an array by repeating A reps times
# the following copy numSamples rows for dataSet
diff = tile(newInput, (numSamples, 1)) - dataSet # Subtract element-wise
squaredDiff = diff ** 2 # squared for the subtract
squaredDist = sum(squaredDiff, axis = 1) # sum is performed by row
distance = squaredDist ** 0.5
## step 2: sort the distance
# argsort() returns the indices that would sort an array in a ascending order
sortedDistIndices = argsort(distance)
classCount = {} # define a dictionary (can be append element)
for i in xrange(k):
## step 3: choose the min k distance
voteLabel = labels[sortedDistIndices[i]]
## step 4: count the times labels occur
# when the key voteLabel is not in dictionary classCount, get()
# will return 0
classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
## step 5: the max voted class will return
maxCount = 0
for key, value in classCount.items():
if value > maxCount:
maxCount = value
maxIndex = key
return maxIndex | gatieme/AderXCoding | technology/machine_learning/K-Nearest-Neighbor/simple/knn.py | Python | gpl-2.0 | 2,062 |
"""Settings for AutoShare"""
# Youtube url and api settings
Youtube_playlistId = ""
Youtube_api_key = ""
Youtube_base_url = "https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=1&playlistId="+Youtube_playlistId+"&key="+Youtube_api_key
Youtube_watch_url = "https://www.youtube.com/watch?v="
# Facebook url and token settings
Facebook_base_url = "https://graph.facebook.com/me/feed/"
Access_token = ""
| trishnaguha/AutoShare | settings.py | Python | mit | 429 |
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from rpc_thrift.cython.cyframed_transport_ex import TCyFramedTransportEx
from rpc_thrift.cython.cymemory_transport import TCyMemoryBuffer
from rpc_thrift.cython.cyframed_transport import TCyFramedTransport
from rpc_thrift.transport import TMemoryBuffer
# TODO: how should this relative path ('test-data.json' below) be resolved?
from unittest import TestCase
class FramedBufferTest(TestCase):
    """Exercises the Cython framed transports against in-memory buffers."""
    def setUp(self):
        super(FramedBufferTest, self).setUp()
        print ""

    def tearDown(self):
        super(FramedBufferTest, self).tearDown()

    def test_write_flush(self):
        """
        py.test test/test_framed_buffer.py::FramedBufferTest::test_write_flush -s
        """
        buf = TMemoryBuffer()
        transport = TCyFramedTransport(buf)
        transport.write("abcdef")
        transport.flush()
        # Dump the framed bytes; presumably a 4-byte length prefix precedes
        # the payload -- confirm against the transport implementation.
        print "Framed Output: ", ["%03d" % ord(i) for i in buf.getvalue()]

        # Use a MemoryBuffer as the frame buffer.
        buf2 = TCyMemoryBuffer()
        buf2.prepare_4_frame()
        buf2.write("abcdef")

        buf1 = TMemoryBuffer()
        tran1 = TCyFramedTransportEx(buf1)
        tran1.flush_frame_buff(buf2)
        print "Framed Output: ", ["%03d" % ord(i) for i in buf1.getvalue()]

        # Rewind and read the frame back through the Ex transport.
        buf1.reset()
        tran1 = TCyFramedTransportEx(buf1)
        mem_trans = tran1.read_frame()
        print "ReadFrame: ", mem_trans
        value = mem_trans.getvalue()
        print "Value: ", value, ["%03d" % ord(i) for i in value]
| wfxiang08/rpc_proxy_python | test/test_framed_buffer.py | Python | mit | 1,516 |
# Generated by Django 2.2.14 on 2020-08-28 18:02
from django.db import migrations
class Migration(migrations.Migration):
    # Metadata-only migration: updates the Alert model's Meta options
    # (get_latest_by / verbose names); no database schema change.

    dependencies = [
        ("utils", "0021_auto_20180826_1616"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="alert",
            options={"get_latest_by": "pubdate", "verbose_name": "Alerte", "verbose_name_plural": "Alertes"},
        ),
    ]
| ChantyTaguan/zds-site | zds/utils/migrations/0022_set_default_latest_by_for_alerts.py | Python | gpl-3.0 | 409 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import comma_and
from frappe.model.document import Document
from frappe.utils import get_datetime, get_datetime_str, now_datetime
class ShoppingCartSetupError(frappe.ValidationError): pass
class ShoppingCartSettings(Document):
    """Singleton doctype holding the web-shop configuration."""
    def onload(self):
        # Expose the available Quotation naming series to the client form.
        self.get("__onload").quotation_series = frappe.get_meta("Quotation").get_options("naming_series")

    def validate(self):
        if self.enabled:
            self.validate_exchange_rates_exist()

    def validate_exchange_rates_exist(self):
        """check if exchange rates exist for all Price List currencies (to company's currency)"""
        company_currency = frappe.db.get_value("Company", self.company, "default_currency")
        if not company_currency:
            msgprint(_("Please specify currency in Company") + ": " + self.company,
                raise_exception=ShoppingCartSetupError)

        price_list_currency_map = frappe.db.get_values("Price List",
            [self.price_list],
            "currency")

        # check if all price lists have a currency
        for price_list, currency in price_list_currency_map.items():
            if not currency:
                frappe.throw(_("Currency is required for Price List {0}").format(price_list))

        # Exchange rate pairs that must exist, e.g. "USD-EUR".
        expected_to_exist = [currency + "-" + company_currency
            for currency in price_list_currency_map.values()
            if currency != company_currency]

        # manqala 20/09/2016: set up selection parameters for query from tabCurrency Exchange
        from_currency = [currency for currency in price_list_currency_map.values() if currency != company_currency]
        to_currency = company_currency
        # manqala end

        if expected_to_exist:
            # manqala 20/09/2016: modify query so that it uses date in the selection from Currency Exchange.
            # exchange rates defined with date less than the date on which this document is being saved will be selected
            # NOTE(review): to_currency is spliced into the SQL via Python
            # string formatting; the value comes from company config here,
            # but a bound "%s" placeholder would be safer -- confirm.
            exists = frappe.db.sql_list("""select CONCAT(from_currency,'-',to_currency) from `tabCurrency Exchange`
                where from_currency in (%s) and to_currency = "%s" and date <= curdate()""" % (", ".join(["%s"]*len(from_currency)), to_currency), tuple(from_currency))
            # manqala end

            missing = list(set(expected_to_exist).difference(exists))
            if missing:
                msgprint(_("Missing Currency Exchange Rates for {0}").format(comma_and(missing)),
                    raise_exception=ShoppingCartSetupError)

    def validate_tax_rule(self):
        # At least one Tax Rule must be flagged for shopping-cart use.
        if not frappe.db.get_value("Tax Rule", {"use_for_shopping_cart" : 1}, "name"):
            frappe.throw(frappe._("Set Tax Rule for shopping cart"), ShoppingCartSetupError)

    def get_tax_master(self, billing_territory):
        # First matching tax template for the territory, else None.
        tax_master = self.get_name_from_territory(billing_territory, "sales_taxes_and_charges_masters",
            "sales_taxes_and_charges_master")
        return tax_master and tax_master[0] or None

    def get_shipping_rules(self, shipping_territory):
        return self.get_name_from_territory(shipping_territory, "shipping_rules", "shipping_rule")
def validate_cart_settings(doc, method):
    # Hook entry point: re-run validation of the (singleton) settings doc.
    # The doc/method arguments are required by frappe's hook signature.
    frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings").run_method("validate")
def get_shopping_cart_settings():
    """Return the Shopping Cart Settings document, cached per request."""
    settings = getattr(frappe.local, "shopping_cart_settings", None)
    if not settings:
        settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
        frappe.local.shopping_cart_settings = settings
    return settings
def is_cart_enabled():
    """True when the Shopping Cart feature is switched on."""
    settings = get_shopping_cart_settings()
    return settings.enabled
def show_quantity_in_website():
    """Whether stock quantity should be displayed on the website."""
    settings = get_shopping_cart_settings()
    return settings.show_quantity_in_website
def check_shopping_cart_enabled():
    """Raise ShoppingCartSetupError unless the cart is enabled."""
    settings = get_shopping_cart_settings()
    if settings.enabled:
        return
    frappe.throw(_("You need to enable Shopping Cart"), ShoppingCartSetupError)
| RandyLowery/erpnext | erpnext/shopping_cart/doctype/shopping_cart_settings/shopping_cart_settings.py | Python | gpl-3.0 | 3,791 |
from django.core.management import call_command
import pytest
import septentrion
def test_showmigrations_command_override(mocker):
    """showmigrations should be routed to septentrion, not Django's handler."""
    mock_django_handle = mocker.patch(
        'django.core.management.commands.showmigrations.Command.handle')
    mock_show_migrations = mocker.patch(
        'septentrion.show_migrations', return_value=b'')
    call_command('showmigrations')
    assert mock_django_handle.called is False
    assert mock_show_migrations.called is True
@pytest.mark.parametrize("manage", [True, False, None])
def test_north_manage_migrations(mocker, settings, manage):
if manage is not None:
settings.NORTH_MANAGE_DB = manage
if manage is None and hasattr(settings, 'NORTH_MANAGE_DB'):
del settings.NORTH_MANAGE_DB
mock = mocker.patch('septentrion.show_migrations', return_value=b'')
call_command('showmigrations')
assert mock.called == bool(manage)
def test_showmigrations_schema_not_inited(capsys, mocker):
    """Before the schema is initialised the command reports version None."""
    mock_version = mocker.patch(
        'septentrion.db.get_current_schema_version')
    # schema not inited
    mock_version.return_value = None
    call_command('showmigrations')
    captured = capsys.readouterr()
    assert 'Current version is None' in captured.out
def test_showmigrations_schema(capsys, mocker):
    """With an initialised schema, the plan is printed with [X]/[ ] markers."""
    # schema inited
    mock_version = mocker.patch(
        'septentrion.db.get_current_schema_version')
    mock_version.return_value = septentrion.versions.Version.from_string('1.1')
    mock_plan = mocker.patch(
        'septentrion.core.build_migration_plan')
    # plan tuples: (name, applied, path, is_manual).
    mock_plan.return_value = [
        {
            'version': "Version 1.2",
            'plan': [
                ('a-ddl.sql', True, '/somewhere/a-ddl.sql', False),
                ('b-ddl.sql', False, '/somewhere/b-ddl.sql', True),
            ]
        },
        {
            'version': "Version 1.3",
            'plan': [
                ('c-ddl.sql', False, '/somewhere/c-ddl.sql', False),
            ]
        }
    ]
    call_command('showmigrations')
    captured = capsys.readouterr()
    assert "Current version is 1.1" in captured.out
    assert "Target version is 1.3" in captured.out
    assert "Version 1.2" in captured.out
    # '\x1b[0m' is the ANSI color reset emitted before each file name.
    assert "[X] \x1b[0ma-ddl.sql" in captured.out
    assert "[ ] \x1b[0mb-ddl.sql" in captured.out
    assert "Version 1.3" in captured.out
    assert "[ ] \x1b[0mc-ddl.sql" in captured.out
| novafloss/django-north | tests/test_showmigrations_command.py | Python | mit | 2,400 |
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
setup(name='french_dates_to_ical',
version='0.0.1',
author='Michaël Launay',
author_email='michaellaunay@ecreall.com',
url='http://www.ecreall.com/ressources/french_dates_to_ical',
download_url='https://github.com/michaellaunay/french_dates_to_ical',
description='Parse a string of french dates to generate the ical rules (RFC 5545)',
long_description='french_dates_to_ical can be use as a standalone tool or like a library. In both cases, you provide a string of french dates like "Tous les jeudis", and the program returns the ical rules.',
packages = find_packages(),
include_package_data = True,
package_data = {
'': ['*.txt', '*.rst'],
#'french_dates_to_ical': ['data/*'],
},
exclude_package_data = { '': ['README.txt'] },
scripts = ['bin/fr2ical.py'],
entry_points = """
[console_scripts]
fr2ical = french_dates_to_ical.main:main
""",
keywords='python tools utils ical',
license='GPL',
classifiers=['Development Status :: 2 - Planning',
'Natural Language :: French',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Topic :: Text Processing :: Dates',
],
#setup_requires = ['python-stdeb', 'fakeroot', 'python-all'],
install_requires = ['setuptools', 'docutils>=0.3', 'parsimonious', 'pytest'],
)
| michaellaunay/french_dates_to_ical | setup.py | Python | agpl-3.0 | 1,696 |
import json
import unittest
from ldap3 import (
Server,
Connection,
MOCK_SYNC,
LEVEL,
SUBTREE,
ALL_ATTRIBUTES
)
from setexpression.ldap import LDAPFilterSetExpression
def p(uid, quoted=False):
    """Build the DN of a person entry; optionally wrap it in double quotes."""
    dn = 'uid=%s,ou=people,ou=top' % (uid,)
    return '"%s"' % dn if quoted else dn
def g(cn, quoted=False):
    """Build the DN of a group entry; optionally wrap it in double quotes."""
    dn = 'cn=%s,ou=groups,ou=top' % (cn,)
    return '"%s"' % dn if quoted else dn
class TestLDAPFilterSetExpression(unittest.TestCase):
    """End-to-end checks of LDAPFilterSetExpression against a mocked server."""
    def setUp(self):
        # MOCK_SYNC keeps everything in-process; entries come from a fixture.
        self.server = Server('test_server')
        self.con = Connection(
            self.server,
            client_strategy=MOCK_SYNC
        )
        self.con.strategy.entries_from_json('test-data.json')
        self.con.bind()
        # Start each test from a simple "members of group B" expression.
        self.expr = LDAPFilterSetExpression(
            json.loads(g('B', True)),
            connection=self.con,
            search_base='ou=people,ou=top'
        )

    def test_iter(self):
        '''Expression should work as an iterator.'''
        members = [member for member in self.expr]
        self.assertListEqual(sorted(members), [p(1), p(2)])

    def test_in(self):
        '''Expression should work as a container.'''
        self.assertIn(p(1), self.expr)

    def test_group_name(self):
        '''A group name expression. Check filter and correct membership.'''
        # Load the expression with a simple group name.
        self.expr.expression=json.loads(g('B', True))
        # Make sure we get the right filter.
        self.assertEqual(
            self.expr.evaluate(),
            '(memberOf={})'.format(g('B'))
        )
        # Make sure this results in the correct members.
        self.assertListEqual(sorted(self.expr.members), [p(1), p(2)])

    def test_direct_members(self):
        '''A member list expression. Check filter and correct membership.'''
        self.expr.expression=json.loads(
            '{{"{}": "null", "{}": null, "{}": null}}'.format(
                'uid=1', 'uid=2', 'uid=3'
            )
        )
        # Member order inside the OR-filter is unspecified, hence the regex.
        self.assertRegex(
            self.expr.evaluate(),
            r'\(\|\(uid=[1-3]\)\(uid=[1-3]\)\(uid=[1-3]\)\)'
        )
        self.assertListEqual(
            sorted(self.expr.members),
            [p(1), p(2), p(3)]
        )

    def test_union(self):
        '''A union expression. Check filter and correct membership.'''
        self.expr.expression=json.loads(
            '["union", "{}", "{}"]'.format(g('B'), g('C'))
        )
        self.assertEqual(
            self.expr.evaluate(),
            '(|(memberOf={})(memberOf={}))'.format(g('B'), g('C'))
        )
        self.assertListEqual(
            sorted(self.expr.members),
            [p(1), p(2), p(3)]
        )

    def test_intersect(self):
        '''An intersect expression. Check filter and correct membership.'''
        self.expr.expression=json.loads(
            '["intersect", "{}", "{}"]'.format(g('B'), g('C'))
        )
        self.assertEqual(
            self.expr.evaluate(),
            '(&(memberOf={})(memberOf={}))'.format(g('B'), g('C'))
        )
        self.assertListEqual(
            sorted(self.expr.members),
            [p(2)]
        )

    def test_not(self):
        '''A not expression. Check filter and correct membership.'''
        self.expr.expression=json.loads(
            '["not", "{}"]'.format(g('B'))
        )
        self.assertEqual(
            self.expr.evaluate(),
            '(!(memberOf={}))'.format(g('B'))
        )
        self.assertListEqual(
            sorted(self.expr.members),
            [p(3), p(4)]
        )

    def test_minus(self):
        '''A nminus expression. Check filter and correct membership.'''
        self.expr.expression=json.loads(
            '["minus", "{}", "{}"]'.format(g('B'), g('A'))
        )
        self.assertEqual(
            self.expr.evaluate(),
            '(&(memberOf={})(!(memberOf={})))'.format(g('B'),g('A'))
        )
        self.assertListEqual(
            sorted(self.expr.members),
            [p(2)]
        )
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| colinhiggs/setexpression | tests.py | Python | agpl-3.0 | 4,184 |
# -*- coding: utf-8 -*-
"""
This file contains the Qudi hardware file to control the microwave dummy.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import random
from core.module import Base
from interface.microwave_interface import MicrowaveInterface
from interface.microwave_interface import MicrowaveLimits
from interface.microwave_interface import MicrowaveMode
from interface.microwave_interface import TriggerEdge
import time
class MicrowaveDummy(Base, MicrowaveInterface):
"""This is the Interface class to define the controls for the simple
microwave hardware.
"""
_modclass = 'MicrowaveDummy'
_modtype = 'mwsource'
def on_activate(self):
    """ Initialisation performed during activation of the module.
    """
    # Power-up defaults; -120 dBm is effectively "off".
    self.mw_cw_power = -120.0
    self.mw_sweep_power = 0.0
    self.mw_cw_frequency = 2.87e9
    self.mw_frequency_list = list()
    self.mw_start_freq = 2.5e9
    self.mw_stop_freq = 3.1e9
    self.mw_step_freq = 2.0e6

    # Can be MicrowaveMode.CW, MicrowaveMode.LIST or MicrowaveMode.SWEEP
    self.current_output_mode = MicrowaveMode.CW
    # Can be TriggerEdge.RISING or TriggerEdge.FALLING
    self.current_trig_pol = TriggerEdge.RISING
    self.output_active = False
    return
def on_deactivate(self):
    """ Deinitialisation performed during deactivation of the module.
    """
    # Nothing to release for the dummy device.
    pass
def get_limits(self):
    """Return the static MicrowaveLimits advertised by this dummy source."""
    limits = MicrowaveLimits()
    limits.supported_modes = (MicrowaveMode.CW, MicrowaveMode.LIST, MicrowaveMode.SWEEP)
    # Numeric limits, applied uniformly; frequencies in Hz, powers in dBm.
    for attr, value in (
            ('min_frequency', 100e3),
            ('max_frequency', 20e9),
            ('min_power', -120),
            ('max_power', 30),
            ('list_minstep', 0.001),
            ('list_maxstep', 20e9),
            ('list_maxentries', 10001),
            ('sweep_minstep', 0.001),
            ('sweep_maxstep', 20e9),
            ('sweep_maxentries', 10001)):
        setattr(limits, attr, value)
    return limits
def get_status(self):
    """
    Gets the current status of the MW source, i.e. the mode (cw, list or sweep) and
    the output state (stopped, running)

    @return str, bool: mode ['cw', 'list', 'sweep'], is_running [True, False]
    """
    if self.current_output_mode == MicrowaveMode.CW:
        mode = 'cw'
    elif self.current_output_mode == MicrowaveMode.LIST:
        mode = 'list'
    elif self.current_output_mode == MicrowaveMode.SWEEP:
        mode = 'sweep'
    # NOTE(review): if current_output_mode ever held a value outside these
    # three, 'mode' would be unbound here (UnboundLocalError) -- confirm the
    # attribute is always one of the enum members.
    return mode, self.output_active
def off(self):
    """ Switches off any microwave output.

    @return int: error code (0:OK, -1:error)
    """
    # Dummy device: just clear the state flag and log the action.
    self.output_active = False
    self.log.info('MicrowaveDummy>off')
    return 0
def get_power(self):
    """ Gets the microwave output power.

    @return float: the power set at the device in dBm
    """
    self.log.debug('MicrowaveDummy>get_power')
    in_cw_mode = self.current_output_mode == MicrowaveMode.CW
    return self.mw_cw_power if in_cw_mode else self.mw_sweep_power
def get_frequency(self):
    """
    Gets the frequency of the microwave output.
    Returns single float value if the device is in cw mode.
    Returns list if the device is in either list or sweep mode.

    @return [float, list]: frequency(s) currently set for this device in Hz
    """
    self.log.debug('MicrowaveDummy>get_frequency')
    if self.current_output_mode == MicrowaveMode.CW:
        return self.mw_cw_frequency
    elif self.current_output_mode == MicrowaveMode.LIST:
        return self.mw_frequency_list
    elif self.current_output_mode == MicrowaveMode.SWEEP:
        # NOTE(review): sweep mode actually returns a 3-tuple
        # (start, stop, step), not a list as the docstring suggests; an
        # unknown mode would fall through and return None -- confirm.
        return (self.mw_start_freq, self.mw_stop_freq, self.mw_step_freq)
def cw_on(self):
    """
    Switches on cw microwave output.
    Must return AFTER the device is actually running.

    @return int: error code (0:OK, -1:error)
    """
    self.current_output_mode = MicrowaveMode.CW
    # Simulated hardware settling time.
    time.sleep(0.5)
    self.output_active = True
    self.log.info('MicrowaveDummy>CW output on')
    return 0
def set_cw(self, frequency=None, power=None):
    """
    Configures the device for cw-mode and optionally sets frequency and/or power

    @param float frequency: frequency to set in Hz
    @param float power: power to set in dBm

    @return float, float, str: current frequency in Hz, current power in dBm, current mode
    """
    # Bug fix: the original format string used '{0:f}' for BOTH fields (so
    # the power was logged as the frequency) and ':f' raised TypeError when
    # either argument was left at its None default. Lazy %s logging handles
    # both cases and defers formatting unless debug logging is enabled.
    self.log.debug('MicrowaveDummy>set_cw, frequency: %s, power: %s',
                   frequency, power)
    self.output_active = False
    self.current_output_mode = MicrowaveMode.CW
    if frequency is not None:
        self.mw_cw_frequency = frequency
    if power is not None:
        self.mw_cw_power = power
    return self.mw_cw_frequency, self.mw_cw_power, 'cw'
def list_on(self):
    """
    Switches on the list mode microwave output.
    Must return AFTER the device is actually running.

    @return int: error code (0:OK, -1:error)
    """
    self.current_output_mode = MicrowaveMode.LIST
    # Simulated hardware settling time.
    time.sleep(1)
    self.output_active = True
    self.log.info('MicrowaveDummy>List mode output on')
    return 0
def set_list(self, frequency=None, power=None):
"""
Configures the device for list-mode and optionally sets frequencies and/or power
@param list frequency: list of frequencies in Hz
@param float power: MW power of the frequency list in dBm
@return list, float, str: current frequencies in Hz, current power in dBm, current mode
"""
self.log.debug('MicrowaveDummy>set_list, frequency_list: {0}, power: {1:f}'
''.format(frequency, power))
self.output_active = False
self.current_output_mode = MicrowaveMode.LIST
if frequency is not None:
self.mw_frequency_list = frequency
if power is not None:
self.mw_cw_power = power
return self.mw_frequency_list, self.mw_cw_power, 'list'
def reset_listpos(self):
"""
Reset of MW list mode position to start (first frequency step)
@return int: error code (0:OK, -1:error)
"""
return 0
def sweep_on(self):
""" Switches on the sweep mode.
@return int: error code (0:OK, -1:error)
"""
self.current_output_mode = MicrowaveMode.SWEEP
time.sleep(1)
self.output_active = True
self.log.info('MicrowaveDummy>Sweep mode output on')
return 0
def set_sweep(self, start=None, stop=None, step=None, power=None):
"""
Configures the device for sweep-mode and optionally sets frequency start/stop/step
and/or power
@return float, float, float, float, str: current start frequency in Hz,
current stop frequency in Hz,
current frequency step in Hz,
current power in dBm,
current mode
"""
self.log.debug('MicrowaveDummy>set_sweep, start: {0:f}, stop: {1:f}, step: {2:f}, '
'power: {3:f}'.format(start, stop, step, power))
self.output_active = False
self.current_output_mode = MicrowaveMode.SWEEP
if (start is not None) and (stop is not None) and (step is not None):
self.mw_start_freq = start
self.mw_stop_freq = stop
self.mw_step_freq = step
if power is not None:
self.mw_sweep_power = power
return self.mw_start_freq, self.mw_stop_freq, self.mw_step_freq, self.mw_sweep_power, \
'sweep'
def reset_sweeppos(self):
"""
Reset of MW sweep mode position to start (start frequency)
@return int: error code (0:OK, -1:error)
"""
return 0
def set_ext_trigger(self, pol):
""" Set the external trigger for this device with proper polarization.
@param TriggerEdge pol: polarisation of the trigger (basically rising edge or falling edge)
@return object: current trigger polarity [TriggerEdge.RISING, TriggerEdge.FALLING]
"""
self.log.info('MicrowaveDummy>ext_trigger set')
self.current_trig_pol = pol
return self.current_trig_pol
| childresslab/MicrocavityExp1 | hardware/microwave/mw_source_dummy.py | Python | gpl-3.0 | 9,607 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.